Merge "Add constant folding for long unary operations in opt. compiler."
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index 5d4feb8..3a1bd09 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -33,7 +33,13 @@
ART_BUILD_TARGET_DEBUG ?= true
ART_BUILD_HOST_NDEBUG ?= true
ART_BUILD_HOST_DEBUG ?= true
-ART_BUILD_HOST_STATIC ?= true
+
+# Enable the static builds only for checkbuilds.
+ifneq (,$(filter checkbuild,$(MAKECMDGOALS)))
+ ART_BUILD_HOST_STATIC ?= true
+else
+ ART_BUILD_HOST_STATIC ?= false
+endif
# Asan does not support static linkage
ifdef SANITIZE_HOST
@@ -132,7 +138,7 @@
-DNVALGRIND
# Warn about thread safety violations with clang.
-art_clang_cflags := -Wthread-safety
+art_clang_cflags := -Wthread-safety -Wthread-safety-negative
# Warn if switch fallthroughs aren't annotated.
art_clang_cflags += -Wimplicit-fallthrough
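Most of the header changes in this patch migrate ART's locking annotations to the newer Clang capability spellings (SHARED_LOCKS_REQUIRED becomes SHARED_REQUIRES, and the advisory LOCKS_EXCLUDED(lock) becomes the checked REQUIRES(!lock)); -Wthread-safety-negative is the flag that makes the negative form enforceable at call sites. A minimal sketch of the idea, written against the raw Clang attributes that ART's macros expand to (the Mutex class below is a toy stand-in, not ART's):

    // Compile with: clang++ -c -Wthread-safety -Wthread-safety-negative sketch.cc
    class __attribute__((capability("mutex"))) Mutex {
     public:
      void Lock() __attribute__((acquire_capability())) {}    // Toy: no real locking.
      void Unlock() __attribute__((release_capability())) {}
    };

    Mutex mu;
    int counter __attribute__((guarded_by(mu)));

    // The negative capability: callers must prove 'mu' is NOT already held,
    // which rules out self-deadlock. LOCKS_EXCLUDED was purely advisory;
    // REQUIRES(!mu) is actually checked once -Wthread-safety-negative is on.
    void Increment() __attribute__((requires_capability(!mu))) {
      mu.Lock();
      ++counter;
      mu.Unlock();
    }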
@@ -203,6 +209,11 @@
external/vixl/src \
external/zlib \
+# We optimize Thread::Current() with a direct TLS access. This requires access to a private
+# Bionic header.
+# Note: technically we only need this on device, but this avoids duplicating the includes.
+ART_C_INCLUDES += bionic/libc/private
+
# Base set of cflags used by all things ART.
art_cflags := \
-fno-rtti \
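The Thread::Current() fast path this include enables reads the Thread* straight out of a TLS slot that bionic reserves for ART, instead of going through pthread_getspecific(). A rough sketch of the shape of that fast path; __get_tls() and TLS_SLOT_ART_THREAD_SELF come from the private bionic header, while the surrounding declarations are illustrative stand-ins rather than ART's exact source:

    #include <pthread.h>
    #ifdef __ANDROID__
    #include "private/bionic_tls.h"  // __get_tls() and TLS_SLOT_ART_THREAD_SELF.
    #endif

    class Thread;                             // ART's thread class (assumed).
    extern pthread_key_t pthread_key_self_;   // Portable fallback key (assumed).

    inline Thread* CurrentThread() {
    #ifdef __ANDROID__
      // Fast path: a single load from the TLS slot bionic reserves for ART.
      return reinterpret_cast<Thread*>(__get_tls()[TLS_SLOT_ART_THREAD_SELF]);
    #else
      // Slow path: an opaque pthread_getspecific() call.
      return reinterpret_cast<Thread*>(pthread_getspecific(pthread_key_self_));
    #endif
    }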
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 377cd4e..4850e6c 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -165,6 +165,7 @@
runtime/base/hex_dump_test.cc \
runtime/base/histogram_test.cc \
runtime/base/mutex_test.cc \
+ runtime/base/out_test.cc \
runtime/base/scoped_flock_test.cc \
runtime/base/stringprintf_test.cc \
runtime/base/time_utils_test.cc \
@@ -255,6 +256,7 @@
compiler/optimizing/graph_checker_test.cc \
compiler/optimizing/graph_test.cc \
compiler/optimizing/gvn_test.cc \
+ compiler/optimizing/licm_test.cc \
compiler/optimizing/linearize_test.cc \
compiler/optimizing/liveness_test.cc \
compiler/optimizing/live_interval_test.cc \
diff --git a/build/Android.oat.mk b/build/Android.oat.mk
index c70e12d..0c0c3df 100644
--- a/build/Android.oat.mk
+++ b/build/Android.oat.mk
@@ -64,14 +64,14 @@
core_compile_options += --compiler-filter=interpret-only
core_infix := -interpreter
endif
- ifeq ($(1),interpreter-access-checks)
+ ifeq ($(1),interp-ac)
core_compile_options += --compiler-filter=verify-at-runtime --runtime-arg -Xverify:softfail
- core_infix := -interpreter-access-checks
+ core_infix := -interp-ac
endif
ifeq ($(1),default)
# Default has no infix, no compile options.
endif
- ifneq ($(filter-out default interpreter interpreter-access-checks jit optimizing,$(1)),)
+ ifneq ($(filter-out default interpreter interp-ac jit optimizing,$(1)),)
-#Technically this test is not precise, but hopefully good enough.
-$$(error found $(1) expected default, interpreter, interpreter-access-checks, jit or optimizing)
+# Technically this test is not precise, but hopefully good enough.
+$$(error found $(1) expected default, interpreter, interp-ac, jit or optimizing)
endif
@@ -147,14 +147,14 @@
$(eval $(call create-core-oat-host-rule-combination,default,,))
$(eval $(call create-core-oat-host-rule-combination,optimizing,,))
$(eval $(call create-core-oat-host-rule-combination,interpreter,,))
-$(eval $(call create-core-oat-host-rule-combination,interpreter-access-checks,,))
+$(eval $(call create-core-oat-host-rule-combination,interp-ac,,))
valgrindHOST_CORE_IMG_OUTS :=
valgrindHOST_CORE_OAT_OUTS :=
$(eval $(call create-core-oat-host-rule-combination,default,valgrind,32))
$(eval $(call create-core-oat-host-rule-combination,optimizing,valgrind,32))
$(eval $(call create-core-oat-host-rule-combination,interpreter,valgrind,32))
-$(eval $(call create-core-oat-host-rule-combination,interpreter-access-checks,valgrind,32))
+$(eval $(call create-core-oat-host-rule-combination,interp-ac,valgrind,32))
valgrind-test-art-host-dex2oat-host: $(valgrindHOST_CORE_IMG_OUTS)
@@ -184,14 +184,14 @@
core_compile_options += --compiler-filter=interpret-only
core_infix := -interpreter
endif
- ifeq ($(1),interpreter-access-checks)
+ ifeq ($(1),interp-ac)
core_compile_options += --compiler-filter=verify-at-runtime --runtime-arg -Xverify:softfail
- core_infix := -interpreter-access-checks
+ core_infix := -interp-ac
endif
ifeq ($(1),default)
# Default has no infix, no compile options.
endif
- ifneq ($(filter-out default interpreter interpreter-access-checks jit optimizing,$(1)),)
+ ifneq ($(filter-out default interpreter interp-ac jit optimizing,$(1)),)
# Technically this test is not precise, but hopefully good enough.
-$$(error found $(1) expected default, interpreter, interpreter-access-checks, jit or optimizing)
+$$(error found $(1) expected default, interpreter, interp-ac, jit or optimizing)
endif
@@ -272,14 +272,14 @@
$(eval $(call create-core-oat-target-rule-combination,default,,))
$(eval $(call create-core-oat-target-rule-combination,optimizing,,))
$(eval $(call create-core-oat-target-rule-combination,interpreter,,))
-$(eval $(call create-core-oat-target-rule-combination,interpreter-access-checks,,))
+$(eval $(call create-core-oat-target-rule-combination,interp-ac,,))
valgrindTARGET_CORE_IMG_OUTS :=
valgrindTARGET_CORE_OAT_OUTS :=
$(eval $(call create-core-oat-target-rule-combination,default,valgrind,32))
$(eval $(call create-core-oat-target-rule-combination,optimizing,valgrind,32))
$(eval $(call create-core-oat-target-rule-combination,interpreter,valgrind,32))
-$(eval $(call create-core-oat-target-rule-combination,interpreter-access-checks,valgrind,32))
+$(eval $(call create-core-oat-target-rule-combination,interp-ac,valgrind,32))
valgrind-test-art-host-dex2oat-target: $(valgrindTARGET_CORE_IMG_OUTS)
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index d215662..dc2bc5c 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -46,12 +46,12 @@
// Create an OatMethod based on pointers (for unit tests).
OatFile::OatMethod CreateOatMethod(const void* code);
- void MakeExecutable(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void MakeExecutable(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
static void MakeExecutable(const void* code_start, size_t code_length);
void MakeExecutable(mirror::ClassLoader* class_loader, const char* class_name)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
protected:
virtual void SetUp();
@@ -76,17 +76,17 @@
virtual void TearDown();
void CompileClass(mirror::ClassLoader* class_loader, const char* class_name)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- void CompileMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void CompileMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
void CompileDirectMethod(Handle<mirror::ClassLoader> class_loader, const char* class_name,
const char* method_name, const char* signature)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void CompileVirtualMethod(Handle<mirror::ClassLoader> class_loader, const char* class_name,
const char* method_name, const char* signature)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void ReserveImageSpace();
diff --git a/compiler/compiler.h b/compiler/compiler.h
index e5d1aff..fcd3434 100644
--- a/compiler/compiler.h
+++ b/compiler/compiler.h
@@ -58,7 +58,7 @@
const DexFile& dex_file) const = 0;
virtual uintptr_t GetEntryPointOf(ArtMethod* method) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+ SHARED_REQUIRES(Locks::mutator_lock_) = 0;
uint64_t GetMaximumCompilationTimeBeforeWarning() const {
return maximum_compilation_time_before_warning_;
diff --git a/compiler/dex/mir_field_info.h b/compiler/dex/mir_field_info.h
index e4570fd..04c58ac 100644
--- a/compiler/dex/mir_field_info.h
+++ b/compiler/dex/mir_field_info.h
@@ -135,7 +135,7 @@
// with IGET/IPUT. For fast path fields, retrieve the field offset.
static void Resolve(CompilerDriver* compiler_driver, const DexCompilationUnit* mUnit,
MirIFieldLoweringInfo* field_infos, size_t count)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
+ REQUIRES(!Locks::mutator_lock_);
// Construct an unresolved instance field lowering info.
explicit MirIFieldLoweringInfo(uint16_t field_idx, DexMemAccessType type, bool is_quickened)
@@ -192,7 +192,7 @@
// and the type index of the declaring class in the compiled method's dex file.
static void Resolve(CompilerDriver* compiler_driver, const DexCompilationUnit* mUnit,
MirSFieldLoweringInfo* field_infos, size_t count)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
+ REQUIRES(!Locks::mutator_lock_);
// Construct an unresolved static field lowering info.
explicit MirSFieldLoweringInfo(uint16_t field_idx, DexMemAccessType type)
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index dbe9062..23b7c42 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -173,7 +173,17 @@
typedef uint16_t BasicBlockId;
static const BasicBlockId NullBasicBlockId = 0;
-static constexpr bool kLeafOptimization = false;
+
+// Leaf optimization is basically the removal of suspend checks from leaf methods.
+// This is incompatible with SuspendCheckElimination (SCE) which eliminates suspend
+// checks from loops that call any non-intrinsic method, since a loop that calls
+// only a leaf method would end up without any suspend checks at all. So turning
+// this on automatically disables the SCE in MIRGraph::EliminateSuspendChecksGate().
+//
+// Since the Optimizing compiler applies the same optimization, Quick must not run
+// SCE anyway, so we enable this optimization as a way to disable SCE while keeping
+// behavior consistent across the backends, b/22657404.
+static constexpr bool kLeafOptimization = true;
/*
* In general, vreg/sreg describe Dalvik registers that originated with dx. However,
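A hypothetical example of the interaction the comment above describes (method names invented). If SCE drops the loop's suspend check on the assumption that the callee will check, and leaf optimization drops the callee's check because it is a leaf, the loop ends up with no suspension point at all:

    int LeafMethod(int x) {
      return x + 1;  // Leaf optimization: no suspend check in a leaf method.
    }

    int Loop(int n) {
      int sum = 0;
      for (int i = 0; i < n; ++i) {
        // SCE would remove this loop's suspend check because the body calls
        // a non-intrinsic method -- but the callee, being a leaf, never
        // checks either, so a thread could spin here without ever being
        // suspendable (e.g. for GC).
        sum = LeafMethod(sum);
      }
      return sum;
    }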
diff --git a/compiler/dex/mir_method_info.h b/compiler/dex/mir_method_info.h
index 946c74b..4512f35 100644
--- a/compiler/dex/mir_method_info.h
+++ b/compiler/dex/mir_method_info.h
@@ -99,7 +99,7 @@
// path methods, retrieve the method's vtable index and direct code and method when applicable.
static void Resolve(CompilerDriver* compiler_driver, const DexCompilationUnit* mUnit,
MirMethodLoweringInfo* method_infos, size_t count)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
+ REQUIRES(!Locks::mutator_lock_);
MirMethodLoweringInfo(uint16_t method_idx, InvokeType type, bool is_quickened)
: MirMethodInfo(method_idx,
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 5bb0ce3..80b7ac1 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -1724,7 +1724,8 @@
bool MIRGraph::EliminateSuspendChecksGate() {
- if ((cu_->disable_opt & (1 << kSuspendCheckElimination)) != 0 || // Disabled.
+ if (kLeafOptimization || // Incompatible (could create loops without suspend checks).
+ (cu_->disable_opt & (1 << kSuspendCheckElimination)) != 0 || // Disabled.
GetMaxNestedLoops() == 0u || // Nothing to do.
GetMaxNestedLoops() >= 32u || // Only 32 bits in suspend_checks_in_loops_[.].
// Exclude 32 as well to keep bit shifts well-defined.
diff --git a/compiler/dex/mir_optimization_test.cc b/compiler/dex/mir_optimization_test.cc
index 10a4337..47123ba 100644
--- a/compiler/dex/mir_optimization_test.cc
+++ b/compiler/dex/mir_optimization_test.cc
@@ -467,8 +467,17 @@
cu_.mir_graph->ComputeDominators();
cu_.mir_graph->ComputeTopologicalSortOrder();
cu_.mir_graph->SSATransformationEnd();
+
bool gate_result = cu_.mir_graph->EliminateSuspendChecksGate();
- ASSERT_TRUE(gate_result);
+ ASSERT_NE(gate_result, kLeafOptimization);
+ if (kLeafOptimization) {
+ // Even with kLeafOptimization on and Gate() refusing to allow SCE, we want
+ // to run the SCE test to avoid bitrot, so we need to initialize explicitly.
+ cu_.mir_graph->suspend_checks_in_loops_ =
+ cu_.mir_graph->arena_->AllocArray<uint32_t>(cu_.mir_graph->GetNumBlocks(),
+ kArenaAllocMisc);
+ }
+
TopologicalSortIterator iterator(cu_.mir_graph.get());
bool change = false;
for (BasicBlock* bb = iterator.Next(change); bb != nullptr; bb = iterator.Next(change)) {
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index 2568ee3..7fc6fa2 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -38,6 +38,7 @@
true, // kIntrinsicFloatCvt
true, // kIntrinsicReverseBits
true, // kIntrinsicReverseBytes
+ true, // kIntrinsicNumberOfLeadingZeros
true, // kIntrinsicAbsInt
true, // kIntrinsicAbsLong
true, // kIntrinsicAbsFloat
@@ -75,6 +76,8 @@
static_assert(kIntrinsicIsStatic[kIntrinsicFloatCvt], "FloatCvt must be static");
static_assert(kIntrinsicIsStatic[kIntrinsicReverseBits], "ReverseBits must be static");
static_assert(kIntrinsicIsStatic[kIntrinsicReverseBytes], "ReverseBytes must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicNumberOfLeadingZeros],
+ "NumberOfLeadingZeros must be static");
static_assert(kIntrinsicIsStatic[kIntrinsicAbsInt], "AbsInt must be static");
static_assert(kIntrinsicIsStatic[kIntrinsicAbsLong], "AbsLong must be static");
static_assert(kIntrinsicIsStatic[kIntrinsicAbsFloat], "AbsFloat must be static");
@@ -225,6 +228,7 @@
"putObjectVolatile", // kNameCachePutObjectVolatile
"putOrderedObject", // kNameCachePutOrderedObject
"arraycopy", // kNameCacheArrayCopy
+ "numberOfLeadingZeros", // kNameCacheNumberOfLeadingZeros
};
const DexFileMethodInliner::ProtoDef DexFileMethodInliner::kProtoCacheDefs[] = {
@@ -368,6 +372,9 @@
INTRINSIC(JavaLangInteger, Reverse, I_I, kIntrinsicReverseBits, k32),
INTRINSIC(JavaLangLong, Reverse, J_J, kIntrinsicReverseBits, k64),
+ INTRINSIC(JavaLangInteger, NumberOfLeadingZeros, I_I, kIntrinsicNumberOfLeadingZeros, k32),
+ INTRINSIC(JavaLangLong, NumberOfLeadingZeros, J_I, kIntrinsicNumberOfLeadingZeros, k64),
+
INTRINSIC(JavaLangMath, Abs, I_I, kIntrinsicAbsInt, 0),
INTRINSIC(JavaLangStrictMath, Abs, I_I, kIntrinsicAbsInt, 0),
INTRINSIC(JavaLangMath, Abs, J_J, kIntrinsicAbsLong, 0),
@@ -614,6 +621,8 @@
intrinsic.d.data & kIntrinsicFlagIsOrdered);
case kIntrinsicSystemArrayCopyCharArray:
return backend->GenInlinedArrayCopyCharArray(info);
+ case kIntrinsicNumberOfLeadingZeros:
+      return false;  // Not implemented in Quick.
default:
LOG(FATAL) << "Unexpected intrinsic opcode: " << intrinsic.opcode;
return false; // avoid warning "control reaches end of non-void function"
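For reference, the new intrinsic has the semantics of java.lang.Integer.numberOfLeadingZeros / Long.numberOfLeadingZeros: the count of zero bits above the most significant set bit, or the full bit width for zero (note the J_I proto above: the long overload still returns int). Most ISAs implement this as a single CLZ-style instruction, which is what makes it worth intrinsifying. A C++ sketch of the contract (helper names invented):

    #include <cstdint>

    int NumberOfLeadingZeros32(uint32_t x) {
      // __builtin_clz(0) is undefined, so zero is special-cased, matching
      // Integer.numberOfLeadingZeros(0) == 32.
      return x == 0 ? 32 : __builtin_clz(x);
    }

    int NumberOfLeadingZeros64(uint64_t x) {
      return x == 0 ? 64 : __builtin_clzll(x);
    }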
diff --git a/compiler/dex/quick/dex_file_method_inliner.h b/compiler/dex/quick/dex_file_method_inliner.h
index 26b41bf..bcb9ee5 100644
--- a/compiler/dex/quick/dex_file_method_inliner.h
+++ b/compiler/dex/quick/dex_file_method_inliner.h
@@ -62,49 +62,49 @@
* @return true if the method is a candidate for inlining, false otherwise.
*/
bool AnalyseMethodCode(verifier::MethodVerifier* verifier)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
/**
* Check whether a particular method index corresponds to an intrinsic or special function.
*/
- InlineMethodFlags IsIntrinsicOrSpecial(uint32_t method_index) LOCKS_EXCLUDED(lock_);
+ InlineMethodFlags IsIntrinsicOrSpecial(uint32_t method_index) REQUIRES(!lock_);
/**
* Check whether a particular method index corresponds to an intrinsic function.
*/
- bool IsIntrinsic(uint32_t method_index, InlineMethod* intrinsic) LOCKS_EXCLUDED(lock_);
+ bool IsIntrinsic(uint32_t method_index, InlineMethod* intrinsic) REQUIRES(!lock_);
/**
* Generate code for an intrinsic function invocation.
*/
- bool GenIntrinsic(Mir2Lir* backend, CallInfo* info) LOCKS_EXCLUDED(lock_);
+ bool GenIntrinsic(Mir2Lir* backend, CallInfo* info) REQUIRES(!lock_);
/**
* Check whether a particular method index corresponds to a special function.
*/
- bool IsSpecial(uint32_t method_index) LOCKS_EXCLUDED(lock_);
+ bool IsSpecial(uint32_t method_index) REQUIRES(!lock_);
/**
* Generate code for a special function.
*/
- bool GenSpecial(Mir2Lir* backend, uint32_t method_idx) LOCKS_EXCLUDED(lock_);
+ bool GenSpecial(Mir2Lir* backend, uint32_t method_idx) REQUIRES(!lock_);
/**
* Try to inline an invoke.
*/
bool GenInline(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke, uint32_t method_idx)
- LOCKS_EXCLUDED(lock_);
+ REQUIRES(!lock_);
/**
* Gets the thread pointer entrypoint offset for a string init method index and pointer size.
*/
uint32_t GetOffsetForStringInit(uint32_t method_index, size_t pointer_size)
- LOCKS_EXCLUDED(lock_);
+ REQUIRES(!lock_);
/**
* Check whether a particular method index is a string init.
*/
- bool IsStringInitMethodIndex(uint32_t method_index) LOCKS_EXCLUDED(lock_);
+ bool IsStringInitMethodIndex(uint32_t method_index) REQUIRES(!lock_);
/**
* To avoid multiple lookups of a class by its descriptor, we cache its
@@ -206,6 +206,7 @@
kNameCachePutObjectVolatile,
kNameCachePutOrderedObject,
kNameCacheArrayCopy,
+ kNameCacheNumberOfLeadingZeros,
kNameCacheLast
};
@@ -351,11 +352,11 @@
*
* Only DexFileToMethodInlinerMap may call this function to initialize the inliner.
*/
- void FindIntrinsics(const DexFile* dex_file) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ void FindIntrinsics(const DexFile* dex_file) REQUIRES(lock_);
friend class DexFileToMethodInlinerMap;
- bool AddInlineMethod(int32_t method_idx, const InlineMethod& method) LOCKS_EXCLUDED(lock_);
+ bool AddInlineMethod(int32_t method_idx, const InlineMethod& method) REQUIRES(!lock_);
static bool GenInlineConst(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
MIR* move_result, const InlineMethod& method);
diff --git a/compiler/dex/quick/quick_cfi_test.cc b/compiler/dex/quick/quick_cfi_test.cc
index dd68dd4..16c161e 100644
--- a/compiler/dex/quick/quick_cfi_test.cc
+++ b/compiler/dex/quick/quick_cfi_test.cc
@@ -36,7 +36,7 @@
namespace art {
// Run the tests only on host.
-#ifndef HAVE_ANDROID_OS
+#ifndef __ANDROID__
class QuickCFITest : public CFITest {
public:
@@ -56,6 +56,8 @@
CompilerOptions::kDefaultSmallMethodThreshold,
CompilerOptions::kDefaultTinyMethodThreshold,
CompilerOptions::kDefaultNumDexMethodsThreshold,
+ CompilerOptions::kDefaultInlineDepthLimit,
+ CompilerOptions::kDefaultInlineMaxCodeUnits,
false,
CompilerOptions::kDefaultTopKProfileThreshold,
false,
@@ -134,6 +136,6 @@
TEST_ISA(kMips)
TEST_ISA(kMips64)
-#endif // HAVE_ANDROID_OS
+#endif // __ANDROID__
} // namespace art
diff --git a/compiler/dex/quick/quick_compiler.h b/compiler/dex/quick/quick_compiler.h
index 43dd578..4a39ab3 100644
--- a/compiler/dex/quick/quick_compiler.h
+++ b/compiler/dex/quick/quick_compiler.h
@@ -50,7 +50,7 @@
const DexFile& dex_file) const OVERRIDE;
uintptr_t GetEntryPointOf(ArtMethod* method) const OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static Mir2Lir* GetCodeGenerator(CompilationUnit* cu, void* compilation_unit);
diff --git a/compiler/dex/quick/x86/quick_assemble_x86_test.cc b/compiler/dex/quick/x86/quick_assemble_x86_test.cc
index 798e23f..98e9f38 100644
--- a/compiler/dex/quick/x86/quick_assemble_x86_test.cc
+++ b/compiler/dex/quick/x86/quick_assemble_x86_test.cc
@@ -39,6 +39,8 @@
CompilerOptions::kDefaultSmallMethodThreshold,
CompilerOptions::kDefaultTinyMethodThreshold,
CompilerOptions::kDefaultNumDexMethodsThreshold,
+ CompilerOptions::kDefaultInlineDepthLimit,
+ CompilerOptions::kDefaultInlineMaxCodeUnits,
false,
CompilerOptions::kDefaultTopKProfileThreshold,
false,
diff --git a/compiler/dex/quick_compiler_callbacks.h b/compiler/dex/quick_compiler_callbacks.h
index d692d26..03bf57b 100644
--- a/compiler/dex/quick_compiler_callbacks.h
+++ b/compiler/dex/quick_compiler_callbacks.h
@@ -38,7 +38,7 @@
~QuickCompilerCallbacks() { }
bool MethodVerified(verifier::MethodVerifier* verifier)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
+ SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE;
void ClassRejected(ClassReference ref) OVERRIDE;
diff --git a/compiler/dex/verification_results.h b/compiler/dex/verification_results.h
index 7fc2a23..9934f6b 100644
--- a/compiler/dex/verification_results.h
+++ b/compiler/dex/verification_results.h
@@ -43,15 +43,15 @@
~VerificationResults();
bool ProcessVerifiedMethod(verifier::MethodVerifier* method_verifier)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(verified_methods_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!verified_methods_lock_);
const VerifiedMethod* GetVerifiedMethod(MethodReference ref)
- LOCKS_EXCLUDED(verified_methods_lock_);
- void RemoveVerifiedMethod(MethodReference ref) LOCKS_EXCLUDED(verified_methods_lock_);
+ REQUIRES(!verified_methods_lock_);
+ void RemoveVerifiedMethod(MethodReference ref) REQUIRES(!verified_methods_lock_);
- void AddRejectedClass(ClassReference ref) LOCKS_EXCLUDED(rejected_classes_lock_);
- bool IsClassRejected(ClassReference ref) LOCKS_EXCLUDED(rejected_classes_lock_);
+ void AddRejectedClass(ClassReference ref) REQUIRES(!rejected_classes_lock_);
+ bool IsClassRejected(ClassReference ref) REQUIRES(!rejected_classes_lock_);
bool IsCandidateForCompilation(MethodReference& method_ref,
const uint32_t access_flags);
diff --git a/compiler/dex/verified_method.h b/compiler/dex/verified_method.h
index bf11839..f7d6d67 100644
--- a/compiler/dex/verified_method.h
+++ b/compiler/dex/verified_method.h
@@ -44,7 +44,7 @@
typedef SafeMap<uint32_t, DexFileReference> DequickenMap;
static const VerifiedMethod* Create(verifier::MethodVerifier* method_verifier, bool compile)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
~VerifiedMethod() = default;
const std::vector<uint8_t>& GetDexGcMap() const {
@@ -107,15 +107,15 @@
- // Generate devirtualizaion map into devirt_map_.
+ // Generate devirtualization map into devirt_map_.
void GenerateDevirtMap(verifier::MethodVerifier* method_verifier)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Generate dequickening map into dequicken_map_. Returns false if there is an error.
bool GenerateDequickenMap(verifier::MethodVerifier* method_verifier)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- // Generate safe case set into safe_cast_set_.
+ // Generate safe cast set into safe_cast_set_.
void GenerateSafeCastSet(verifier::MethodVerifier* method_verifier)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
std::vector<uint8_t> dex_gc_map_;
DevirtualizationMap devirt_map_;
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index a52bfae..affa52a 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -167,69 +167,69 @@
#define STATS_LOCK()
#endif
- void TypeInDexCache() {
+ void TypeInDexCache() REQUIRES(!stats_lock_) {
STATS_LOCK();
types_in_dex_cache_++;
}
- void TypeNotInDexCache() {
+ void TypeNotInDexCache() REQUIRES(!stats_lock_) {
STATS_LOCK();
types_not_in_dex_cache_++;
}
- void StringInDexCache() {
+ void StringInDexCache() REQUIRES(!stats_lock_) {
STATS_LOCK();
strings_in_dex_cache_++;
}
- void StringNotInDexCache() {
+ void StringNotInDexCache() REQUIRES(!stats_lock_) {
STATS_LOCK();
strings_not_in_dex_cache_++;
}
- void TypeDoesntNeedAccessCheck() {
+ void TypeDoesntNeedAccessCheck() REQUIRES(!stats_lock_) {
STATS_LOCK();
resolved_types_++;
}
- void TypeNeedsAccessCheck() {
+ void TypeNeedsAccessCheck() REQUIRES(!stats_lock_) {
STATS_LOCK();
unresolved_types_++;
}
- void ResolvedInstanceField() {
+ void ResolvedInstanceField() REQUIRES(!stats_lock_) {
STATS_LOCK();
resolved_instance_fields_++;
}
- void UnresolvedInstanceField() {
+ void UnresolvedInstanceField() REQUIRES(!stats_lock_) {
STATS_LOCK();
unresolved_instance_fields_++;
}
- void ResolvedLocalStaticField() {
+ void ResolvedLocalStaticField() REQUIRES(!stats_lock_) {
STATS_LOCK();
resolved_local_static_fields_++;
}
- void ResolvedStaticField() {
+ void ResolvedStaticField() REQUIRES(!stats_lock_) {
STATS_LOCK();
resolved_static_fields_++;
}
- void UnresolvedStaticField() {
+ void UnresolvedStaticField() REQUIRES(!stats_lock_) {
STATS_LOCK();
unresolved_static_fields_++;
}
// Indicate that type information from the verifier led to devirtualization.
- void PreciseTypeDevirtualization() {
+ void PreciseTypeDevirtualization() REQUIRES(!stats_lock_) {
STATS_LOCK();
type_based_devirtualization_++;
}
// Indicate that a method of the given type was resolved at compile time.
- void ResolvedMethod(InvokeType type) {
+ void ResolvedMethod(InvokeType type) REQUIRES(!stats_lock_) {
DCHECK_LE(type, kMaxInvokeType);
STATS_LOCK();
resolved_methods_[type]++;
@@ -237,7 +237,7 @@
// Indicate that a method of the given type was unresolved at compile time as it was in an
// unknown dex file.
- void UnresolvedMethod(InvokeType type) {
+ void UnresolvedMethod(InvokeType type) REQUIRES(!stats_lock_) {
DCHECK_LE(type, kMaxInvokeType);
STATS_LOCK();
unresolved_methods_[type]++;
@@ -245,27 +245,27 @@
// Indicate that a type of virtual method dispatch has been converted into a direct method
// dispatch.
- void VirtualMadeDirect(InvokeType type) {
+ void VirtualMadeDirect(InvokeType type) REQUIRES(!stats_lock_) {
DCHECK(type == kVirtual || type == kInterface || type == kSuper);
STATS_LOCK();
virtual_made_direct_[type]++;
}
// Indicate that a method of the given type was able to call directly into boot.
- void DirectCallsToBoot(InvokeType type) {
+ void DirectCallsToBoot(InvokeType type) REQUIRES(!stats_lock_) {
DCHECK_LE(type, kMaxInvokeType);
STATS_LOCK();
direct_calls_to_boot_[type]++;
}
// Indicate that a method of the given type was able to be resolved directly from boot.
- void DirectMethodsToBoot(InvokeType type) {
+ void DirectMethodsToBoot(InvokeType type) REQUIRES(!stats_lock_) {
DCHECK_LE(type, kMaxInvokeType);
STATS_LOCK();
direct_methods_to_boot_[type]++;
}
- void ProcessedInvoke(InvokeType type, int flags) {
+ void ProcessedInvoke(InvokeType type, int flags) REQUIRES(!stats_lock_) {
STATS_LOCK();
if (flags == 0) {
unresolved_methods_[type]++;
@@ -290,13 +290,13 @@
}
// A check-cast could be eliminated due to verifier type analysis.
- void SafeCast() {
+ void SafeCast() REQUIRES(!stats_lock_) {
STATS_LOCK();
safe_casts_++;
}
// A check-cast couldn't be eliminated due to verifier type analysis.
- void NotASafeCast() {
+ void NotASafeCast() REQUIRES(!stats_lock_) {
STATS_LOCK();
not_safe_casts_++;
}
@@ -690,70 +690,79 @@
return methods_to_compile_->find(tmp.c_str()) != methods_to_compile_->end();
}
-static void ResolveExceptionsForMethod(
- ArtMethod* method_handle, std::set<std::pair<uint16_t, const DexFile*>>& exceptions_to_resolve)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- const DexFile::CodeItem* code_item = method_handle->GetCodeItem();
- if (code_item == nullptr) {
- return; // native or abstract method
- }
- if (code_item->tries_size_ == 0) {
- return; // nothing to process
- }
- const uint8_t* encoded_catch_handler_list = DexFile::GetCatchHandlerData(*code_item, 0);
- size_t num_encoded_catch_handlers = DecodeUnsignedLeb128(&encoded_catch_handler_list);
- for (size_t i = 0; i < num_encoded_catch_handlers; i++) {
- int32_t encoded_catch_handler_size = DecodeSignedLeb128(&encoded_catch_handler_list);
- bool has_catch_all = false;
- if (encoded_catch_handler_size <= 0) {
- encoded_catch_handler_size = -encoded_catch_handler_size;
- has_catch_all = true;
+class ResolveCatchBlockExceptionsClassVisitor : public ClassVisitor {
+ public:
+ explicit ResolveCatchBlockExceptionsClassVisitor(
+ std::set<std::pair<uint16_t, const DexFile*>>& exceptions_to_resolve)
+ : exceptions_to_resolve_(exceptions_to_resolve) {}
+
+ void ResolveExceptionsForMethod(ArtMethod* method_handle) SHARED_REQUIRES(Locks::mutator_lock_) {
+ const DexFile::CodeItem* code_item = method_handle->GetCodeItem();
+ if (code_item == nullptr) {
+ return; // native or abstract method
}
- for (int32_t j = 0; j < encoded_catch_handler_size; j++) {
- uint16_t encoded_catch_handler_handlers_type_idx =
- DecodeUnsignedLeb128(&encoded_catch_handler_list);
- // Add to set of types to resolve if not already in the dex cache resolved types
- if (!method_handle->IsResolvedTypeIdx(encoded_catch_handler_handlers_type_idx)) {
- exceptions_to_resolve.insert(
- std::pair<uint16_t, const DexFile*>(encoded_catch_handler_handlers_type_idx,
- method_handle->GetDexFile()));
+ if (code_item->tries_size_ == 0) {
+ return; // nothing to process
+ }
+ const uint8_t* encoded_catch_handler_list = DexFile::GetCatchHandlerData(*code_item, 0);
+ size_t num_encoded_catch_handlers = DecodeUnsignedLeb128(&encoded_catch_handler_list);
+ for (size_t i = 0; i < num_encoded_catch_handlers; i++) {
+ int32_t encoded_catch_handler_size = DecodeSignedLeb128(&encoded_catch_handler_list);
+ bool has_catch_all = false;
+ if (encoded_catch_handler_size <= 0) {
+ encoded_catch_handler_size = -encoded_catch_handler_size;
+ has_catch_all = true;
}
- // ignore address associated with catch handler
- DecodeUnsignedLeb128(&encoded_catch_handler_list);
- }
- if (has_catch_all) {
- // ignore catch all address
- DecodeUnsignedLeb128(&encoded_catch_handler_list);
+ for (int32_t j = 0; j < encoded_catch_handler_size; j++) {
+ uint16_t encoded_catch_handler_handlers_type_idx =
+ DecodeUnsignedLeb128(&encoded_catch_handler_list);
+ // Add to set of types to resolve if not already in the dex cache resolved types
+ if (!method_handle->IsResolvedTypeIdx(encoded_catch_handler_handlers_type_idx)) {
+ exceptions_to_resolve_.emplace(encoded_catch_handler_handlers_type_idx,
+ method_handle->GetDexFile());
+ }
+ // ignore address associated with catch handler
+ DecodeUnsignedLeb128(&encoded_catch_handler_list);
+ }
+ if (has_catch_all) {
+ // ignore catch all address
+ DecodeUnsignedLeb128(&encoded_catch_handler_list);
+ }
}
}
-}
-static bool ResolveCatchBlockExceptionsClassVisitor(mirror::Class* c, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- auto* exceptions_to_resolve =
- reinterpret_cast<std::set<std::pair<uint16_t, const DexFile*>>*>(arg);
- const auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
- for (auto& m : c->GetVirtualMethods(pointer_size)) {
- ResolveExceptionsForMethod(&m, *exceptions_to_resolve);
+ virtual bool Visit(mirror::Class* c) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ const auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ for (auto& m : c->GetVirtualMethods(pointer_size)) {
+ ResolveExceptionsForMethod(&m);
+ }
+ for (auto& m : c->GetDirectMethods(pointer_size)) {
+ ResolveExceptionsForMethod(&m);
+ }
+ return true;
}
- for (auto& m : c->GetDirectMethods(pointer_size)) {
- ResolveExceptionsForMethod(&m, *exceptions_to_resolve);
- }
- return true;
-}
-static bool RecordImageClassesVisitor(mirror::Class* klass, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- std::unordered_set<std::string>* image_classes =
- reinterpret_cast<std::unordered_set<std::string>*>(arg);
- std::string temp;
- image_classes->insert(klass->GetDescriptor(&temp));
- return true;
-}
+ private:
+ std::set<std::pair<uint16_t, const DexFile*>>& exceptions_to_resolve_;
+};
+
+class RecordImageClassesVisitor : public ClassVisitor {
+ public:
+ explicit RecordImageClassesVisitor(std::unordered_set<std::string>* image_classes)
+ : image_classes_(image_classes) {}
+
+ bool Visit(mirror::Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ std::string temp;
+ image_classes_->insert(klass->GetDescriptor(&temp));
+ return true;
+ }
+
+ private:
+ std::unordered_set<std::string>* const image_classes_;
+};
// Make a list of descriptors for classes to include in the image
-void CompilerDriver::LoadImageClasses(TimingLogger* timings)
- LOCKS_EXCLUDED(Locks::mutator_lock_) {
+void CompilerDriver::LoadImageClasses(TimingLogger* timings) {
CHECK(timings != nullptr);
if (!IsImage()) {
return;
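The pattern above repeats throughout this file: each C-style callback taking a void* context (such as the old static RecordImageClassesVisitor function) becomes a small ClassVisitor subclass carrying its state as typed members, so the reinterpret_cast of the argument disappears and lock annotations can sit on the virtual Visit(). A stripped-down sketch of the pattern, with a hypothetical counting visitor:

    #include <cstddef>

    namespace mirror { class Class; }  // ART type, forward-declared for the sketch.

    class ClassVisitor {
     public:
      virtual ~ClassVisitor() {}
      // Returns true to continue the walk, false to stop early.
      virtual bool Visit(mirror::Class* klass) = 0;
    };

    class CountClassesVisitor : public ClassVisitor {  // Hypothetical example.
     public:
      bool Visit(mirror::Class* /* klass */) override {
        ++count_;
        return true;
      }
      size_t count_ = 0;  // Typed state, instead of a void* side channel.
    };

    // Usage: CountClassesVisitor visitor; class_linker->VisitClasses(&visitor);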
@@ -788,8 +797,8 @@
hs.NewHandle(class_linker->FindSystemClass(self, "Ljava/lang/Throwable;")));
do {
unresolved_exception_types.clear();
- class_linker->VisitClasses(ResolveCatchBlockExceptionsClassVisitor,
- &unresolved_exception_types);
+ ResolveCatchBlockExceptionsClassVisitor visitor(unresolved_exception_types);
+ class_linker->VisitClasses(&visitor);
for (const std::pair<uint16_t, const DexFile*>& exception_type : unresolved_exception_types) {
uint16_t exception_type_idx = exception_type.first;
const DexFile* dex_file = exception_type.second;
@@ -812,14 +821,15 @@
// We walk the roots looking for classes so that we'll pick up the
- // above classes plus any classes them depend on such super
+ // above classes plus any classes they depend on, such as super
// classes, interfaces, and the required ClassLinker roots.
- class_linker->VisitClasses(RecordImageClassesVisitor, image_classes_.get());
+ RecordImageClassesVisitor visitor(image_classes_.get());
+ class_linker->VisitClasses(&visitor);
CHECK_NE(image_classes_->size(), 0U);
}
static void MaybeAddToImageClasses(Handle<mirror::Class> c,
std::unordered_set<std::string>* image_classes)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
Thread* self = Thread::Current();
StackHandleScope<1> hs(self);
// Make a copy of the handle so that we don't clobber it doing Assign.
@@ -876,7 +886,7 @@
// Visitor for VisitReferences.
void operator()(mirror::Object* object, MemberOffset field_offset, bool /* is_static */) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
mirror::Object* ref = object->GetFieldObject<mirror::Object>(field_offset);
if (ref != nullptr) {
VisitClinitClassesObject(ref);
@@ -884,10 +894,15 @@
}
// java.lang.Reference visitor for VisitReferences.
- void operator()(mirror::Class* /* klass */, mirror::Reference* /* ref */) const {
- }
+ void operator()(mirror::Class* klass ATTRIBUTE_UNUSED, mirror::Reference* ref ATTRIBUTE_UNUSED)
+ const {}
- void Walk() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ // Ignore class native roots.
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
+ const {}
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
+
+ void Walk() SHARED_REQUIRES(Locks::mutator_lock_) {
// Use the initial classes as roots for a search.
for (mirror::Class* klass_root : image_classes_) {
VisitClinitClassesObject(klass_root);
@@ -895,9 +910,32 @@
}
private:
+ class FindImageClassesVisitor : public ClassVisitor {
+ public:
+ explicit FindImageClassesVisitor(ClinitImageUpdate* data) : data_(data) {}
+
+ bool Visit(mirror::Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ std::string temp;
+ const char* name = klass->GetDescriptor(&temp);
+ if (data_->image_class_descriptors_->find(name) != data_->image_class_descriptors_->end()) {
+ data_->image_classes_.push_back(klass);
+ } else {
+ // Check whether it is initialized and has a clinit. They must be kept, too.
+ if (klass->IsInitialized() && klass->FindClassInitializer(
+ Runtime::Current()->GetClassLinker()->GetImagePointerSize()) != nullptr) {
+ data_->image_classes_.push_back(klass);
+ }
+ }
+ return true;
+ }
+
+ private:
+ ClinitImageUpdate* const data_;
+ };
+
ClinitImageUpdate(std::unordered_set<std::string>* image_class_descriptors, Thread* self,
ClassLinker* linker)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
+ SHARED_REQUIRES(Locks::mutator_lock_) :
image_class_descriptors_(image_class_descriptors), self_(self) {
CHECK(linker != nullptr);
CHECK(image_class_descriptors != nullptr);
@@ -911,29 +949,12 @@
// Find all the already-marked classes.
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
- linker->VisitClasses(FindImageClasses, this);
- }
-
- static bool FindImageClasses(mirror::Class* klass, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ClinitImageUpdate* data = reinterpret_cast<ClinitImageUpdate*>(arg);
- std::string temp;
- const char* name = klass->GetDescriptor(&temp);
- if (data->image_class_descriptors_->find(name) != data->image_class_descriptors_->end()) {
- data->image_classes_.push_back(klass);
- } else {
- // Check whether it is initialized and has a clinit. They must be kept, too.
- if (klass->IsInitialized() && klass->FindClassInitializer(
- Runtime::Current()->GetClassLinker()->GetImagePointerSize()) != nullptr) {
- data->image_classes_.push_back(klass);
- }
- }
-
- return true;
+ FindImageClassesVisitor visitor(this);
+ linker->VisitClasses(&visitor);
}
void VisitClinitClassesObject(mirror::Object* object) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(object != nullptr);
if (marked_objects_.find(object) != marked_objects_.end()) {
// Already processed.
@@ -1569,10 +1590,14 @@
return result;
}
+class CompilationVisitor {
+ public:
+ virtual ~CompilationVisitor() {}
+ virtual void Visit(size_t index) = 0;
+};
+
class ParallelCompilationManager {
public:
- typedef void Callback(const ParallelCompilationManager* manager, size_t index);
-
ParallelCompilationManager(ClassLinker* class_linker,
jobject class_loader,
CompilerDriver* compiler,
@@ -1610,14 +1635,15 @@
return dex_files_;
}
- void ForAll(size_t begin, size_t end, Callback callback, size_t work_units) {
+ void ForAll(size_t begin, size_t end, CompilationVisitor* visitor, size_t work_units)
+ REQUIRES(!*Locks::mutator_lock_) {
Thread* self = Thread::Current();
self->AssertNoPendingException();
CHECK_GT(work_units, 0U);
index_.StoreRelaxed(begin);
for (size_t i = 0; i < work_units; ++i) {
- thread_pool_->AddTask(self, new ForAllClosure(this, end, callback));
+ thread_pool_->AddTask(self, new ForAllClosure(this, end, visitor));
}
thread_pool_->StartWorkers(self);
@@ -1636,10 +1662,10 @@
private:
class ForAllClosure : public Task {
public:
- ForAllClosure(ParallelCompilationManager* manager, size_t end, Callback* callback)
+ ForAllClosure(ParallelCompilationManager* manager, size_t end, CompilationVisitor* visitor)
: manager_(manager),
end_(end),
- callback_(callback) {}
+ visitor_(visitor) {}
virtual void Run(Thread* self) {
while (true) {
@@ -1647,7 +1673,7 @@
if (UNLIKELY(index >= end_)) {
break;
}
- callback_(manager_, index);
+ visitor_->Visit(index);
self->AssertNoPendingException();
}
}
@@ -1659,7 +1685,7 @@
private:
ParallelCompilationManager* const manager_;
const size_t end_;
- Callback* const callback_;
+ CompilationVisitor* const visitor_;
};
AtomicInteger index_;
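The same callback-to-visitor conversion is applied to ForAll() here, and the scheduling behind it is worth spelling out: every worker task claims indices from one shared atomic counter until the range is exhausted, so the work units balance themselves across threads. A self-contained sketch, with std::atomic and std::thread standing in for ART's AtomicInteger and ThreadPool:

    #include <atomic>
    #include <cstddef>
    #include <thread>
    #include <vector>

    class CompilationVisitor {
     public:
      virtual ~CompilationVisitor() {}
      virtual void Visit(size_t index) = 0;
    };

    void ForAll(size_t begin, size_t end, CompilationVisitor* visitor,
                size_t work_units) {
      std::atomic<size_t> index(begin);
      std::vector<std::thread> workers;
      for (size_t i = 0; i < work_units; ++i) {
        workers.emplace_back([&] {
          while (true) {
            size_t claimed = index.fetch_add(1);  // Claim the next work unit.
            if (claimed >= end) {
              break;
            }
            visitor->Visit(claimed);
          }
        });
      }
      for (std::thread& worker : workers) {
        worker.join();
      }
    }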
@@ -1676,7 +1702,7 @@
// A fast version of SkipClass above if the class pointer is available
// that avoids the expensive FindInClassPath search.
static bool SkipClass(jobject class_loader, const DexFile& dex_file, mirror::Class* klass)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(klass != nullptr);
const DexFile& original_dex_file = *klass->GetDexCache()->GetDexFile();
if (&dex_file != &original_dex_file) {
@@ -1691,7 +1717,7 @@
}
static void CheckAndClearResolveException(Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
CHECK(self->IsExceptionPending());
mirror::Throwable* exception = self->GetException();
std::string temp;
@@ -1717,134 +1743,148 @@
self->ClearException();
}
-static void ResolveClassFieldsAndMethods(const ParallelCompilationManager* manager,
- size_t class_def_index)
- LOCKS_EXCLUDED(Locks::mutator_lock_) {
- ATRACE_CALL();
- Thread* self = Thread::Current();
- jobject jclass_loader = manager->GetClassLoader();
- const DexFile& dex_file = *manager->GetDexFile();
- ClassLinker* class_linker = manager->GetClassLinker();
+class ResolveClassFieldsAndMethodsVisitor : public CompilationVisitor {
+ public:
+ explicit ResolveClassFieldsAndMethodsVisitor(const ParallelCompilationManager* manager)
+ : manager_(manager) {}
- // If an instance field is final then we need to have a barrier on the return, static final
- // fields are assigned within the lock held for class initialization. Conservatively assume
- // constructor barriers are always required.
- bool requires_constructor_barrier = true;
+ void Visit(size_t class_def_index) OVERRIDE REQUIRES(!Locks::mutator_lock_) {
+ ATRACE_CALL();
+ Thread* const self = Thread::Current();
+ jobject jclass_loader = manager_->GetClassLoader();
+ const DexFile& dex_file = *manager_->GetDexFile();
+ ClassLinker* class_linker = manager_->GetClassLinker();
- // Method and Field are the worst. We can't resolve without either
- // context from the code use (to disambiguate virtual vs direct
- // method and instance vs static field) or from class
- // definitions. While the compiler will resolve what it can as it
- // needs it, here we try to resolve fields and methods used in class
- // definitions, since many of them many never be referenced by
- // generated code.
- const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
- ScopedObjectAccess soa(self);
- StackHandleScope<2> hs(soa.Self());
- Handle<mirror::ClassLoader> class_loader(
- hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file)));
- // Resolve the class.
- mirror::Class* klass = class_linker->ResolveType(dex_file, class_def.class_idx_, dex_cache,
- class_loader);
- bool resolve_fields_and_methods;
- if (klass == nullptr) {
- // Class couldn't be resolved, for example, super-class is in a different dex file. Don't
- // attempt to resolve methods and fields when there is no declaring class.
- CheckAndClearResolveException(soa.Self());
- resolve_fields_and_methods = false;
- } else {
- // We successfully resolved a class, should we skip it?
- if (SkipClass(jclass_loader, dex_file, klass)) {
- return;
- }
- // We want to resolve the methods and fields eagerly.
- resolve_fields_and_methods = true;
- }
- // Note the class_data pointer advances through the headers,
- // static fields, instance fields, direct methods, and virtual
- // methods.
- const uint8_t* class_data = dex_file.GetClassData(class_def);
- if (class_data == nullptr) {
- // Empty class such as a marker interface.
- requires_constructor_barrier = false;
- } else {
- ClassDataItemIterator it(dex_file, class_data);
- while (it.HasNextStaticField()) {
- if (resolve_fields_and_methods) {
- ArtField* field = class_linker->ResolveField(dex_file, it.GetMemberIndex(),
- dex_cache, class_loader, true);
- if (field == nullptr) {
- CheckAndClearResolveException(soa.Self());
- }
+ // If an instance field is final then we need to have a barrier on the return, static final
+ // fields are assigned within the lock held for class initialization. Conservatively assume
+ // constructor barriers are always required.
+ bool requires_constructor_barrier = true;
+
+ // Method and Field are the worst. We can't resolve without either
+ // context from the code use (to disambiguate virtual vs direct
+ // method and instance vs static field) or from class
+ // definitions. While the compiler will resolve what it can as it
+ // needs it, here we try to resolve fields and methods used in class
+ // definitions, since many of them may never be referenced by
+ // generated code.
+ const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
+ ScopedObjectAccess soa(self);
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file)));
+ // Resolve the class.
+ mirror::Class* klass = class_linker->ResolveType(dex_file, class_def.class_idx_, dex_cache,
+ class_loader);
+ bool resolve_fields_and_methods;
+ if (klass == nullptr) {
+ // Class couldn't be resolved, for example, super-class is in a different dex file. Don't
+ // attempt to resolve methods and fields when there is no declaring class.
+ CheckAndClearResolveException(soa.Self());
+ resolve_fields_and_methods = false;
+ } else {
+ // We successfully resolved a class, should we skip it?
+ if (SkipClass(jclass_loader, dex_file, klass)) {
+ return;
}
- it.Next();
+ // We want to resolve the methods and fields eagerly.
+ resolve_fields_and_methods = true;
}
- // We require a constructor barrier if there are final instance fields.
- requires_constructor_barrier = false;
- while (it.HasNextInstanceField()) {
- if (it.MemberIsFinal()) {
- requires_constructor_barrier = true;
- }
- if (resolve_fields_and_methods) {
- ArtField* field = class_linker->ResolveField(dex_file, it.GetMemberIndex(),
- dex_cache, class_loader, false);
- if (field == nullptr) {
- CheckAndClearResolveException(soa.Self());
- }
- }
- it.Next();
- }
- if (resolve_fields_and_methods) {
- while (it.HasNextDirectMethod()) {
- ArtMethod* method = class_linker->ResolveMethod(
- dex_file, it.GetMemberIndex(), dex_cache, class_loader, nullptr,
- it.GetMethodInvokeType(class_def));
- if (method == nullptr) {
- CheckAndClearResolveException(soa.Self());
+ // Note the class_data pointer advances through the headers,
+ // static fields, instance fields, direct methods, and virtual
+ // methods.
+ const uint8_t* class_data = dex_file.GetClassData(class_def);
+ if (class_data == nullptr) {
+ // Empty class such as a marker interface.
+ requires_constructor_barrier = false;
+ } else {
+ ClassDataItemIterator it(dex_file, class_data);
+ while (it.HasNextStaticField()) {
+ if (resolve_fields_and_methods) {
+ ArtField* field = class_linker->ResolveField(dex_file, it.GetMemberIndex(),
+ dex_cache, class_loader, true);
+ if (field == nullptr) {
+ CheckAndClearResolveException(soa.Self());
+ }
}
it.Next();
}
- while (it.HasNextVirtualMethod()) {
- ArtMethod* method = class_linker->ResolveMethod(
- dex_file, it.GetMemberIndex(), dex_cache, class_loader, nullptr,
- it.GetMethodInvokeType(class_def));
- if (method == nullptr) {
- CheckAndClearResolveException(soa.Self());
+ // We require a constructor barrier if there are final instance fields.
+ requires_constructor_barrier = false;
+ while (it.HasNextInstanceField()) {
+ if (it.MemberIsFinal()) {
+ requires_constructor_barrier = true;
+ }
+ if (resolve_fields_and_methods) {
+ ArtField* field = class_linker->ResolveField(dex_file, it.GetMemberIndex(),
+ dex_cache, class_loader, false);
+ if (field == nullptr) {
+ CheckAndClearResolveException(soa.Self());
+ }
}
it.Next();
}
- DCHECK(!it.HasNext());
+ if (resolve_fields_and_methods) {
+ while (it.HasNextDirectMethod()) {
+ ArtMethod* method = class_linker->ResolveMethod(
+ dex_file, it.GetMemberIndex(), dex_cache, class_loader, nullptr,
+ it.GetMethodInvokeType(class_def));
+ if (method == nullptr) {
+ CheckAndClearResolveException(soa.Self());
+ }
+ it.Next();
+ }
+ while (it.HasNextVirtualMethod()) {
+ ArtMethod* method = class_linker->ResolveMethod(
+ dex_file, it.GetMemberIndex(), dex_cache, class_loader, nullptr,
+ it.GetMethodInvokeType(class_def));
+ if (method == nullptr) {
+ CheckAndClearResolveException(soa.Self());
+ }
+ it.Next();
+ }
+ DCHECK(!it.HasNext());
+ }
+ }
+ if (requires_constructor_barrier) {
+ manager_->GetCompiler()->AddRequiresConstructorBarrier(self, &dex_file, class_def_index);
}
}
- if (requires_constructor_barrier) {
- manager->GetCompiler()->AddRequiresConstructorBarrier(self, &dex_file, class_def_index);
- }
-}
-static void ResolveType(const ParallelCompilationManager* manager, size_t type_idx)
- LOCKS_EXCLUDED(Locks::mutator_lock_) {
+ private:
+ const ParallelCompilationManager* const manager_;
+};
+
+class ResolveTypeVisitor : public CompilationVisitor {
+ public:
+ explicit ResolveTypeVisitor(const ParallelCompilationManager* manager) : manager_(manager) {
+ }
+ virtual void Visit(size_t type_idx) OVERRIDE REQUIRES(!Locks::mutator_lock_) {
// Class derived values are more complicated, they require the linker and loader.
- ScopedObjectAccess soa(Thread::Current());
- ClassLinker* class_linker = manager->GetClassLinker();
- const DexFile& dex_file = *manager->GetDexFile();
- StackHandleScope<2> hs(soa.Self());
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file)));
- Handle<mirror::ClassLoader> class_loader(
- hs.NewHandle(soa.Decode<mirror::ClassLoader*>(manager->GetClassLoader())));
- mirror::Class* klass = class_linker->ResolveType(dex_file, type_idx, dex_cache, class_loader);
+ ScopedObjectAccess soa(Thread::Current());
+ ClassLinker* class_linker = manager_->GetClassLinker();
+ const DexFile& dex_file = *manager_->GetDexFile();
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file)));
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(manager_->GetClassLoader())));
+ mirror::Class* klass = class_linker->ResolveType(dex_file, type_idx, dex_cache, class_loader);
- if (klass == nullptr) {
- CHECK(soa.Self()->IsExceptionPending());
- mirror::Throwable* exception = soa.Self()->GetException();
- VLOG(compiler) << "Exception during type resolution: " << exception->Dump();
- if (exception->GetClass()->DescriptorEquals("Ljava/lang/OutOfMemoryError;")) {
- // There's little point continuing compilation if the heap is exhausted.
- LOG(FATAL) << "Out of memory during type resolution for compilation";
+ if (klass == nullptr) {
+ soa.Self()->AssertPendingException();
+ mirror::Throwable* exception = soa.Self()->GetException();
+ VLOG(compiler) << "Exception during type resolution: " << exception->Dump();
+ if (exception->GetClass()->DescriptorEquals("Ljava/lang/OutOfMemoryError;")) {
+ // There's little point continuing compilation if the heap is exhausted.
+ LOG(FATAL) << "Out of memory during type resolution for compilation";
+ }
+ soa.Self()->ClearException();
}
- soa.Self()->ClearException();
}
-}
+
+ private:
+ const ParallelCompilationManager* const manager_;
+};
void CompilerDriver::ResolveDexFile(jobject class_loader, const DexFile& dex_file,
const std::vector<const DexFile*>& dex_files,
@@ -1860,17 +1900,18 @@
- // For images we resolve all types, such as array, whereas for applications just those with
+ // For images we resolve all types, such as arrays, whereas for applications only those with
// classdefs are resolved by ResolveClassFieldsAndMethods.
TimingLogger::ScopedTiming t("Resolve Types", timings);
- context.ForAll(0, dex_file.NumTypeIds(), ResolveType, thread_count_);
+ ResolveTypeVisitor visitor(&context);
+ context.ForAll(0, dex_file.NumTypeIds(), &visitor, thread_count_);
}
TimingLogger::ScopedTiming t("Resolve MethodsAndFields", timings);
- context.ForAll(0, dex_file.NumClassDefs(), ResolveClassFieldsAndMethods, thread_count_);
+ ResolveClassFieldsAndMethodsVisitor visitor(&context);
+ context.ForAll(0, dex_file.NumClassDefs(), &visitor, thread_count_);
}
void CompilerDriver::SetVerified(jobject class_loader, const std::vector<const DexFile*>& dex_files,
ThreadPool* thread_pool, TimingLogger* timings) {
- for (size_t i = 0; i != dex_files.size(); ++i) {
- const DexFile* dex_file = dex_files[i];
+ for (const DexFile* dex_file : dex_files) {
CHECK(dex_file != nullptr);
SetVerifiedDexFile(class_loader, *dex_file, dex_files, thread_pool, timings);
}
@@ -1878,67 +1919,73 @@
void CompilerDriver::Verify(jobject class_loader, const std::vector<const DexFile*>& dex_files,
ThreadPool* thread_pool, TimingLogger* timings) {
- for (size_t i = 0; i != dex_files.size(); ++i) {
- const DexFile* dex_file = dex_files[i];
+ for (const DexFile* dex_file : dex_files) {
CHECK(dex_file != nullptr);
VerifyDexFile(class_loader, *dex_file, dex_files, thread_pool, timings);
}
}
-static void VerifyClass(const ParallelCompilationManager* manager, size_t class_def_index)
- LOCKS_EXCLUDED(Locks::mutator_lock_) {
- ATRACE_CALL();
- ScopedObjectAccess soa(Thread::Current());
- const DexFile& dex_file = *manager->GetDexFile();
- const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
- const char* descriptor = dex_file.GetClassDescriptor(class_def);
- ClassLinker* class_linker = manager->GetClassLinker();
- jobject jclass_loader = manager->GetClassLoader();
- StackHandleScope<3> hs(soa.Self());
- Handle<mirror::ClassLoader> class_loader(
- hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
- Handle<mirror::Class> klass(
- hs.NewHandle(class_linker->FindClass(soa.Self(), descriptor, class_loader)));
- if (klass.Get() == nullptr) {
- CHECK(soa.Self()->IsExceptionPending());
- soa.Self()->ClearException();
+class VerifyClassVisitor : public CompilationVisitor {
+ public:
+ explicit VerifyClassVisitor(const ParallelCompilationManager* manager) : manager_(manager) {}
- /*
- * At compile time, we can still structurally verify the class even if FindClass fails.
- * This is to ensure the class is structurally sound for compilation. An unsound class
- * will be rejected by the verifier and later skipped during compilation in the compiler.
- */
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file)));
- std::string error_msg;
- if (verifier::MethodVerifier::VerifyClass(soa.Self(), &dex_file, dex_cache, class_loader,
- &class_def, true, &error_msg) ==
- verifier::MethodVerifier::kHardFailure) {
- LOG(ERROR) << "Verification failed on class " << PrettyDescriptor(descriptor)
- << " because: " << error_msg;
- manager->GetCompiler()->SetHadHardVerifierFailure();
- }
- } else if (!SkipClass(jclass_loader, dex_file, klass.Get())) {
- CHECK(klass->IsResolved()) << PrettyClass(klass.Get());
- class_linker->VerifyClass(soa.Self(), klass);
-
- if (klass->IsErroneous()) {
- // ClassLinker::VerifyClass throws, which isn't useful in the compiler.
+ virtual void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) OVERRIDE {
+ ATRACE_CALL();
+ ScopedObjectAccess soa(Thread::Current());
+ const DexFile& dex_file = *manager_->GetDexFile();
+ const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
+ const char* descriptor = dex_file.GetClassDescriptor(class_def);
+ ClassLinker* class_linker = manager_->GetClassLinker();
+ jobject jclass_loader = manager_->GetClassLoader();
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
+ Handle<mirror::Class> klass(
+ hs.NewHandle(class_linker->FindClass(soa.Self(), descriptor, class_loader)));
+ if (klass.Get() == nullptr) {
CHECK(soa.Self()->IsExceptionPending());
soa.Self()->ClearException();
- manager->GetCompiler()->SetHadHardVerifierFailure();
+
+ /*
+ * At compile time, we can still structurally verify the class even if FindClass fails.
+ * This is to ensure the class is structurally sound for compilation. An unsound class
+ * will be rejected by the verifier and later skipped by the compiler.
+ */
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file)));
+ std::string error_msg;
+ if (verifier::MethodVerifier::VerifyClass(soa.Self(), &dex_file, dex_cache, class_loader,
+ &class_def, true, &error_msg) ==
+ verifier::MethodVerifier::kHardFailure) {
+ LOG(ERROR) << "Verification failed on class " << PrettyDescriptor(descriptor)
+ << " because: " << error_msg;
+ manager_->GetCompiler()->SetHadHardVerifierFailure();
+ }
+ } else if (!SkipClass(jclass_loader, dex_file, klass.Get())) {
+ CHECK(klass->IsResolved()) << PrettyClass(klass.Get());
+ class_linker->VerifyClass(soa.Self(), klass);
+
+ if (klass->IsErroneous()) {
+ // ClassLinker::VerifyClass throws, which isn't useful in the compiler.
+ CHECK(soa.Self()->IsExceptionPending());
+ soa.Self()->ClearException();
+ manager_->GetCompiler()->SetHadHardVerifierFailure();
+ }
+
+ CHECK(klass->IsCompileTimeVerified() || klass->IsErroneous())
+ << PrettyDescriptor(klass.Get()) << ": state=" << klass->GetStatus();
+
+ // It is *very* problematic if there are verification errors in the boot classpath. For example,
+ // we rely on things working OK without verification when the decryption dialog is brought up.
+ // So abort in a debug build if we find this violated.
+ DCHECK(!manager_->GetCompiler()->IsImage() || klass->IsVerified()) << "Boot classpath class "
+ << PrettyClass(klass.Get()) << " failed to fully verify.";
}
-
- CHECK(klass->IsCompileTimeVerified() || klass->IsErroneous())
- << PrettyDescriptor(klass.Get()) << ": state=" << klass->GetStatus();
-
- // It is *very* problematic if there are verification errors in the boot classpath. For example,
- // we rely on things working OK without verification when the decryption dialog is brought up.
- // So abort in a debug build if we find this violated.
- DCHECK(!manager->GetCompiler()->IsImage() || klass->IsVerified()) << "Boot classpath class " <<
- PrettyClass(klass.Get()) << " failed to fully verify.";
+ soa.Self()->AssertNoPendingException();
}
- soa.Self()->AssertNoPendingException();
-}
+
+ private:
+ const ParallelCompilationManager* const manager_;
+};
void CompilerDriver::VerifyDexFile(jobject class_loader, const DexFile& dex_file,
const std::vector<const DexFile*>& dex_files,
@@ -1947,48 +1994,56 @@
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
ParallelCompilationManager context(class_linker, class_loader, this, &dex_file, dex_files,
thread_pool);
- context.ForAll(0, dex_file.NumClassDefs(), VerifyClass, thread_count_);
+ VerifyClassVisitor visitor(&context);
+ context.ForAll(0, dex_file.NumClassDefs(), &visitor, thread_count_);
}
-static void SetVerifiedClass(const ParallelCompilationManager* manager, size_t class_def_index)
- LOCKS_EXCLUDED(Locks::mutator_lock_) {
- ATRACE_CALL();
- ScopedObjectAccess soa(Thread::Current());
- const DexFile& dex_file = *manager->GetDexFile();
- const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
- const char* descriptor = dex_file.GetClassDescriptor(class_def);
- ClassLinker* class_linker = manager->GetClassLinker();
- jobject jclass_loader = manager->GetClassLoader();
- StackHandleScope<3> hs(soa.Self());
- Handle<mirror::ClassLoader> class_loader(
- hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
- Handle<mirror::Class> klass(
- hs.NewHandle(class_linker->FindClass(soa.Self(), descriptor, class_loader)));
- // Class might have failed resolution. Then don't set it to verified.
- if (klass.Get() != nullptr) {
- // Only do this if the class is resolved. If even resolution fails, quickening will go very,
- // very wrong.
- if (klass->IsResolved()) {
- if (klass->GetStatus() < mirror::Class::kStatusVerified) {
- ObjectLock<mirror::Class> lock(soa.Self(), klass);
- // Set class status to verified.
- mirror::Class::SetStatus(klass, mirror::Class::kStatusVerified, soa.Self());
- // Mark methods as pre-verified. If we don't do this, the interpreter will run with
- // access checks.
- klass->SetPreverifiedFlagOnAllMethods(
- GetInstructionSetPointerSize(manager->GetCompiler()->GetInstructionSet()));
- klass->SetPreverified();
+class SetVerifiedClassVisitor : public CompilationVisitor {
+ public:
+ explicit SetVerifiedClassVisitor(const ParallelCompilationManager* manager) : manager_(manager) {}
+
+ virtual void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) OVERRIDE {
+ ATRACE_CALL();
+ ScopedObjectAccess soa(Thread::Current());
+ const DexFile& dex_file = *manager_->GetDexFile();
+ const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
+ const char* descriptor = dex_file.GetClassDescriptor(class_def);
+ ClassLinker* class_linker = manager_->GetClassLinker();
+ jobject jclass_loader = manager_->GetClassLoader();
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
+ Handle<mirror::Class> klass(
+ hs.NewHandle(class_linker->FindClass(soa.Self(), descriptor, class_loader)));
+ // The class might have failed resolution; if so, don't set it to verified.
+ if (klass.Get() != nullptr) {
+ // Only do this if the class is resolved. If even resolution fails, quickening will go very,
+ // very wrong.
+ if (klass->IsResolved()) {
+ if (klass->GetStatus() < mirror::Class::kStatusVerified) {
+ ObjectLock<mirror::Class> lock(soa.Self(), klass);
+ // Set class status to verified.
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusVerified, soa.Self());
+ // Mark methods as pre-verified. If we don't do this, the interpreter will run with
+ // access checks.
+ klass->SetPreverifiedFlagOnAllMethods(
+ GetInstructionSetPointerSize(manager_->GetCompiler()->GetInstructionSet()));
+ klass->SetPreverified();
+ }
+ // Record the final class status if necessary.
+ ClassReference ref(manager_->GetDexFile(), class_def_index);
+ manager_->GetCompiler()->RecordClassStatus(ref, klass->GetStatus());
}
- // Record the final class status if necessary.
- ClassReference ref(manager->GetDexFile(), class_def_index);
- manager->GetCompiler()->RecordClassStatus(ref, klass->GetStatus());
+ } else {
+ Thread* self = soa.Self();
+ DCHECK(self->IsExceptionPending());
+ self->ClearException();
}
- } else {
- Thread* self = soa.Self();
- DCHECK(self->IsExceptionPending());
- self->ClearException();
}
-}
+
+ private:
+ const ParallelCompilationManager* const manager_;
+};
void CompilerDriver::SetVerifiedDexFile(jobject class_loader, const DexFile& dex_file,
const std::vector<const DexFile*>& dex_files,
@@ -1997,99 +2052,107 @@
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
ParallelCompilationManager context(class_linker, class_loader, this, &dex_file, dex_files,
thread_pool);
- context.ForAll(0, dex_file.NumClassDefs(), SetVerifiedClass, thread_count_);
+ SetVerifiedClassVisitor visitor(&context);
+ context.ForAll(0, dex_file.NumClassDefs(), &visitor, thread_count_);
}
-static void InitializeClass(const ParallelCompilationManager* manager, size_t class_def_index)
- LOCKS_EXCLUDED(Locks::mutator_lock_) {
- ATRACE_CALL();
- jobject jclass_loader = manager->GetClassLoader();
- const DexFile& dex_file = *manager->GetDexFile();
- const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
- const DexFile::TypeId& class_type_id = dex_file.GetTypeId(class_def.class_idx_);
- const char* descriptor = dex_file.StringDataByIdx(class_type_id.descriptor_idx_);
+class InitializeClassVisitor : public CompilationVisitor {
+ public:
+ explicit InitializeClassVisitor(const ParallelCompilationManager* manager) : manager_(manager) {}
- ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<3> hs(soa.Self());
- Handle<mirror::ClassLoader> class_loader(
- hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
- Handle<mirror::Class> klass(
- hs.NewHandle(manager->GetClassLinker()->FindClass(soa.Self(), descriptor, class_loader)));
+ virtual void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) OVERRIDE {
+ ATRACE_CALL();
+ jobject jclass_loader = manager_->GetClassLoader();
+ const DexFile& dex_file = *manager_->GetDexFile();
+ const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
+ const DexFile::TypeId& class_type_id = dex_file.GetTypeId(class_def.class_idx_);
+ const char* descriptor = dex_file.StringDataByIdx(class_type_id.descriptor_idx_);
- if (klass.Get() != nullptr && !SkipClass(jclass_loader, dex_file, klass.Get())) {
- // Only try to initialize classes that were successfully verified.
- if (klass->IsVerified()) {
- // Attempt to initialize the class but bail if we either need to initialize the super-class
- // or static fields.
- manager->GetClassLinker()->EnsureInitialized(soa.Self(), klass, false, false);
- if (!klass->IsInitialized()) {
- // We don't want non-trivial class initialization occurring on multiple threads due to
- // deadlock problems. For example, a parent class is initialized (holding its lock) that
- // refers to a sub-class in its static/class initializer causing it to try to acquire the
- // sub-class' lock. While on a second thread the sub-class is initialized (holding its lock)
- // after first initializing its parents, whose locks are acquired. This leads to a
- // parent-to-child and a child-to-parent lock ordering and consequent potential deadlock.
- // We need to use an ObjectLock due to potential suspension in the interpreting code. Rather
- // than use a special Object for the purpose we use the Class of java.lang.Class.
- Handle<mirror::Class> h_klass(hs.NewHandle(klass->GetClass()));
- ObjectLock<mirror::Class> lock(soa.Self(), h_klass);
- // Attempt to initialize allowing initialization of parent classes but still not static
- // fields.
- manager->GetClassLinker()->EnsureInitialized(soa.Self(), klass, false, true);
+ ScopedObjectAccess soa(Thread::Current());
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
+ Handle<mirror::Class> klass(
+ hs.NewHandle(manager_->GetClassLinker()->FindClass(soa.Self(), descriptor, class_loader)));
+
+ if (klass.Get() != nullptr && !SkipClass(jclass_loader, dex_file, klass.Get())) {
+ // Only try to initialize classes that were successfully verified.
+ if (klass->IsVerified()) {
+ // Attempt to initialize the class but bail if we either need to initialize the super-class
+ // or static fields.
+ manager_->GetClassLinker()->EnsureInitialized(soa.Self(), klass, false, false);
if (!klass->IsInitialized()) {
- // We need to initialize static fields, we only do this for image classes that aren't
- // marked with the $NoPreloadHolder (which implies this should not be initialized early).
- bool can_init_static_fields = manager->GetCompiler()->IsImage() &&
- manager->GetCompiler()->IsImageClass(descriptor) &&
- !StringPiece(descriptor).ends_with("$NoPreloadHolder;");
- if (can_init_static_fields) {
- VLOG(compiler) << "Initializing: " << descriptor;
- // TODO multithreading support. We should ensure the current compilation thread has
- // exclusive access to the runtime and the transaction. To achieve this, we could use
- // a ReaderWriterMutex but we're holding the mutator lock so we fail mutex sanity
- // checks in Thread::AssertThreadSuspensionIsAllowable.
- Runtime* const runtime = Runtime::Current();
- Transaction transaction;
+ // We don't want non-trivial class initialization occurring on multiple threads due to
+ // deadlock problems. For example, a parent class is initialized (holding its lock) that
+ // refers to a sub-class in its static/class initializer causing it to try to acquire the
+ // sub-class' lock. While on a second thread the sub-class is initialized (holding its lock)
+ // after first initializing its parents, whose locks are acquired. This leads to a
+ // parent-to-child and a child-to-parent lock ordering and consequent potential deadlock.
+ // We need to use an ObjectLock due to potential suspension in the interpreting code. Rather
+ // than use a special Object for the purpose we use the Class of java.lang.Class.
+ Handle<mirror::Class> h_klass(hs.NewHandle(klass->GetClass()));
+ ObjectLock<mirror::Class> lock(soa.Self(), h_klass);
+ // Attempt to initialize allowing initialization of parent classes but still not static
+ // fields.
+ manager_->GetClassLinker()->EnsureInitialized(soa.Self(), klass, false, true);
+ if (!klass->IsInitialized()) {
+ // We need to initialize static fields; we only do this for image classes that aren't
+ // marked with $NoPreloadHolder (which implies the class should not be initialized early).
+ bool can_init_static_fields = manager_->GetCompiler()->IsImage() &&
+ manager_->GetCompiler()->IsImageClass(descriptor) &&
+ !StringPiece(descriptor).ends_with("$NoPreloadHolder;");
+ if (can_init_static_fields) {
+ VLOG(compiler) << "Initializing: " << descriptor;
+ // TODO multithreading support. We should ensure the current compilation thread has
+ // exclusive access to the runtime and the transaction. To achieve this, we could use
+ // a ReaderWriterMutex but we're holding the mutator lock so we fail mutex sanity
+ // checks in Thread::AssertThreadSuspensionIsAllowable.
+ Runtime* const runtime = Runtime::Current();
+ Transaction transaction;
- // Run the class initializer in transaction mode.
- runtime->EnterTransactionMode(&transaction);
- const mirror::Class::Status old_status = klass->GetStatus();
- bool success = manager->GetClassLinker()->EnsureInitialized(soa.Self(), klass, true,
- true);
- // TODO we detach transaction from runtime to indicate we quit the transactional
- // mode which prevents the GC from visiting objects modified during the transaction.
- // Ensure GC is not run so don't access freed objects when aborting transaction.
+ // Run the class initializer in transaction mode.
+ runtime->EnterTransactionMode(&transaction);
+ const mirror::Class::Status old_status = klass->GetStatus();
+ bool success = manager_->GetClassLinker()->EnsureInitialized(soa.Self(), klass, true,
+ true);
+ // TODO: we detach the transaction from the runtime to indicate we quit transactional
+ // mode, which prevents the GC from visiting objects modified during the transaction.
+ // Ensure the GC is not run, so we don't access freed objects when aborting the transaction.
- ScopedAssertNoThreadSuspension ants(soa.Self(), "Transaction end");
- runtime->ExitTransactionMode();
+ ScopedAssertNoThreadSuspension ants(soa.Self(), "Transaction end");
+ runtime->ExitTransactionMode();
- if (!success) {
- CHECK(soa.Self()->IsExceptionPending());
- mirror::Throwable* exception = soa.Self()->GetException();
- VLOG(compiler) << "Initialization of " << descriptor << " aborted because of "
- << exception->Dump();
- std::ostream* file_log = manager->GetCompiler()->
- GetCompilerOptions().GetInitFailureOutput();
- if (file_log != nullptr) {
- *file_log << descriptor << "\n";
- *file_log << exception->Dump() << "\n";
+ if (!success) {
+ CHECK(soa.Self()->IsExceptionPending());
+ mirror::Throwable* exception = soa.Self()->GetException();
+ VLOG(compiler) << "Initialization of " << descriptor << " aborted because of "
+ << exception->Dump();
+ std::ostream* file_log = manager_->GetCompiler()->
+ GetCompilerOptions().GetInitFailureOutput();
+ if (file_log != nullptr) {
+ *file_log << descriptor << "\n";
+ *file_log << exception->Dump() << "\n";
+ }
+ soa.Self()->ClearException();
+ transaction.Rollback();
+ CHECK_EQ(old_status, klass->GetStatus()) << "Previous class status not restored";
}
- soa.Self()->ClearException();
- transaction.Rollback();
- CHECK_EQ(old_status, klass->GetStatus()) << "Previous class status not restored";
}
}
+ soa.Self()->AssertNoPendingException();
}
- soa.Self()->AssertNoPendingException();
}
+ // Record the final class status if necessary.
+ ClassReference ref(manager_->GetDexFile(), class_def_index);
+ manager_->GetCompiler()->RecordClassStatus(ref, klass->GetStatus());
}
- // Record the final class status if necessary.
- ClassReference ref(manager->GetDexFile(), class_def_index);
- manager->GetCompiler()->RecordClassStatus(ref, klass->GetStatus());
+ // Clear any class not found or verification exceptions.
+ soa.Self()->ClearException();
}
- // Clear any class not found or verification exceptions.
- soa.Self()->ClearException();
-}
+
+ private:
+ const ParallelCompilationManager* const manager_;
+};
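Condensed, the transactional <clinit> flow implemented above is: enter transaction mode, run the static initializer while recording every heap write, and roll the writes back if the initializer throws. A minimal sketch reusing the diff's own names (not standalone code; it omits the ObjectLock and the ScopedAssertNoThreadSuspension guard the real path requires):

static bool InitializeInTransaction(Runtime* runtime,
                                    ClassLinker* class_linker,
                                    Thread* self,
                                    Handle<mirror::Class> klass) {
  Transaction transaction;
  runtime->EnterTransactionMode(&transaction);
  const mirror::Class::Status old_status = klass->GetStatus();
  // Every heap write made by <clinit> is recorded in the transaction.
  bool success = class_linker->EnsureInitialized(self, klass, true, true);
  runtime->ExitTransactionMode();
  if (!success) {
    self->ClearException();
    transaction.Rollback();  // Undo the recorded writes.
    CHECK_EQ(old_status, klass->GetStatus()) << "Previous class status not restored";
  }
  return success;
}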
void CompilerDriver::InitializeClasses(jobject jni_class_loader, const DexFile& dex_file,
const std::vector<const DexFile*>& dex_files,
@@ -2105,7 +2168,8 @@
} else {
thread_count = thread_count_;
}
- context.ForAll(0, dex_file.NumClassDefs(), InitializeClass, thread_count);
+ InitializeClassVisitor visitor(&context);
+ context.ForAll(0, dex_file.NumClassDefs(), &visitor, thread_count);
}
void CompilerDriver::InitializeClasses(jobject class_loader,
@@ -2132,101 +2196,108 @@
VLOG(compiler) << "Compile: " << GetMemoryUsageString(false);
}
-void CompilerDriver::CompileClass(const ParallelCompilationManager* manager,
- size_t class_def_index) {
- ATRACE_CALL();
- const DexFile& dex_file = *manager->GetDexFile();
- const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
- ClassLinker* class_linker = manager->GetClassLinker();
- jobject jclass_loader = manager->GetClassLoader();
- Thread* self = Thread::Current();
- {
- // Use a scoped object access to perform to the quick SkipClass check.
- const char* descriptor = dex_file.GetClassDescriptor(class_def);
- ScopedObjectAccess soa(self);
- StackHandleScope<3> hs(soa.Self());
- Handle<mirror::ClassLoader> class_loader(
- hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
- Handle<mirror::Class> klass(
- hs.NewHandle(class_linker->FindClass(soa.Self(), descriptor, class_loader)));
- if (klass.Get() == nullptr) {
- CHECK(soa.Self()->IsExceptionPending());
- soa.Self()->ClearException();
- } else if (SkipClass(jclass_loader, dex_file, klass.Get())) {
+class CompileClassVisitor : public CompilationVisitor {
+ public:
+ explicit CompileClassVisitor(const ParallelCompilationManager* manager) : manager_(manager) {}
+
+ virtual void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) OVERRIDE {
+ ATRACE_CALL();
+ const DexFile& dex_file = *manager_->GetDexFile();
+ const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
+ ClassLinker* class_linker = manager_->GetClassLinker();
+ jobject jclass_loader = manager_->GetClassLoader();
+ Thread* self = Thread::Current();
+ {
+ // Use a scoped object access to perform the quick SkipClass check.
+ const char* descriptor = dex_file.GetClassDescriptor(class_def);
+ ScopedObjectAccess soa(self);
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
+ Handle<mirror::Class> klass(
+ hs.NewHandle(class_linker->FindClass(soa.Self(), descriptor, class_loader)));
+ if (klass.Get() == nullptr) {
+ CHECK(soa.Self()->IsExceptionPending());
+ soa.Self()->ClearException();
+ } else if (SkipClass(jclass_loader, dex_file, klass.Get())) {
+ return;
+ }
+ }
+ ClassReference ref(&dex_file, class_def_index);
+ // Skip compiling classes with generic verifier failures, since they will still fail at runtime.
+ if (manager_->GetCompiler()->verification_results_->IsClassRejected(ref)) {
return;
}
- }
- ClassReference ref(&dex_file, class_def_index);
- // Skip compiling classes with generic verifier failures since they will still fail at runtime
- if (manager->GetCompiler()->verification_results_->IsClassRejected(ref)) {
- return;
- }
- const uint8_t* class_data = dex_file.GetClassData(class_def);
- if (class_data == nullptr) {
- // empty class, probably a marker interface
- return;
- }
-
- CompilerDriver* const driver = manager->GetCompiler();
-
- // Can we run DEX-to-DEX compiler on this class ?
- DexToDexCompilationLevel dex_to_dex_compilation_level = kDontDexToDexCompile;
- {
- ScopedObjectAccess soa(self);
- StackHandleScope<1> hs(soa.Self());
- Handle<mirror::ClassLoader> class_loader(
- hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
- dex_to_dex_compilation_level = driver->GetDexToDexCompilationlevel(
- soa.Self(), class_loader, dex_file, class_def);
- }
- ClassDataItemIterator it(dex_file, class_data);
- // Skip fields
- while (it.HasNextStaticField()) {
- it.Next();
- }
- while (it.HasNextInstanceField()) {
- it.Next();
- }
-
- bool compilation_enabled = driver->IsClassToCompile(
- dex_file.StringByTypeIdx(class_def.class_idx_));
-
- // Compile direct methods
- int64_t previous_direct_method_idx = -1;
- while (it.HasNextDirectMethod()) {
- uint32_t method_idx = it.GetMemberIndex();
- if (method_idx == previous_direct_method_idx) {
- // smali can create dex files with two encoded_methods sharing the same method_idx
- // http://code.google.com/p/smali/issues/detail?id=119
- it.Next();
- continue;
+ const uint8_t* class_data = dex_file.GetClassData(class_def);
+ if (class_data == nullptr) {
+ // Empty class, probably a marker interface.
+ return;
}
- previous_direct_method_idx = method_idx;
- driver->CompileMethod(self, it.GetMethodCodeItem(), it.GetMethodAccessFlags(),
- it.GetMethodInvokeType(class_def), class_def_index,
- method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level,
- compilation_enabled);
- it.Next();
- }
- // Compile virtual methods
- int64_t previous_virtual_method_idx = -1;
- while (it.HasNextVirtualMethod()) {
- uint32_t method_idx = it.GetMemberIndex();
- if (method_idx == previous_virtual_method_idx) {
- // smali can create dex files with two encoded_methods sharing the same method_idx
- // http://code.google.com/p/smali/issues/detail?id=119
- it.Next();
- continue;
+
+ CompilerDriver* const driver = manager_->GetCompiler();
+
+ // Can we run the DEX-to-DEX compiler on this class?
+ DexToDexCompilationLevel dex_to_dex_compilation_level = kDontDexToDexCompile;
+ {
+ ScopedObjectAccess soa(self);
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
+ dex_to_dex_compilation_level = driver->GetDexToDexCompilationlevel(
+ soa.Self(), class_loader, dex_file, class_def);
}
- previous_virtual_method_idx = method_idx;
- driver->CompileMethod(self, it.GetMethodCodeItem(), it.GetMethodAccessFlags(),
- it.GetMethodInvokeType(class_def), class_def_index,
- method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level,
- compilation_enabled);
- it.Next();
+ ClassDataItemIterator it(dex_file, class_data);
+ // Skip fields
+ while (it.HasNextStaticField()) {
+ it.Next();
+ }
+ while (it.HasNextInstanceField()) {
+ it.Next();
+ }
+
+ bool compilation_enabled = driver->IsClassToCompile(
+ dex_file.StringByTypeIdx(class_def.class_idx_));
+
+ // Compile direct methods
+ int64_t previous_direct_method_idx = -1;
+ while (it.HasNextDirectMethod()) {
+ uint32_t method_idx = it.GetMemberIndex();
+ if (method_idx == previous_direct_method_idx) {
+ // smali can create dex files with two encoded_methods sharing the same method_idx
+ // http://code.google.com/p/smali/issues/detail?id=119
+ it.Next();
+ continue;
+ }
+ previous_direct_method_idx = method_idx;
+ driver->CompileMethod(self, it.GetMethodCodeItem(), it.GetMethodAccessFlags(),
+ it.GetMethodInvokeType(class_def), class_def_index,
+ method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level,
+ compilation_enabled);
+ it.Next();
+ }
+ // Compile virtual methods
+ int64_t previous_virtual_method_idx = -1;
+ while (it.HasNextVirtualMethod()) {
+ uint32_t method_idx = it.GetMemberIndex();
+ if (method_idx == previous_virtual_method_idx) {
+ // smali can create dex files with two encoded_methods sharing the same method_idx
+ // http://code.google.com/p/smali/issues/detail?id=119
+ it.Next();
+ continue;
+ }
+ previous_virtual_method_idx = method_idx;
+ driver->CompileMethod(self, it.GetMethodCodeItem(), it.GetMethodAccessFlags(),
+ it.GetMethodInvokeType(class_def), class_def_index,
+ method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level,
+ compilation_enabled);
+ it.Next();
+ }
+ DCHECK(!it.HasNext());
}
- DCHECK(!it.HasNext());
-}
+
+ private:
+ const ParallelCompilationManager* const manager_;
+};
void CompilerDriver::CompileDexFile(jobject class_loader, const DexFile& dex_file,
const std::vector<const DexFile*>& dex_files,
@@ -2234,7 +2305,8 @@
TimingLogger::ScopedTiming t("Compile Dex File", timings);
ParallelCompilationManager context(Runtime::Current()->GetClassLinker(), class_loader, this,
&dex_file, dex_files, thread_pool);
- context.ForAll(0, dex_file.NumClassDefs(), CompilerDriver::CompileClass, thread_count_);
+ CompileClassVisitor visitor(&context);
+ context.ForAll(0, dex_file.NumClassDefs(), &visitor, thread_count_);
}
// Does the runtime for the InstructionSet provide an implementation returned by
@@ -2453,7 +2525,7 @@
const std::vector<const art::DexFile*>& dex_files,
OatWriter* oat_writer,
art::File* file)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (kProduce64BitELFFiles && Is64BitInstructionSet(GetInstructionSet())) {
return art::ElfWriterQuick64::Create(file, oat_writer, dex_files, android_root, is_host, *this);
} else {
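The recurring change in this file replaces each static (const ParallelCompilationManager*, size_t) callback with a CompilationVisitor subclass that carries the manager as a member, so ForAll dispatches through a virtual call instead of a function pointer plus context argument. A minimal single-threaded sketch of the shape (illustrative only; ART's ForAll fans the index range out across a ThreadPool):

#include <cstddef>

class CompilationVisitor {
 public:
  virtual ~CompilationVisitor() {}
  // One call per class_def index; subclasses keep their context (e.g. the
  // ParallelCompilationManager*) as member state instead of a void* argument.
  virtual void Visit(size_t class_def_index) = 0;
};

// Simplified stand-in for ParallelCompilationManager::ForAll.
static void ForAll(size_t begin, size_t end, CompilationVisitor* visitor) {
  for (size_t index = begin; index < end; ++index) {
    visitor->Visit(index);
  }
}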
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 5cf4044..88e03a2 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -114,14 +114,15 @@
void CompileAll(jobject class_loader, const std::vector<const DexFile*>& dex_files,
TimingLogger* timings)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
+ REQUIRES(!Locks::mutator_lock_, !compiled_classes_lock_);
CompiledMethod* CompileMethod(Thread* self, ArtMethod*)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) WARN_UNUSED;
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!compiled_methods_lock_) WARN_UNUSED;
// Compile a single Method.
void CompileOne(Thread* self, ArtMethod* method, TimingLogger* timings)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!compiled_methods_lock_, !compiled_classes_lock_);
VerificationResults* GetVerificationResults() const {
return verification_results_;
@@ -162,54 +163,56 @@
// Generate the trampolines that are invoked by unresolved direct methods.
const std::vector<uint8_t>* CreateInterpreterToInterpreterBridge() const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
const std::vector<uint8_t>* CreateInterpreterToCompiledCodeBridge() const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
const std::vector<uint8_t>* CreateJniDlsymLookup() const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
const std::vector<uint8_t>* CreateQuickGenericJniTrampoline() const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
const std::vector<uint8_t>* CreateQuickImtConflictTrampoline() const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
const std::vector<uint8_t>* CreateQuickResolutionTrampoline() const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
const std::vector<uint8_t>* CreateQuickToInterpreterBridge() const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
CompiledClass* GetCompiledClass(ClassReference ref) const
- LOCKS_EXCLUDED(compiled_classes_lock_);
+ REQUIRES(!compiled_classes_lock_);
CompiledMethod* GetCompiledMethod(MethodReference ref) const
- LOCKS_EXCLUDED(compiled_methods_lock_);
+ REQUIRES(!compiled_methods_lock_);
size_t GetNonRelativeLinkerPatchCount() const
- LOCKS_EXCLUDED(compiled_methods_lock_);
+ REQUIRES(!compiled_methods_lock_);
// Remove and delete a compiled method.
- void RemoveCompiledMethod(const MethodReference& method_ref);
+ void RemoveCompiledMethod(const MethodReference& method_ref) REQUIRES(!compiled_methods_lock_);
void AddRequiresConstructorBarrier(Thread* self, const DexFile* dex_file,
- uint16_t class_def_index);
+ uint16_t class_def_index)
+ REQUIRES(!freezing_constructor_lock_);
bool RequiresConstructorBarrier(Thread* self, const DexFile* dex_file,
- uint16_t class_def_index) const;
+ uint16_t class_def_index) const
+ REQUIRES(!freezing_constructor_lock_);
// Callbacks from compiler to see what runtime checks must be generated.
bool CanAssumeTypeIsPresentInDexCache(const DexFile& dex_file, uint32_t type_idx);
bool CanAssumeStringIsPresentInDexCache(const DexFile& dex_file, uint32_t string_idx)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
+ REQUIRES(!Locks::mutator_lock_);
// Are runtime access checks necessary in the compiled code?
bool CanAccessTypeWithoutChecks(uint32_t referrer_idx, const DexFile& dex_file,
uint32_t type_idx, bool* type_known_final = nullptr,
bool* type_known_abstract = nullptr,
bool* equals_referrers_class = nullptr)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
+ REQUIRES(!Locks::mutator_lock_);
// Are runtime access and instantiable checks necessary in the code?
bool CanAccessInstantiableTypeWithoutChecks(uint32_t referrer_idx, const DexFile& dex_file,
uint32_t type_idx)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
+ REQUIRES(!Locks::mutator_lock_);
bool CanEmbedTypeInCode(const DexFile& dex_file, uint32_t type_idx,
bool* is_type_initialized, bool* use_direct_type_ptr,
@@ -223,22 +226,22 @@
// Get the DexCache for the compilation unit.
mirror::DexCache* GetDexCache(const DexCompilationUnit* mUnit)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
mirror::ClassLoader* GetClassLoader(ScopedObjectAccess& soa, const DexCompilationUnit* mUnit)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Resolve compiling method's class. Returns null on failure.
mirror::Class* ResolveCompilingMethodsClass(
const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
mirror::Class* ResolveClass(
const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, uint16_t type_index,
const DexCompilationUnit* mUnit)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Resolve a field. Returns null on failure, including incompatible class change.
// NOTE: Unlike ClassLinker's ResolveField(), this method enforces is_static.
@@ -246,40 +249,40 @@
const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
uint32_t field_idx, bool is_static)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Resolve a field with a given dex file.
ArtField* ResolveFieldWithDexFile(
const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexFile* dex_file,
uint32_t field_idx, bool is_static)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Get declaration location of a resolved field.
void GetResolvedFieldDexFileLocation(
ArtField* resolved_field, const DexFile** declaring_dex_file,
uint16_t* declaring_class_idx, uint16_t* declaring_field_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- bool IsFieldVolatile(ArtField* field) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- MemberOffset GetFieldOffset(ArtField* field) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsFieldVolatile(ArtField* field) SHARED_REQUIRES(Locks::mutator_lock_);
+ MemberOffset GetFieldOffset(ArtField* field) SHARED_REQUIRES(Locks::mutator_lock_);
// Find a dex cache for a dex file.
inline mirror::DexCache* FindDexCache(const DexFile* dex_file)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Can we fast-path an IGET/IPUT access to an instance field? If yes, compute the field offset.
std::pair<bool, bool> IsFastInstanceField(
mirror::DexCache* dex_cache, mirror::Class* referrer_class,
ArtField* resolved_field, uint16_t field_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Can we fast-path an SGET/SPUT access to a static field? If yes, compute the type index
// of the declaring class in the referrer's dex file.
std::pair<bool, bool> IsFastStaticField(
mirror::DexCache* dex_cache, mirror::Class* referrer_class,
ArtField* resolved_field, uint16_t field_idx, uint32_t* storage_index)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Return whether the declaring class of `resolved_method` is
// available to `referrer_class`. If this is true, compute the type
@@ -291,34 +294,34 @@
ArtMethod* resolved_method,
uint16_t method_idx,
uint32_t* storage_index)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Is the static field in the referrer's class?
bool IsStaticFieldInReferrerClass(mirror::Class* referrer_class, ArtField* resolved_field)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Is the static field's class initialized?
bool IsStaticFieldsClassInitialized(mirror::Class* referrer_class,
ArtField* resolved_field)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Resolve a method. Returns null on failure, including incompatible class change.
ArtMethod* ResolveMethod(
ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
uint32_t method_idx, InvokeType invoke_type, bool check_incompatible_class_change = true)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Get declaration location of a resolved method.
void GetResolvedMethodDexFileLocation(
ArtMethod* resolved_method, const DexFile** declaring_dex_file,
uint16_t* declaring_class_idx, uint16_t* declaring_method_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Get the index in the vtable of the method.
uint16_t GetResolvedMethodVTableIndex(
ArtMethod* resolved_method, InvokeType type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Can we fast-path an INVOKE? If no, returns 0. If yes, returns a non-zero opaque flags value
// for ProcessedInvoke() and computes the necessary lowering info.
@@ -328,13 +331,13 @@
mirror::Class* referrer_class, ArtMethod* resolved_method, InvokeType* invoke_type,
MethodReference* target_method, const MethodReference* devirt_target,
uintptr_t* direct_code, uintptr_t* direct_method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Is the method's class initialized for an invoke?
// For static invokes, this determines whether we need to consider a potential call to <clinit>().
// For non-static invokes, assuming a non-null receiver, the class is always initialized.
bool IsMethodsClassInitialized(mirror::Class* referrer_class, ArtMethod* resolved_method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Get the layout of dex cache arrays for a dex file. Returns invalid layout if the
// dex cache arrays don't have a fixed layout.
@@ -349,18 +352,18 @@
ArtField** resolved_field,
mirror::Class** referrer_class,
mirror::DexCache** dex_cache)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Can we fast path instance field access? Computes field's offset and volatility.
bool ComputeInstanceFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit, bool is_put,
MemberOffset* field_offset, bool* is_volatile)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
+ REQUIRES(!Locks::mutator_lock_);
ArtField* ComputeInstanceFieldInfo(uint32_t field_idx,
const DexCompilationUnit* mUnit,
bool is_put,
const ScopedObjectAccess& soa)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Can we fastpath static field access? Computes field's offset, volatility and whether the
@@ -369,7 +372,7 @@
MemberOffset* field_offset, uint32_t* storage_index,
bool* is_referrers_class, bool* is_volatile, bool* is_initialized,
Primitive::Type* type)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
+ REQUIRES(!Locks::mutator_lock_);
// Can we fastpath an interface, super class, or virtual method call? Computes the method's vtable
// index.
@@ -377,7 +380,7 @@
bool update_stats, bool enable_devirtualization,
InvokeType* type, MethodReference* target_method, int* vtable_idx,
uintptr_t* direct_code, uintptr_t* direct_method)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
+ REQUIRES(!Locks::mutator_lock_);
const VerifiedMethod* GetVerifiedMethod(const DexFile* dex_file, uint32_t method_idx) const;
bool IsSafeCast(const DexCompilationUnit* mUnit, uint32_t dex_pc);
@@ -445,7 +448,7 @@
bool IsMethodToCompile(const MethodReference& method_ref) const;
void RecordClassStatus(ClassReference ref, mirror::Class::Status status)
- LOCKS_EXCLUDED(compiled_classes_lock_);
+ REQUIRES(!compiled_classes_lock_);
// Checks if the specified method has been verified without failures. Returns
// false if the method is not in the verification results (GetVerificationResults).
@@ -487,7 +490,7 @@
ArtMember* resolved_member,
uint16_t member_idx,
uint32_t* storage_index)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Can `referrer_class` access the resolved `member`?
// Dispatch call to mirror::Class::CanAccessResolvedField or
@@ -499,17 +502,17 @@
ArtMember* member,
mirror::DexCache* dex_cache,
uint32_t field_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Can we assume that the klass is initialized?
bool CanAssumeClassIsInitialized(mirror::Class* klass)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool CanReferrerAssumeClassIsInitialized(mirror::Class* referrer_class, mirror::Class* klass)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Can we assume that the klass is loaded?
bool CanAssumeClassIsLoaded(mirror::Class* klass)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// These flags are internal to CompilerDriver for collecting INVOKE resolution statistics.
// The only external contract is that unresolved method has flags 0 and resolved non-0.
@@ -540,71 +543,68 @@
/*out*/int* stats_flags,
MethodReference* target_method,
uintptr_t* direct_code, uintptr_t* direct_method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
private:
DexToDexCompilationLevel GetDexToDexCompilationlevel(
Thread* self, Handle<mirror::ClassLoader> class_loader, const DexFile& dex_file,
- const DexFile::ClassDef& class_def) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const DexFile::ClassDef& class_def) SHARED_REQUIRES(Locks::mutator_lock_);
void PreCompile(jobject class_loader, const std::vector<const DexFile*>& dex_files,
ThreadPool* thread_pool, TimingLogger* timings)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
+ REQUIRES(!Locks::mutator_lock_, !compiled_classes_lock_);
- void LoadImageClasses(TimingLogger* timings);
+ void LoadImageClasses(TimingLogger* timings) REQUIRES(!Locks::mutator_lock_);
// Attempt to resolve all type, methods, fields, and strings
// referenced from code in the dex file following PathClassLoader
// ordering semantics.
void Resolve(jobject class_loader, const std::vector<const DexFile*>& dex_files,
ThreadPool* thread_pool, TimingLogger* timings)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
+ REQUIRES(!Locks::mutator_lock_);
void ResolveDexFile(jobject class_loader, const DexFile& dex_file,
const std::vector<const DexFile*>& dex_files,
ThreadPool* thread_pool, TimingLogger* timings)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
+ REQUIRES(!Locks::mutator_lock_);
void Verify(jobject class_loader, const std::vector<const DexFile*>& dex_files,
ThreadPool* thread_pool, TimingLogger* timings);
void VerifyDexFile(jobject class_loader, const DexFile& dex_file,
const std::vector<const DexFile*>& dex_files,
ThreadPool* thread_pool, TimingLogger* timings)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
+ REQUIRES(!Locks::mutator_lock_);
void SetVerified(jobject class_loader, const std::vector<const DexFile*>& dex_files,
ThreadPool* thread_pool, TimingLogger* timings);
void SetVerifiedDexFile(jobject class_loader, const DexFile& dex_file,
const std::vector<const DexFile*>& dex_files,
ThreadPool* thread_pool, TimingLogger* timings)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
+ REQUIRES(!Locks::mutator_lock_);
void InitializeClasses(jobject class_loader, const std::vector<const DexFile*>& dex_files,
ThreadPool* thread_pool, TimingLogger* timings)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
+ REQUIRES(!Locks::mutator_lock_, !compiled_classes_lock_);
void InitializeClasses(jobject class_loader, const DexFile& dex_file,
const std::vector<const DexFile*>& dex_files,
ThreadPool* thread_pool, TimingLogger* timings)
- LOCKS_EXCLUDED(Locks::mutator_lock_, compiled_classes_lock_);
+ REQUIRES(!Locks::mutator_lock_, !compiled_classes_lock_);
- void UpdateImageClasses(TimingLogger* timings) LOCKS_EXCLUDED(Locks::mutator_lock_);
+ void UpdateImageClasses(TimingLogger* timings) REQUIRES(!Locks::mutator_lock_);
static void FindClinitImageClassesCallback(mirror::Object* object, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void Compile(jobject class_loader, const std::vector<const DexFile*>& dex_files,
ThreadPool* thread_pool, TimingLogger* timings);
void CompileDexFile(jobject class_loader, const DexFile& dex_file,
const std::vector<const DexFile*>& dex_files,
ThreadPool* thread_pool, TimingLogger* timings)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
+ REQUIRES(!Locks::mutator_lock_);
void CompileMethod(Thread* self, const DexFile::CodeItem* code_item, uint32_t access_flags,
InvokeType invoke_type, uint16_t class_def_idx, uint32_t method_idx,
jobject class_loader, const DexFile& dex_file,
DexToDexCompilationLevel dex_to_dex_compilation_level,
bool compilation_enabled)
- LOCKS_EXCLUDED(compiled_methods_lock_);
-
- static void CompileClass(const ParallelCompilationManager* context, size_t class_def_index)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
+ REQUIRES(!compiled_methods_lock_);
// Swap pool and allocator used for native allocations. May be file-backed. Needs to be first
// as other fields rely on this.
@@ -776,6 +776,7 @@
DedupeSet<ArrayRef<const uint8_t>,
SwapVector<uint8_t>, size_t, DedupeHashFunc<const uint8_t>, 4> dedupe_cfi_info_;
+ friend class CompileClassVisitor;
DISALLOW_COPY_AND_ASSIGN(CompilerDriver);
};
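The header-wide rename tracks Clang's capability-based thread-safety analysis: SHARED_LOCKS_REQUIRED(x) becomes SHARED_REQUIRES(x), and the advisory LOCKS_EXCLUDED(x) becomes the negative capability REQUIRES(!x), which the -Wthread-safety-negative flag enabled earlier in this change actually enforces at call sites. A self-contained sketch of the underlying attributes, with illustrative types rather than ART's Mutex:

#define CAPABILITY(x)  __attribute__((capability(x)))
#define ACQUIRE(...)   __attribute__((acquire_capability(__VA_ARGS__)))
#define RELEASE(...)   __attribute__((release_capability(__VA_ARGS__)))
#define REQUIRES(...)  __attribute__((requires_capability(__VA_ARGS__)))

struct CAPABILITY("mutex") Mutex {
  void Lock() ACQUIRE() {}      // Illustrative no-op lock.
  void Unlock() RELEASE() {}
};

Mutex gLock;
int gCounter;

// REQUIRES(!gLock): the caller must NOT already hold gLock. Unlike the old
// LOCKS_EXCLUDED, this negative capability is checked at every call site
// once -Wthread-safety-negative is on, so self-deadlock becomes a warning.
void Increment() REQUIRES(!gLock) {
  gLock.Lock();
  ++gCounter;
  gLock.Unlock();
}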
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index b358f4f..e35d07d 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -37,7 +37,7 @@
class CompilerDriverTest : public CommonCompilerTest {
protected:
- void CompileAll(jobject class_loader) LOCKS_EXCLUDED(Locks::mutator_lock_) {
+ void CompileAll(jobject class_loader) REQUIRES(!Locks::mutator_lock_) {
TimingLogger timings("CompilerDriverTest::CompileAll", false, false);
TimingLogger::ScopedTiming t(__FUNCTION__, &timings);
compiler_driver_->CompileAll(class_loader,
@@ -49,7 +49,7 @@
void EnsureCompiled(jobject class_loader, const char* class_name, const char* method,
const char* signature, bool is_virtual)
- LOCKS_EXCLUDED(Locks::mutator_lock_) {
+ REQUIRES(!Locks::mutator_lock_) {
CompileAll(class_loader);
Thread::Current()->TransitionFromSuspendedToRunnable();
bool started = runtime_->Start();
diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc
index 226e6b7..3f5a1ea 100644
--- a/compiler/driver/compiler_options.cc
+++ b/compiler/driver/compiler_options.cc
@@ -27,6 +27,8 @@
small_method_threshold_(kDefaultSmallMethodThreshold),
tiny_method_threshold_(kDefaultTinyMethodThreshold),
num_dex_methods_threshold_(kDefaultNumDexMethodsThreshold),
+ inline_depth_limit_(kDefaultInlineDepthLimit),
+ inline_max_code_units_(kDefaultInlineMaxCodeUnits),
include_patch_information_(kDefaultIncludePatchInformation),
top_k_profile_threshold_(kDefaultTopKProfileThreshold),
debuggable_(false),
@@ -52,6 +54,8 @@
size_t small_method_threshold,
size_t tiny_method_threshold,
size_t num_dex_methods_threshold,
+ size_t inline_depth_limit,
+ size_t inline_max_code_units,
bool include_patch_information,
double top_k_profile_threshold,
bool debuggable,
@@ -71,6 +75,8 @@
small_method_threshold_(small_method_threshold),
tiny_method_threshold_(tiny_method_threshold),
num_dex_methods_threshold_(num_dex_methods_threshold),
+ inline_depth_limit_(inline_depth_limit),
+ inline_max_code_units_(inline_max_code_units),
include_patch_information_(include_patch_information),
top_k_profile_threshold_(top_k_profile_threshold),
debuggable_(debuggable),
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index fe681e2..17b19dd 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -51,6 +51,8 @@
static constexpr double kDefaultTopKProfileThreshold = 90.0;
static const bool kDefaultGenerateDebugInfo = kIsDebugBuild;
static const bool kDefaultIncludePatchInformation = false;
+ static const size_t kDefaultInlineDepthLimit = 3;
+ static const size_t kDefaultInlineMaxCodeUnits = 18;
CompilerOptions();
~CompilerOptions();
@@ -61,6 +63,8 @@
size_t small_method_threshold,
size_t tiny_method_threshold,
size_t num_dex_methods_threshold,
+ size_t inline_depth_limit,
+ size_t inline_max_code_units,
bool include_patch_information,
double top_k_profile_threshold,
bool debuggable,
@@ -137,6 +141,14 @@
return num_dex_methods_threshold_;
}
+ size_t GetInlineDepthLimit() const {
+ return inline_depth_limit_;
+ }
+
+ size_t GetInlineMaxCodeUnits() const {
+ return inline_max_code_units_;
+ }
+
double GetTopKProfileThreshold() const {
return top_k_profile_threshold_;
}
@@ -202,6 +214,8 @@
const size_t small_method_threshold_;
const size_t tiny_method_threshold_;
const size_t num_dex_methods_threshold_;
+ const size_t inline_depth_limit_;
+ const size_t inline_max_code_units_;
const bool include_patch_information_;
// When using a profile file only the top K% of the profiled samples will be compiled.
const double top_k_profile_threshold_;
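The two new options feed the optimizing compiler's inliner: inlining stops once nesting reaches inline_depth_limit_ (default 3) or a callee exceeds inline_max_code_units_ (default 18 dex code units). A hypothetical consumer, shown only to illustrate the getters; the decision logic below is an assumption, not the real inliner:

// Sketch: would live next to code that already includes compiler_options.h.
bool ShouldInline(const CompilerOptions& options,
                  size_t callee_code_units,
                  size_t current_inline_depth) {
  if (current_inline_depth >= options.GetInlineDepthLimit()) {
    return false;  // Too deep: default limit is 3 nested levels.
  }
  if (callee_code_units > options.GetInlineMaxCodeUnits()) {
    return false;  // Callee too large: default limit is 18 code units.
  }
  return true;
}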
diff --git a/compiler/dwarf/dwarf_test.cc b/compiler/dwarf/dwarf_test.cc
index 4d423d0..a07d27c 100644
--- a/compiler/dwarf/dwarf_test.cc
+++ b/compiler/dwarf/dwarf_test.cc
@@ -27,7 +27,7 @@
namespace dwarf {
// Run the tests only on host since we need objdump.
-#ifndef HAVE_ANDROID_OS
+#ifndef __ANDROID__
constexpr CFIFormat kCFIFormat = DW_DEBUG_FRAME_FORMAT;
@@ -336,7 +336,7 @@
CheckObjdumpOutput(is64bit, "-W");
}
-#endif // HAVE_ANDROID_OS
+#endif // __ANDROID__
} // namespace dwarf
} // namespace art
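__ANDROID__ is the compiler-provided target macro, replacing the build-system define HAVE_ANDROID_OS, so the host-only guard keeps the same shape under the new name:

#ifndef __ANDROID__
// Host-only: tools such as objdump exist on the host but not on device.
void RunHostOnlyChecks() {}
#endif  // __ANDROID__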
diff --git a/compiler/elf_writer.h b/compiler/elf_writer.h
index 8e13b51..03f8ceb 100644
--- a/compiler/elf_writer.h
+++ b/compiler/elf_writer.h
@@ -57,7 +57,7 @@
const std::vector<const DexFile*>& dex_files,
const std::string& android_root,
bool is_host)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+ SHARED_REQUIRES(Locks::mutator_lock_) = 0;
const CompilerDriver* const compiler_driver_;
File* const elf_file_;
diff --git a/compiler/elf_writer_quick.h b/compiler/elf_writer_quick.h
index fd202ee..83781ab 100644
--- a/compiler/elf_writer_quick.h
+++ b/compiler/elf_writer_quick.h
@@ -33,7 +33,7 @@
const std::string& android_root,
bool is_host,
const CompilerDriver& driver)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static void EncodeOatPatches(const std::vector<uintptr_t>& locations,
std::vector<uint8_t>* buffer);
@@ -44,7 +44,7 @@
const std::string& android_root,
bool is_host)
OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
private:
ElfWriterQuick(const CompilerDriver& driver, File* elf_file)
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 2b65aa9..17d75a3 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -73,7 +73,7 @@
static constexpr bool kComputeEagerResolvedStrings = false;
static void CheckNoDexObjectsCallback(Object* obj, void* arg ATTRIBUTE_UNUSED)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
Class* klass = obj->GetClass();
CHECK_NE(PrettyClass(klass), "com.android.dex.Dex");
}
@@ -141,7 +141,7 @@
return false;
}
std::string error_msg;
- oat_file_ = OatFile::OpenReadable(oat_file.get(), oat_location, nullptr, &error_msg);
+ oat_file_ = OatFile::OpenReadable(oat_file.get(), oat_location, nullptr, outof(error_msg));
if (oat_file_ == nullptr) {
PLOG(ERROR) << "Failed to open writable oat file " << oat_filename << " for " << oat_location
<< ": " << error_msg;
@@ -539,16 +539,19 @@
return true;
}
+class ComputeLazyFieldsForClassesVisitor : public ClassVisitor {
+ public:
+ bool Visit(Class* c) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ StackHandleScope<1> hs(Thread::Current());
+ mirror::Class::ComputeName(hs.NewHandle(c));
+ return true;
+ }
+};
+
void ImageWriter::ComputeLazyFieldsForImageClasses() {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- class_linker->VisitClassesWithoutClassesLock(ComputeLazyFieldsForClassesVisitor, nullptr);
-}
-
-bool ImageWriter::ComputeLazyFieldsForClassesVisitor(Class* c, void* /*arg*/) {
- Thread* self = Thread::Current();
- StackHandleScope<1> hs(self);
- mirror::Class::ComputeName(hs.NewHandle(c));
- return true;
+ ComputeLazyFieldsForClassesVisitor visitor;
+ class_linker->VisitClassesWithoutClassesLock(&visitor);
}
void ImageWriter::ComputeEagerResolvedStringsCallback(Object* obj, void* arg ATTRIBUTE_UNUSED) {
@@ -592,9 +595,20 @@
return compiler_driver_.IsImageClass(klass->GetDescriptor(&temp));
}
-struct NonImageClasses {
- ImageWriter* image_writer;
- std::set<std::string>* non_image_classes;
+class NonImageClassesVisitor : public ClassVisitor {
+ public:
+ explicit NonImageClassesVisitor(ImageWriter* image_writer) : image_writer_(image_writer) {}
+
+ bool Visit(Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (!image_writer_->IsImageClass(klass)) {
+ std::string temp;
+ non_image_classes_.insert(klass->GetDescriptor(&temp));
+ }
+ return true;
+ }
+
+ std::set<std::string> non_image_classes_;
+ ImageWriter* const image_writer_;
};
void ImageWriter::PruneNonImageClasses() {
@@ -606,14 +620,11 @@
Thread* self = Thread::Current();
// Make a list of classes we would like to prune.
- std::set<std::string> non_image_classes;
- NonImageClasses context;
- context.image_writer = this;
- context.non_image_classes = &non_image_classes;
- class_linker->VisitClasses(NonImageClassesVisitor, &context);
+ NonImageClassesVisitor visitor(this);
+ class_linker->VisitClasses(&visitor);
// Remove the undesired classes from the class roots.
- for (const std::string& it : non_image_classes) {
+ for (const std::string& it : visitor.non_image_classes_) {
bool result = class_linker->RemoveClass(it.c_str(), nullptr);
DCHECK(result);
}
@@ -669,15 +680,6 @@
class_linker->DropFindArrayClassCache();
}
-bool ImageWriter::NonImageClassesVisitor(Class* klass, void* arg) {
- NonImageClasses* context = reinterpret_cast<NonImageClasses*>(arg);
- if (!context->image_writer->IsImageClass(klass)) {
- std::string temp;
- context->non_image_classes->insert(klass->GetDescriptor(&temp));
- }
- return true;
-}
-
void ImageWriter::CheckNonImageClassesRemoved() {
if (compiler_driver_.GetImageClasses() != nullptr) {
gc::Heap* heap = Runtime::Current()->GetHeap();
@@ -1035,7 +1037,7 @@
}
void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
for (size_t i = 0; i < count; ++i) {
*roots[i] = ImageAddress(*roots[i]);
}
@@ -1043,7 +1045,7 @@
void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
for (size_t i = 0; i < count; ++i) {
roots[i]->Assign(ImageAddress(roots[i]->AsMirrorPtr()));
}
@@ -1052,7 +1054,7 @@
private:
ImageWriter* const image_writer_;
- mirror::Object* ImageAddress(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Object* ImageAddress(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
const size_t offset = image_writer_->GetImageOffset(obj);
auto* const dest = reinterpret_cast<Object*>(image_writer_->image_begin_ + offset);
VLOG(compiler) << "Update root from " << obj << " to " << dest;
@@ -1189,8 +1191,15 @@
FixupVisitor(ImageWriter* image_writer, Object* copy) : image_writer_(image_writer), copy_(copy) {
}
+ // Ignore class roots since we don't have a way to map them to the destination. These are handled
+ // with other logic.
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
+ const {}
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
+
void operator()(Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
Object* ref = obj->GetFieldObject<Object, kVerifyNone>(offset);
// Use SetFieldObjectWithoutWriteBarrier to avoid card marking since we are writing to the
// image.
@@ -1200,8 +1209,7 @@
// java.lang.ref.Reference visitor.
void operator()(mirror::Class* klass ATTRIBUTE_UNUSED, mirror::Reference* ref) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(
mirror::Reference::ReferentOffset(), image_writer_->GetImageAddress(ref->GetReferent()));
}
@@ -1217,15 +1225,14 @@
}
void operator()(Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
DCHECK(obj->IsClass());
FixupVisitor::operator()(obj, offset, /*is_static*/false);
}
void operator()(mirror::Class* klass ATTRIBUTE_UNUSED,
mirror::Reference* ref ATTRIBUTE_UNUSED) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
LOG(FATAL) << "Reference not expected here.";
}
};
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index 1523383..cabd918 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -69,15 +69,15 @@
}
template <typename T>
- T* GetImageAddress(T* object) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ T* GetImageAddress(T* object) const SHARED_REQUIRES(Locks::mutator_lock_) {
return object == nullptr ? nullptr :
reinterpret_cast<T*>(image_begin_ + GetImageOffset(object));
}
- ArtMethod* GetImageMethodAddress(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtMethod* GetImageMethodAddress(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
mirror::HeapReference<mirror::Object>* GetDexCacheArrayElementImageAddress(
- const DexFile* dex_file, uint32_t offset) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const DexFile* dex_file, uint32_t offset) const SHARED_REQUIRES(Locks::mutator_lock_) {
auto it = dex_cache_array_starts_.find(dex_file);
DCHECK(it != dex_cache_array_starts_.end());
return reinterpret_cast<mirror::HeapReference<mirror::Object>*>(
@@ -88,7 +88,7 @@
bool Write(const std::string& image_filename, const std::string& oat_filename,
const std::string& oat_location)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
+ REQUIRES(!Locks::mutator_lock_);
uintptr_t GetOatDataBegin() {
return reinterpret_cast<uintptr_t>(oat_data_begin_);
@@ -98,7 +98,7 @@
bool AllocMemory();
// Mark the objects defined in this space in the given live bitmap.
- void RecordImageAllocations() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void RecordImageAllocations() SHARED_REQUIRES(Locks::mutator_lock_);
// Classify different kinds of bins that objects end up getting packed into during image writing.
enum Bin {
@@ -165,32 +165,32 @@
// We use the lock word to store the offset of the object in the image.
void AssignImageOffset(mirror::Object* object, BinSlot bin_slot)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void SetImageOffset(mirror::Object* object, size_t offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool IsImageOffsetAssigned(mirror::Object* object) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- size_t GetImageOffset(mirror::Object* object) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ size_t GetImageOffset(mirror::Object* object) const SHARED_REQUIRES(Locks::mutator_lock_);
void UpdateImageOffset(mirror::Object* obj, uintptr_t offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- void PrepareDexCacheArraySlots() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void AssignImageBinSlot(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void PrepareDexCacheArraySlots() SHARED_REQUIRES(Locks::mutator_lock_);
+ void AssignImageBinSlot(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
void SetImageBinSlot(mirror::Object* object, BinSlot bin_slot)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool IsImageBinSlotAssigned(mirror::Object* object) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- BinSlot GetImageBinSlot(mirror::Object* object) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ BinSlot GetImageBinSlot(mirror::Object* object) const SHARED_REQUIRES(Locks::mutator_lock_);
- void AddMethodPointerArray(mirror::PointerArray* arr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void AddMethodPointerArray(mirror::PointerArray* arr) SHARED_REQUIRES(Locks::mutator_lock_);
static void* GetImageAddressCallback(void* writer, mirror::Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return reinterpret_cast<ImageWriter*>(writer)->GetImageAddress(obj);
}
mirror::Object* GetLocalAddress(mirror::Object* object) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
size_t offset = GetImageOffset(object);
uint8_t* dst = image_->Begin() + offset;
return reinterpret_cast<mirror::Object*>(dst);
@@ -209,74 +209,70 @@
}
// Returns true if the class was in the original requested image classes list.
- bool IsImageClass(mirror::Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsImageClass(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
// Debug aid that lists the requested image classes.
void DumpImageClasses();
// Preinitializes some otherwise lazy fields (such as Class name) to avoid runtime image dirtying.
void ComputeLazyFieldsForImageClasses()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static bool ComputeLazyFieldsForClassesVisitor(mirror::Class* klass, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Wire dex cache resolved strings to strings in the image to avoid runtime resolution.
- void ComputeEagerResolvedStrings() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void ComputeEagerResolvedStrings() SHARED_REQUIRES(Locks::mutator_lock_);
static void ComputeEagerResolvedStringsCallback(mirror::Object* obj, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Remove unwanted classes from various roots.
- void PruneNonImageClasses() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static bool NonImageClassesVisitor(mirror::Class* c, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void PruneNonImageClasses() SHARED_REQUIRES(Locks::mutator_lock_);
// Verify unwanted classes removed.
- void CheckNonImageClassesRemoved() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void CheckNonImageClassesRemoved() SHARED_REQUIRES(Locks::mutator_lock_);
static void CheckNonImageClassesRemovedCallback(mirror::Object* obj, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Lays out where the image objects will be at runtime.
void CalculateNewObjectOffsets()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void CreateHeader(size_t oat_loaded_size, size_t oat_data_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
mirror::ObjectArray<mirror::Object>* CreateImageRoots() const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void CalculateObjectBinSlots(mirror::Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void UnbinObjectsIntoOffset(mirror::Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void WalkInstanceFields(mirror::Object* obj, mirror::Class* klass)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void WalkFieldsInOrder(mirror::Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static void WalkFieldsCallback(mirror::Object* obj, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static void UnbinObjectsIntoOffsetCallback(mirror::Object* obj, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Creates the contiguous image in memory and adjusts pointers.
- void CopyAndFixupNativeData() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void CopyAndFixupObjects() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void CopyAndFixupNativeData() SHARED_REQUIRES(Locks::mutator_lock_);
+ void CopyAndFixupObjects() SHARED_REQUIRES(Locks::mutator_lock_);
static void CopyAndFixupObjectsCallback(mirror::Object* obj, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void CopyAndFixupObject(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ void CopyAndFixupObject(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
void CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void FixupClass(mirror::Class* orig, mirror::Class* copy)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void FixupObject(mirror::Object* orig, mirror::Object* copy)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void FixupPointerArray(mirror::Object* dst, mirror::PointerArray* arr, mirror::Class* klass,
- Bin array_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ Bin array_type) SHARED_REQUIRES(Locks::mutator_lock_);
// Get quick code for non-resolution/imt_conflict/abstract method.
const uint8_t* GetQuickCode(ArtMethod* method, bool* quick_is_interpreted)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
const uint8_t* GetQuickEntryPoint(ArtMethod* method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Patches references in OatFile to expect runtime addresses.
void SetOatChecksumFromElfFile(File* elf_file);
@@ -285,10 +281,10 @@
size_t GetBinSizeSum(Bin up_to = kBinSize) const;
// Return true if a method is likely to be dirtied at runtime.
- bool WillMethodBeDirty(ArtMethod* m) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool WillMethodBeDirty(ArtMethod* m) const SHARED_REQUIRES(Locks::mutator_lock_);
// Assign the offset for an ArtMethod.
- void AssignMethodOffset(ArtMethod* method, Bin bin) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void AssignMethodOffset(ArtMethod* method, Bin bin) SHARED_REQUIRES(Locks::mutator_lock_);
const CompilerDriver& compiler_driver_;
@@ -376,6 +372,7 @@
friend class FixupClassVisitor;
friend class FixupRootVisitor;
friend class FixupVisitor;
+ friend class NonImageClassesVisitor;
DISALLOW_COPY_AND_ASSIGN(ImageWriter);
};
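The dropped static ComputeLazyFieldsForClassesVisitor and NonImageClassesVisitor
callbacks are replaced by visitor objects, hence the new friend declaration. A
hedged sketch of the shape such a visitor takes (the real class lives in
image_writer.cc; its base interface and fields are assumptions):

    // Hypothetical visitor replacing the C-style (Class*, void*) callback.
    class NonImageClassesVisitor : public ClassVisitor {
     public:
      explicit NonImageClassesVisitor(ImageWriter* image_writer)
          : image_writer_(image_writer) {}

      bool Visit(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE {
        if (!image_writer_->IsImageClass(klass)) {
          classes_to_prune_.insert(klass);  // Consumed by PruneNonImageClasses().
        }
        return true;  // Keep visiting.
      }

      std::set<mirror::Class*> classes_to_prune_;
      ImageWriter* const image_writer_;
    };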
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index a122ceb..c95bac2 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -55,7 +55,7 @@
}
extern "C" bool jit_compile_method(void* handle, ArtMethod* method, Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
auto* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
DCHECK(jit_compiler != nullptr);
return jit_compiler->CompileMethod(self, method);
@@ -71,6 +71,8 @@
CompilerOptions::kDefaultSmallMethodThreshold,
CompilerOptions::kDefaultTinyMethodThreshold,
CompilerOptions::kDefaultNumDexMethodsThreshold,
+ CompilerOptions::kDefaultInlineDepthLimit,
+ CompilerOptions::kDefaultInlineMaxCodeUnits,
/* include_patch_information */ false,
CompilerOptions::kDefaultTopKProfileThreshold,
Runtime::Current()->IsDebuggable(),
diff --git a/compiler/jit/jit_compiler.h b/compiler/jit/jit_compiler.h
index b0010e0..ef68caa 100644
--- a/compiler/jit/jit_compiler.h
+++ b/compiler/jit/jit_compiler.h
@@ -38,11 +38,11 @@
static JitCompiler* Create();
virtual ~JitCompiler();
bool CompileMethod(Thread* self, ArtMethod* method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// This is in the compiler since the runtime doesn't have access to the compiled method
// structures.
bool AddToCodeCache(ArtMethod* method, const CompiledMethod* compiled_method,
- OatFile::OatMethod* out_method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ OatFile::OatMethod* out_method) SHARED_REQUIRES(Locks::mutator_lock_);
CompilerCallbacks* GetCompilerCallbacks() const;
size_t GetTotalCompileTime() const {
return total_time_;
@@ -63,7 +63,7 @@
const CompiledMethod* compiled_method, uint8_t* reserve_begin, uint8_t* reserve_end,
const uint8_t* mapping_table, const uint8_t* vmap_table, const uint8_t* gc_map);
bool MakeExecutable(CompiledMethod* compiled_method, ArtMethod* method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
DISALLOW_COPY_AND_ASSIGN(JitCompiler);
};
diff --git a/compiler/jni/jni_cfi_test.cc b/compiler/jni/jni_cfi_test.cc
index 016f28e..0bfe8a2 100644
--- a/compiler/jni/jni_cfi_test.cc
+++ b/compiler/jni/jni_cfi_test.cc
@@ -28,7 +28,7 @@
namespace art {
// Run the tests only on host.
-#ifndef HAVE_ANDROID_OS
+#ifndef __ANDROID__
class JNICFITest : public CFITest {
public:
@@ -88,6 +88,6 @@
TEST_ISA(kMips)
TEST_ISA(kMips64)
-#endif // HAVE_ANDROID_OS
+#endif // __ANDROID__
} // namespace art
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 0747756..05a33d7 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -16,6 +16,7 @@
#include "arch/instruction_set_features.h"
#include "art_method-inl.h"
+#include "base/out.h"
#include "class_linker.h"
#include "common_compiler_test.h"
#include "compiled_method.h"
@@ -44,7 +45,7 @@
void CheckMethod(ArtMethod* method,
const OatFile::OatMethod& oat_method,
const DexFile& dex_file)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
const CompiledMethod* compiled_method =
compiler_driver_->GetCompiledMethod(MethodReference(&dex_file,
method->GetDexMethodIndex()));
@@ -83,7 +84,7 @@
std::string error_msg;
std::unique_ptr<const InstructionSetFeatures> insn_features(
- InstructionSetFeatures::FromVariant(insn_set, "default", &error_msg));
+ InstructionSetFeatures::FromVariant(insn_set, "default", outof(error_msg)));
ASSERT_TRUE(insn_features.get() != nullptr) << error_msg;
compiler_options_.reset(new CompilerOptions);
verification_results_.reset(new VerificationResults(compiler_options_.get()));
@@ -123,7 +124,7 @@
compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), &timings);
}
std::unique_ptr<OatFile> oat_file(OatFile::Open(tmp.GetFilename(), tmp.GetFilename(), nullptr,
- nullptr, false, nullptr, &error_msg));
+ nullptr, false, nullptr, outof(error_msg)));
ASSERT_TRUE(oat_file.get() != nullptr) << error_msg;
const OatHeader& oat_header = oat_file->GetOatHeader();
ASSERT_TRUE(oat_header.IsValid());
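The outof(error_msg) calls replace bare &error_msg arguments with the wrapper
from base/out.h included above, making the out-parameter explicit at the call
site. A rough sketch of the idea (ART's real out<T> adds debug checking; this
reduced form is an assumption):

    // Minimal out-parameter wrapper; the concrete type in base/out.h may differ.
    template <typename T>
    class out {
     public:
      explicit out(T& param) : param_(param) {}
      T& operator*() { return param_; }
      T* operator->() { return &param_; }
     private:
      T& param_;
    };

    template <typename T>
    out<T> outof(T& param) { return out<T>(param); }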
@@ -183,14 +184,14 @@
EXPECT_EQ(72U, sizeof(OatHeader));
EXPECT_EQ(4U, sizeof(OatMethodOffsets));
EXPECT_EQ(28U, sizeof(OatQuickMethodHeader));
- EXPECT_EQ(112 * GetInstructionSetPointerSize(kRuntimeISA), sizeof(QuickEntryPoints));
+ EXPECT_EQ(113 * GetInstructionSetPointerSize(kRuntimeISA), sizeof(QuickEntryPoints));
}
TEST_F(OatTest, OatHeaderIsValid) {
InstructionSet insn_set = kX86;
std::string error_msg;
std::unique_ptr<const InstructionSetFeatures> insn_features(
- InstructionSetFeatures::FromVariant(insn_set, "default", &error_msg));
+ InstructionSetFeatures::FromVariant(insn_set, "default", outof(error_msg)));
ASSERT_TRUE(insn_features.get() != nullptr) << error_msg;
std::vector<const DexFile*> dex_files;
uint32_t image_file_location_oat_checksum = 0;
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 4318ea5..64e7487 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -365,7 +365,7 @@
}
bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
OatClass* oat_class = writer_->oat_classes_[oat_class_index_];
CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
@@ -560,7 +560,7 @@
}
bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it ATTRIBUTE_UNUSED)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
OatClass* oat_class = writer_->oat_classes_[oat_class_index_];
CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
@@ -601,7 +601,7 @@
}
bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
OatClass* oat_class = writer_->oat_classes_[oat_class_index_];
CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
@@ -665,7 +665,7 @@
}
bool StartClass(const DexFile* dex_file, size_t class_def_index)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
OatDexMethodVisitor::StartClass(dex_file, class_def_index);
if (dex_cache_ == nullptr || dex_cache_->GetDexFile() != dex_file) {
dex_cache_ = class_linker_->FindDexCache(*dex_file);
@@ -673,7 +673,7 @@
return true;
}
- bool EndClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool EndClass() SHARED_REQUIRES(Locks::mutator_lock_) {
bool result = OatDexMethodVisitor::EndClass();
if (oat_class_index_ == writer_->oat_classes_.size()) {
DCHECK(result); // OatDexMethodVisitor::EndClass() never fails.
@@ -687,7 +687,7 @@
}
bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
OatClass* oat_class = writer_->oat_classes_[oat_class_index_];
const CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
@@ -793,7 +793,7 @@
}
ArtMethod* GetTargetMethod(const LinkerPatch& patch)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
MethodReference ref = patch.TargetMethod();
mirror::DexCache* dex_cache =
(dex_file_ == ref.dex_file) ? dex_cache_ : class_linker_->FindDexCache(*ref.dex_file);
@@ -803,7 +803,7 @@
return method;
}
- uint32_t GetTargetOffset(const LinkerPatch& patch) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uint32_t GetTargetOffset(const LinkerPatch& patch) SHARED_REQUIRES(Locks::mutator_lock_) {
auto target_it = writer_->method_offset_map_.map.find(patch.TargetMethod());
uint32_t target_offset =
(target_it != writer_->method_offset_map_.map.end()) ? target_it->second : 0u;
@@ -828,7 +828,7 @@
}
mirror::Class* GetTargetType(const LinkerPatch& patch)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
mirror::DexCache* dex_cache = (dex_file_ == patch.TargetTypeDexFile())
? dex_cache_ : class_linker_->FindDexCache(*patch.TargetTypeDexFile());
mirror::Class* type = dex_cache->GetResolvedType(patch.TargetTypeIndex());
@@ -836,7 +836,7 @@
return type;
}
- uint32_t GetDexCacheOffset(const LinkerPatch& patch) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uint32_t GetDexCacheOffset(const LinkerPatch& patch) SHARED_REQUIRES(Locks::mutator_lock_) {
if (writer_->image_writer_ != nullptr) {
auto* element = writer_->image_writer_->GetDexCacheArrayElementImageAddress(
patch.TargetDexCacheDexFile(), patch.TargetDexCacheElementOffset());
@@ -849,7 +849,7 @@
}
void PatchObjectAddress(std::vector<uint8_t>* code, uint32_t offset, mirror::Object* object)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// NOTE: Direct method pointers across oat files don't use linker patches. However, direct
// type pointers across oat files do. (TODO: Investigate why.)
if (writer_->image_writer_ != nullptr) {
@@ -865,7 +865,7 @@
}
void PatchMethodAddress(std::vector<uint8_t>* code, uint32_t offset, ArtMethod* method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// NOTE: Direct method pointers across oat files don't use linker patches. However, direct
// type pointers across oat files do. (TODO: Investigate why.)
if (writer_->image_writer_ != nullptr) {
@@ -882,7 +882,7 @@
}
void PatchCodeAddress(std::vector<uint8_t>* code, uint32_t offset, uint32_t target_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
uint32_t address = writer_->image_writer_ == nullptr ? target_offset :
PointerToLowMemUInt32(writer_->image_writer_->GetOatFileBegin() +
writer_->oat_data_offset_ + target_offset);
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index 82b9377..3baf438 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -165,9 +165,9 @@
size_t InitOatClasses(size_t offset);
size_t InitOatMaps(size_t offset);
size_t InitOatCode(size_t offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
size_t InitOatCodeDexFiles(size_t offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool WriteTables(OutputStream* out, const size_t file_offset);
size_t WriteMaps(OutputStream* out, const size_t file_offset, size_t relative_offset);
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 52a3a15..8841498 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -357,9 +357,10 @@
HInstruction* first_insn = block->GetFirstInstruction();
if (first_insn->IsLoadException()) {
// Catch block starts with a LoadException. Split the block after the
- // StoreLocal that must come after the load.
+ // StoreLocal and ClearException, which must come after the load.
DCHECK(first_insn->GetNext()->IsStoreLocal());
- block = block->SplitBefore(first_insn->GetNext()->GetNext());
+ DCHECK(first_insn->GetNext()->GetNext()->IsClearException());
+ block = block->SplitBefore(first_insn->GetNext()->GetNext()->GetNext());
} else {
// Catch block does not load the exception. Split at the beginning to
// create an empty catch block.
@@ -2552,6 +2553,7 @@
case Instruction::MOVE_EXCEPTION: {
current_block_->AddInstruction(new (arena_) HLoadException());
UpdateLocal(instruction.VRegA_11x(), current_block_->GetLastInstruction());
+ current_block_->AddInstruction(new (arena_) HClearException());
break;
}
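Splitting MOVE_EXCEPTION into an HLoadException/HClearException pair lets the
load and the clear be code-generated independently. The per-architecture
changes below each emit the direct TLS equivalent of the runtime calls:

    // What the two instructions amount to on the current Thread. GetException
    // and ClearException are the existing Thread APIs; the generated code
    // performs the same TLS load and store inline.
    mirror::Throwable* exception = self->GetException();  // HLoadException
    self->ClearException();                               // HClearException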
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 75b8f06..b0a4ce2 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -4290,6 +4290,10 @@
__ Bind(slow_path->GetExitLabel());
}
+static int32_t GetExceptionTlsOffset() {
+ return Thread::ExceptionOffset<kArmWordSize>().Int32Value();
+}
+
void LocationsBuilderARM::VisitLoadException(HLoadException* load) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
@@ -4298,10 +4302,16 @@
void InstructionCodeGeneratorARM::VisitLoadException(HLoadException* load) {
Register out = load->GetLocations()->Out().AsRegister<Register>();
- int32_t offset = Thread::ExceptionOffset<kArmWordSize>().Int32Value();
- __ LoadFromOffset(kLoadWord, out, TR, offset);
+ __ LoadFromOffset(kLoadWord, out, TR, GetExceptionTlsOffset());
+}
+
+void LocationsBuilderARM::VisitClearException(HClearException* clear) {
+ new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
+}
+
+void InstructionCodeGeneratorARM::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
__ LoadImmediate(IP, 0);
- __ StoreToOffset(kStoreWord, IP, TR, offset);
+ __ StoreToOffset(kStoreWord, IP, TR, GetExceptionTlsOffset());
}
void LocationsBuilderARM::VisitThrow(HThrow* instruction) {
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 11de4ee..bbde7e8 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -2496,6 +2496,10 @@
}
}
+static MemOperand GetExceptionTlsAddress() {
+ return MemOperand(tr, Thread::ExceptionOffset<kArm64WordSize>().Int32Value());
+}
+
void LocationsBuilderARM64::VisitLoadException(HLoadException* load) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
@@ -2503,9 +2507,15 @@
}
void InstructionCodeGeneratorARM64::VisitLoadException(HLoadException* instruction) {
- MemOperand exception = MemOperand(tr, Thread::ExceptionOffset<kArm64WordSize>().Int32Value());
- __ Ldr(OutputRegister(instruction), exception);
- __ Str(wzr, exception);
+ __ Ldr(OutputRegister(instruction), GetExceptionTlsAddress());
+}
+
+void LocationsBuilderARM64::VisitClearException(HClearException* clear) {
+ new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
+}
+
+void InstructionCodeGeneratorARM64::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
+ __ Str(wzr, GetExceptionTlsAddress());
}
void LocationsBuilderARM64::VisitLoadLocal(HLoadLocal* load) {
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index e7d2ec6..a5bad65 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -2544,6 +2544,10 @@
}
}
+static int32_t GetExceptionTlsOffset() {
+ return Thread::ExceptionOffset<kMips64WordSize>().Int32Value();
+}
+
void LocationsBuilderMIPS64::VisitLoadException(HLoadException* load) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
@@ -2552,8 +2556,15 @@
void InstructionCodeGeneratorMIPS64::VisitLoadException(HLoadException* load) {
GpuRegister out = load->GetLocations()->Out().AsRegister<GpuRegister>();
- __ LoadFromOffset(kLoadUnsignedWord, out, TR, Thread::ExceptionOffset<kMips64WordSize>().Int32Value());
- __ StoreToOffset(kStoreWord, ZERO, TR, Thread::ExceptionOffset<kMips64WordSize>().Int32Value());
+ __ LoadFromOffset(kLoadUnsignedWord, out, TR, GetExceptionTlsOffset());
+}
+
+void LocationsBuilderMIPS64::VisitClearException(HClearException* clear) {
+ new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
+ __ StoreToOffset(kStoreWord, ZERO, TR, GetExceptionTlsOffset());
}
void LocationsBuilderMIPS64::VisitLoadLocal(HLoadLocal* load) {
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index e15eff9..fdef06b 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -2348,7 +2348,12 @@
case Primitive::kPrimInt:
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::Any());
- locations->SetOut(Location::SameAsFirstInput());
+ if (mul->InputAt(1)->IsIntConstant()) {
+ // Can use 3 operand multiply.
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ } else {
+ locations->SetOut(Location::SameAsFirstInput());
+ }
break;
case Primitive::kPrimLong: {
locations->SetInAt(0, Location::RequiresRegister());
@@ -2376,21 +2381,24 @@
LocationSummary* locations = mul->GetLocations();
Location first = locations->InAt(0);
Location second = locations->InAt(1);
- DCHECK(first.Equals(locations->Out()));
+ Location out = locations->Out();
switch (mul->GetResultType()) {
- case Primitive::kPrimInt: {
- if (second.IsRegister()) {
+ case Primitive::kPrimInt:
+ // The constant may have ended up in a register, so test explicitly to avoid
+ // problems where the output may not be the same as the first operand.
+ if (mul->InputAt(1)->IsIntConstant()) {
+ Immediate imm(mul->InputAt(1)->AsIntConstant()->GetValue());
+ __ imull(out.AsRegister<Register>(), first.AsRegister<Register>(), imm);
+ } else if (second.IsRegister()) {
+ DCHECK(first.Equals(out));
__ imull(first.AsRegister<Register>(), second.AsRegister<Register>());
- } else if (second.IsConstant()) {
- Immediate imm(second.GetConstant()->AsIntConstant()->GetValue());
- __ imull(first.AsRegister<Register>(), imm);
} else {
DCHECK(second.IsStackSlot());
+ DCHECK(first.Equals(out));
__ imull(first.AsRegister<Register>(), Address(ESP, second.GetStackIndex()));
}
break;
- }
case Primitive::kPrimLong: {
Register in1_hi = first.AsRegisterPairHigh<Register>();
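The register-allocation change hinges on which x86 integer-multiply encoding
can be emitted. For reference, the forms involved (Intel operand order):

    // imul r/m32                EDX:EAX = EAX * r/m32    (widening; not used here)
    // imul r32, r/m32           r32 = r32 * r/m32        (needs out == first input)
    // imul r32, r/m32, imm32    r32 = r/m32 * imm32      (out may differ from src)

With a constant second operand the three-operand form writes the output
directly, so the locations no longer need Location::SameAsFirstInput(). The
same reasoning drives the imulq changes in code_generator_x86_64.cc below.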
@@ -4535,7 +4543,11 @@
Location destination = move->GetDestination();
if (source.IsRegister() && destination.IsRegister()) {
- __ xchgl(destination.AsRegister<Register>(), source.AsRegister<Register>());
+ // Use XOR swap algorithm to avoid serializing XCHG instruction or using a temporary.
+ DCHECK_NE(destination.AsRegister<Register>(), source.AsRegister<Register>());
+ __ xorl(destination.AsRegister<Register>(), source.AsRegister<Register>());
+ __ xorl(source.AsRegister<Register>(), destination.AsRegister<Register>());
+ __ xorl(destination.AsRegister<Register>(), source.AsRegister<Register>());
} else if (source.IsRegister() && destination.IsStackSlot()) {
Exchange(source.AsRegister<Register>(), destination.GetStackIndex());
} else if (source.IsStackSlot() && destination.IsRegister()) {
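The DCHECK_NE guards the one failure mode of the XOR trick: XOR-swapping a
register with itself zeroes it. The identity used:

    // x ^= y;   x == a ^ b
    // y ^= x;   y == b ^ (a ^ b) == a
    // x ^= y;   x == (a ^ b) ^ a == b
    // If x and y were the same register, the first XOR would clear it to 0.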
@@ -4681,6 +4693,10 @@
__ Bind(slow_path->GetExitLabel());
}
+static Address GetExceptionTlsAddress() {
+ return Address::Absolute(Thread::ExceptionOffset<kX86WordSize>().Int32Value());
+}
+
void LocationsBuilderX86::VisitLoadException(HLoadException* load) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
@@ -4688,9 +4704,15 @@
}
void InstructionCodeGeneratorX86::VisitLoadException(HLoadException* load) {
- Address address = Address::Absolute(Thread::ExceptionOffset<kX86WordSize>().Int32Value());
- __ fs()->movl(load->GetLocations()->Out().AsRegister<Register>(), address);
- __ fs()->movl(address, Immediate(0));
+ __ fs()->movl(load->GetLocations()->Out().AsRegister<Register>(), GetExceptionTlsAddress());
+}
+
+void LocationsBuilderX86::VisitClearException(HClearException* clear) {
+ new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
+}
+
+void InstructionCodeGeneratorX86::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
+ __ fs()->movl(GetExceptionTlsAddress(), Immediate(0));
}
void LocationsBuilderX86::VisitThrow(HThrow* instruction) {
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index a95ce68..4fe93f9 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -2535,13 +2535,19 @@
case Primitive::kPrimInt: {
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::Any());
- locations->SetOut(Location::SameAsFirstInput());
+ if (mul->InputAt(1)->IsIntConstant()) {
+ // Can use 3 operand multiply.
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ } else {
+ locations->SetOut(Location::SameAsFirstInput());
+ }
break;
}
case Primitive::kPrimLong: {
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RegisterOrInt32LongConstant(mul->InputAt(1)));
- if (locations->InAt(1).IsConstant()) {
+ locations->SetInAt(1, Location::Any());
+ if (mul->InputAt(1)->IsLongConstant() &&
+ IsInt<32>(mul->InputAt(1)->AsLongConstant()->GetValue())) {
// Can use 3 operand multiply.
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
} else {
@@ -2566,37 +2572,51 @@
LocationSummary* locations = mul->GetLocations();
Location first = locations->InAt(0);
Location second = locations->InAt(1);
+ Location out = locations->Out();
switch (mul->GetResultType()) {
- case Primitive::kPrimInt: {
- DCHECK(first.Equals(locations->Out()));
- if (second.IsRegister()) {
+ case Primitive::kPrimInt:
+ // The constant may have ended up in a register, so test explicitly to avoid
+ // problems where the output may not be the same as the first operand.
+ if (mul->InputAt(1)->IsIntConstant()) {
+ Immediate imm(mul->InputAt(1)->AsIntConstant()->GetValue());
+ __ imull(out.AsRegister<CpuRegister>(), first.AsRegister<CpuRegister>(), imm);
+ } else if (second.IsRegister()) {
+ DCHECK(first.Equals(out));
__ imull(first.AsRegister<CpuRegister>(), second.AsRegister<CpuRegister>());
- } else if (second.IsConstant()) {
- Immediate imm(second.GetConstant()->AsIntConstant()->GetValue());
- __ imull(first.AsRegister<CpuRegister>(), imm);
} else {
+ DCHECK(first.Equals(out));
DCHECK(second.IsStackSlot());
__ imull(first.AsRegister<CpuRegister>(),
Address(CpuRegister(RSP), second.GetStackIndex()));
}
break;
- }
case Primitive::kPrimLong: {
- if (second.IsConstant()) {
- int64_t value = second.GetConstant()->AsLongConstant()->GetValue();
- DCHECK(IsInt<32>(value));
- __ imulq(locations->Out().AsRegister<CpuRegister>(),
- first.AsRegister<CpuRegister>(),
- Immediate(static_cast<int32_t>(value)));
- } else {
- DCHECK(first.Equals(locations->Out()));
+ // The constant may have ended up in a register, so test explicitly to avoid
+ // problems where the output may not be the same as the first operand.
+ if (mul->InputAt(1)->IsLongConstant()) {
+ int64_t value = mul->InputAt(1)->AsLongConstant()->GetValue();
+ if (IsInt<32>(value)) {
+ __ imulq(out.AsRegister<CpuRegister>(), first.AsRegister<CpuRegister>(),
+ Immediate(static_cast<int32_t>(value)));
+ } else {
+ // Have to use the constant area.
+ DCHECK(first.Equals(out));
+ __ imulq(first.AsRegister<CpuRegister>(), codegen_->LiteralInt64Address(value));
+ }
+ } else if (second.IsRegister()) {
+ DCHECK(first.Equals(out));
__ imulq(first.AsRegister<CpuRegister>(), second.AsRegister<CpuRegister>());
+ } else {
+ DCHECK(second.IsDoubleStackSlot());
+ DCHECK(first.Equals(out));
+ __ imulq(first.AsRegister<CpuRegister>(),
+ Address(CpuRegister(RSP), second.GetStackIndex()));
}
break;
}
case Primitive::kPrimFloat: {
- DCHECK(first.Equals(locations->Out()));
+ DCHECK(first.Equals(out));
if (second.IsFpuRegister()) {
__ mulss(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
} else if (second.IsConstant()) {
@@ -2611,7 +2631,7 @@
}
case Primitive::kPrimDouble: {
- DCHECK(first.Equals(locations->Out()));
+ DCHECK(first.Equals(out));
if (second.IsFpuRegister()) {
__ mulsd(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
} else if (second.IsConstant()) {
@@ -4507,6 +4527,10 @@
__ Bind(slow_path->GetExitLabel());
}
+static Address GetExceptionTlsAddress() {
+ return Address::Absolute(Thread::ExceptionOffset<kX86_64WordSize>().Int32Value(), true);
+}
+
void LocationsBuilderX86_64::VisitLoadException(HLoadException* load) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
@@ -4514,10 +4538,15 @@
}
void InstructionCodeGeneratorX86_64::VisitLoadException(HLoadException* load) {
- Address address = Address::Absolute(
- Thread::ExceptionOffset<kX86_64WordSize>().Int32Value(), true);
- __ gs()->movl(load->GetLocations()->Out().AsRegister<CpuRegister>(), address);
- __ gs()->movl(address, Immediate(0));
+ __ gs()->movl(load->GetLocations()->Out().AsRegister<CpuRegister>(), GetExceptionTlsAddress());
+}
+
+void LocationsBuilderX86_64::VisitClearException(HClearException* clear) {
+ new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
+}
+
+void InstructionCodeGeneratorX86_64::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
+ __ gs()->movl(GetExceptionTlsAddress(), Immediate(0));
}
void LocationsBuilderX86_64::VisitThrow(HThrow* instruction) {
diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc
index cfebb77..e4bc9e6 100644
--- a/compiler/optimizing/graph_checker.cc
+++ b/compiler/optimizing/graph_checker.cc
@@ -89,6 +89,33 @@
block->GetBlockId()));
}
+ // Ensure that only Return(Void) and Throw jump to Exit. An exiting
+ // TryBoundary may be between a Throw and the Exit if the Throw is in a try.
+ if (block->IsExitBlock()) {
+ for (size_t i = 0, e = block->GetPredecessors().Size(); i < e; ++i) {
+ HBasicBlock* predecessor = block->GetPredecessors().Get(i);
+ if (predecessor->IsSingleTryBoundary()
+ && !predecessor->GetLastInstruction()->AsTryBoundary()->IsEntry()) {
+ HBasicBlock* real_predecessor = predecessor->GetSinglePredecessor();
+ HInstruction* last_instruction = real_predecessor->GetLastInstruction();
+ if (!last_instruction->IsThrow()) {
+ AddError(StringPrintf("Unexpected TryBoundary between %s:%d and Exit.",
+ last_instruction->DebugName(),
+ last_instruction->GetId()));
+ }
+ } else {
+ HInstruction* last_instruction = predecessor->GetLastInstruction();
+ if (!last_instruction->IsReturn()
+ && !last_instruction->IsReturnVoid()
+ && !last_instruction->IsThrow()) {
+ AddError(StringPrintf("Unexpected instruction %s:%d jumps into the exit block.",
+ last_instruction->DebugName(),
+ last_instruction->GetId()));
+ }
+ }
+ }
+ }
+
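The accepted predecessor shapes, sketched with hypothetical block numbers:

    // B1: ... Return/ReturnVoid/Throw ------------------> Exit
    // B2 (in try): ... Throw --> B3: TryBoundary (exit) -> Exit
    // (B3 is the single-TryBoundary predecessor; a Throw must sit behind it.)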
// Visit this block's list of phis.
for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
HInstruction* current = it.Current();
@@ -328,6 +355,39 @@
void SSAChecker::VisitBasicBlock(HBasicBlock* block) {
super_type::VisitBasicBlock(block);
+ // Ensure that only catch blocks have exceptional predecessors and, if they
+ // do, that these are instructions which throw into them.
+ if (block->IsCatchBlock()) {
+ for (size_t i = 0, e = block->GetExceptionalPredecessors().Size(); i < e; ++i) {
+ HInstruction* thrower = block->GetExceptionalPredecessors().Get(i);
+ HBasicBlock* try_block = thrower->GetBlock();
+ if (!thrower->CanThrow()) {
+ AddError(StringPrintf("Exceptional predecessor %s:%d of catch block %d does not throw.",
+ thrower->DebugName(),
+ thrower->GetId(),
+ block->GetBlockId()));
+ } else if (!try_block->IsInTry()) {
+ AddError(StringPrintf("Exceptional predecessor %s:%d of catch block %d "
+ "is not in a try block.",
+ thrower->DebugName(),
+ thrower->GetId(),
+ block->GetBlockId()));
+ } else if (!try_block->GetTryEntry()->HasExceptionHandler(*block)) {
+ AddError(StringPrintf("Catch block %d is not an exception handler of "
+ "its exceptional predecessor %s:%d.",
+ block->GetBlockId(),
+ thrower->DebugName(),
+ thrower->GetId()));
+ }
+ }
+ } else {
+ if (!block->GetExceptionalPredecessors().IsEmpty()) {
+ AddError(StringPrintf("Normal block %d has %zu exceptional predecessors.",
+ block->GetBlockId(),
+ block->GetExceptionalPredecessors().Size()));
+ }
+ }
+
// Ensure that catch blocks are not normal successors, and normal blocks are
// never exceptional successors.
const size_t num_normal_successors = block->NumberOfNormalSuccessors();
@@ -512,6 +572,7 @@
void SSAChecker::VisitInstruction(HInstruction* instruction) {
super_type::VisitInstruction(instruction);
+ HBasicBlock* block = instruction->GetBlock();
// Ensure an instruction dominates all its uses.
for (HUseIterator<HInstruction*> use_it(instruction->GetUses());
@@ -543,6 +604,24 @@
}
}
}
+
+ // Ensure that throwing instructions in try blocks are listed as exceptional
+ // predecessors in their exception handlers.
+ if (instruction->CanThrow() && block->IsInTry()) {
+ for (HExceptionHandlerIterator handler_it(*block->GetTryEntry());
+ !handler_it.Done();
+ handler_it.Advance()) {
+ if (!handler_it.Current()->GetExceptionalPredecessors().Contains(instruction)) {
+ AddError(StringPrintf("Instruction %s:%d is in try block %d and can throw "
+ "but its exception handler %d does not list it in "
+ "its exceptional predecessors.",
+ instruction->DebugName(),
+ instruction->GetId(),
+ block->GetBlockId(),
+ handler_it.Current()->GetBlockId()));
+ }
+ }
+ }
}
static Primitive::Type PrimitiveKind(Primitive::Type type) {
@@ -590,11 +669,32 @@
if (phi->IsCatchPhi()) {
// The number of inputs of a catch phi corresponds to the total number of
// throwing instructions caught by this catch block.
+ const GrowableArray<HInstruction*>& predecessors =
+ phi->GetBlock()->GetExceptionalPredecessors();
+ if (phi->InputCount() != predecessors.Size()) {
+ AddError(StringPrintf(
+ "Phi %d in catch block %d has %zu inputs, "
+ "but catch block %d has %zu exceptional predecessors.",
+ phi->GetId(), phi->GetBlock()->GetBlockId(), phi->InputCount(),
+ phi->GetBlock()->GetBlockId(), predecessors.Size()));
+ } else {
+ for (size_t i = 0, e = phi->InputCount(); i < e; ++i) {
+ HInstruction* input = phi->InputAt(i);
+ HInstruction* thrower = predecessors.Get(i);
+ if (!input->StrictlyDominates(thrower)) {
+ AddError(StringPrintf(
+ "Input %d at index %zu of phi %d from catch block %d does not "
+ "dominate the throwing instruction %s:%d.",
+ input->GetId(), i, phi->GetId(), phi->GetBlock()->GetBlockId(),
+ thrower->DebugName(), thrower->GetId()));
+ }
+ }
+ }
} else {
// Ensure the number of inputs of a non-catch phi is the same as the number
// of its predecessors.
const GrowableArray<HBasicBlock*>& predecessors =
- phi->GetBlock()->GetPredecessors();
+ phi->GetBlock()->GetPredecessors();
if (phi->InputCount() != predecessors.Size()) {
AddError(StringPrintf(
"Phi %d in block %d has %zu inputs, "
diff --git a/compiler/optimizing/graph_checker_test.cc b/compiler/optimizing/graph_checker_test.cc
index eca0d93..0f66775 100644
--- a/compiler/optimizing/graph_checker_test.cc
+++ b/compiler/optimizing/graph_checker_test.cc
@@ -25,14 +25,14 @@
* Create a simple control-flow graph composed of two blocks:
*
* BasicBlock 0, succ: 1
- * 0: Goto 1
+ * 0: ReturnVoid 1
* BasicBlock 1, pred: 0
* 1: Exit
*/
HGraph* CreateSimpleCFG(ArenaAllocator* allocator) {
HGraph* graph = CreateGraph(allocator);
HBasicBlock* entry_block = new (allocator) HBasicBlock(graph);
- entry_block->AddInstruction(new (allocator) HGoto());
+ entry_block->AddInstruction(new (allocator) HReturnVoid());
graph->AddBlock(entry_block);
graph->SetEntryBlock(entry_block);
HBasicBlock* exit_block = new (allocator) HBasicBlock(graph);
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index d6b5636..069a7a4 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -386,6 +386,7 @@
StartAttributeStream("recursive") << std::boolalpha
<< invoke->IsRecursive()
<< std::noboolalpha;
+ StartAttributeStream("intrinsic") << invoke->GetIntrinsic();
}
void VisitTryBoundary(HTryBoundary* try_boundary) OVERRIDE {
@@ -464,21 +465,19 @@
} else {
StartAttributeStream("loop") << "B" << info->GetHeader()->GetBlockId();
}
- } else if (IsReferenceTypePropagationPass() && is_after_pass_) {
- if (instruction->GetType() == Primitive::kPrimNot) {
- if (instruction->IsLoadClass()) {
- ReferenceTypeInfo info = instruction->AsLoadClass()->GetLoadedClassRTI();
- ScopedObjectAccess soa(Thread::Current());
- DCHECK(info.IsValid()) << "Invalid RTI for " << instruction->DebugName();
- StartAttributeStream("klass") << PrettyDescriptor(info.GetTypeHandle().Get());
- StartAttributeStream("exact") << std::boolalpha << info.IsExact() << std::noboolalpha;
- } else {
- ReferenceTypeInfo info = instruction->GetReferenceTypeInfo();
- ScopedObjectAccess soa(Thread::Current());
- DCHECK(info.IsValid()) << "Invalid RTI for " << instruction->DebugName();
- StartAttributeStream("klass") << PrettyDescriptor(info.GetTypeHandle().Get());
- StartAttributeStream("exact") << std::boolalpha << info.IsExact() << std::noboolalpha;
- }
+ } else if (IsReferenceTypePropagationPass()
+ && (instruction->GetType() == Primitive::kPrimNot)) {
+ ReferenceTypeInfo info = instruction->IsLoadClass()
+ ? instruction->AsLoadClass()->GetLoadedClassRTI()
+ : instruction->GetReferenceTypeInfo();
+ ScopedObjectAccess soa(Thread::Current());
+ if (info.IsValid()) {
+ StartAttributeStream("klass") << PrettyDescriptor(info.GetTypeHandle().Get());
+ StartAttributeStream("can_be_null")
+ << std::boolalpha << instruction->CanBeNull() << std::noboolalpha;
+ StartAttributeStream("exact") << std::boolalpha << info.IsExact() << std::noboolalpha;
+ } else {
+ DCHECK(!is_after_pass_) << "Type info should be valid after reference type propagation";
}
}
if (disasm_info_ != nullptr) {
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 1551c15..0106595 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -22,8 +22,10 @@
#include "constant_folding.h"
#include "dead_code_elimination.h"
#include "driver/compiler_driver-inl.h"
+#include "driver/compiler_options.h"
#include "driver/dex_compilation_unit.h"
#include "instruction_simplifier.h"
+#include "intrinsics.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "nodes.h"
@@ -38,9 +40,6 @@
namespace art {
-static constexpr int kMaxInlineCodeUnits = 18;
-static constexpr int kDepthLimit = 3;
-
void HInliner::Run() {
if (graph_->IsDebuggable()) {
// For simplicity, we currently never inline when the graph is debuggable. This avoids
@@ -86,7 +85,7 @@
}
static bool IsMethodOrDeclaringClassFinal(ArtMethod* method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return method->IsFinal() || method->GetDeclaringClass()->IsFinal();
}
@@ -96,7 +95,7 @@
* Return nullptr if the runtime target cannot be proven.
*/
static ArtMethod* FindVirtualOrInterfaceTarget(HInvoke* invoke, ArtMethod* resolved_method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (IsMethodOrDeclaringClassFinal(resolved_method)) {
// No need to lookup further, the resolved method will be the target.
return resolved_method;
@@ -162,7 +161,7 @@
static uint32_t FindMethodIndexIn(ArtMethod* method,
const DexFile& dex_file,
uint32_t referrer_index)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (method->GetDexFile()->GetLocation().compare(dex_file.GetLocation()) == 0) {
return method->GetDexMethodIndex();
} else {
@@ -219,7 +218,8 @@
return false;
}
- if (code_item->insns_size_in_code_units_ > kMaxInlineCodeUnits) {
+ size_t inline_max_code_units = compiler_driver_->GetCompilerOptions().GetInlineMaxCodeUnits();
+ if (code_item->insns_size_in_code_units_ > inline_max_code_units) {
VLOG(compiler) << "Method " << PrettyMethod(method_index, caller_dex_file)
<< " is too big to inline";
return false;
@@ -271,11 +271,11 @@
const DexFile::CodeItem* code_item = resolved_method->GetCodeItem();
const DexFile& callee_dex_file = *resolved_method->GetDexFile();
uint32_t method_index = resolved_method->GetDexMethodIndex();
-
+ ClassLinker* class_linker = caller_compilation_unit_.GetClassLinker();
DexCompilationUnit dex_compilation_unit(
nullptr,
caller_compilation_unit_.GetClassLoader(),
- caller_compilation_unit_.GetClassLinker(),
+ class_linker,
*resolved_method->GetDexFile(),
code_item,
resolved_method->GetDeclaringClass()->GetDexClassDefIndex(),
@@ -356,8 +356,10 @@
HConstantFolding fold(callee_graph);
ReferenceTypePropagation type_propagation(callee_graph, handles_);
InstructionSimplifier simplify(callee_graph, stats_);
+ IntrinsicsRecognizer intrinsics(callee_graph, compiler_driver_);
HOptimization* optimizations[] = {
+ &intrinsics,
&dce,
&fold,
&type_propagation,
@@ -369,7 +371,7 @@
optimization->Run();
}
- if (depth_ + 1 < kDepthLimit) {
+ if (depth_ + 1 < compiler_driver_->GetCompilerOptions().GetInlineDepthLimit()) {
HInliner inliner(callee_graph,
outer_compilation_unit_,
dex_compilation_unit,
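Replacing the file-local kDepthLimit/kMaxInlineCodeUnits constants with
CompilerOptions getters makes both inlining knobs configurable per compile.
A sketch of the corresponding CompilerOptions surface; the getters and the
kDefault* names appear in this change, while the defaults are assumed to
match the deleted constants:

    class CompilerOptions {
     public:
      static constexpr size_t kDefaultInlineDepthLimit = 3;     // was kDepthLimit
      static constexpr size_t kDefaultInlineMaxCodeUnits = 18;  // was kMaxInlineCodeUnits
      size_t GetInlineDepthLimit() const { return inline_depth_limit_; }
      size_t GetInlineMaxCodeUnits() const { return inline_max_code_units_; }
     private:
      size_t inline_depth_limit_ = kDefaultInlineDepthLimit;
      size_t inline_max_code_units_ = kDefaultInlineMaxCodeUnits;
    };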
@@ -448,7 +450,33 @@
}
}
- callee_graph->InlineInto(graph_, invoke_instruction);
+ HInstruction* return_replacement = callee_graph->InlineInto(graph_, invoke_instruction);
+
+ // When merging the graph we might create a new NullConstant in the caller graph which does
+ // not have the chance to be typed. We assign the correct type here so that we can keep the
+ // assertion that every reference has a valid type. This also simplifies checks along the way.
+ HNullConstant* null_constant = graph_->GetNullConstant();
+ if (!null_constant->GetReferenceTypeInfo().IsValid()) {
+ ReferenceTypeInfo::TypeHandle obj_handle =
+ handles_->NewHandle(class_linker->GetClassRoot(ClassLinker::kJavaLangObject));
+ null_constant->SetReferenceTypeInfo(
+ ReferenceTypeInfo::Create(obj_handle, false /* is_exact */));
+ }
+
+ if ((return_replacement != nullptr)
+ && (return_replacement->GetType() == Primitive::kPrimNot)) {
+ if (!return_replacement->GetReferenceTypeInfo().IsValid()) {
+ // Make sure that we have a valid type for the return. We may get an invalid one when
+ // we inline invokes with multiple branches and create a Phi for the result.
+ // TODO: we could be more precise by merging the phi inputs but that requires
+ // some functionality from the reference type propagation.
+ DCHECK(return_replacement->IsPhi());
+ ReferenceTypeInfo::TypeHandle return_handle =
+ handles_->NewHandle(resolved_method->GetReturnType());
+ return_replacement->SetReferenceTypeInfo(ReferenceTypeInfo::Create(
+ return_handle, return_handle->IsFinal() /* is_exact */));
+ }
+ }
return true;
}
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 1089812..d391145 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -195,6 +195,7 @@
// Returns whether doing a type test between the class of `object` against `klass` has
// a statically known outcome. The result of the test is stored in `outcome`.
static bool TypeCheckHasKnownOutcome(HLoadClass* klass, HInstruction* object, bool* outcome) {
+ DCHECK(!object->IsNullConstant()) << "Null constants should be special cased";
ReferenceTypeInfo obj_rti = object->GetReferenceTypeInfo();
ScopedObjectAccess soa(Thread::Current());
if (!obj_rti.IsValid()) {
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index 8ef13e1..55e964e 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -103,6 +103,16 @@
LOG(FATAL) << "Unknown/unsupported op size " << method.d.data;
UNREACHABLE();
}
+ case kIntrinsicNumberOfLeadingZeros:
+ switch (GetType(method.d.data, true)) {
+ case Primitive::kPrimInt:
+ return Intrinsics::kIntegerNumberOfLeadingZeros;
+ case Primitive::kPrimLong:
+ return Intrinsics::kLongNumberOfLeadingZeros;
+ default:
+ LOG(FATAL) << "Unknown/unsupported op size " << method.d.data;
+ UNREACHABLE();
+ }
// Abs.
case kIntrinsicAbsDouble:
@@ -359,7 +369,7 @@
std::ostream& operator<<(std::ostream& os, const Intrinsics& intrinsic) {
switch (intrinsic) {
case Intrinsics::kNone:
- os << "No intrinsic.";
+ os << "None";
break;
#define OPTIMIZING_INTRINSICS(Name, IsStatic) \
case Intrinsics::k ## Name: \
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index b4dbf75..a797654 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -224,6 +224,48 @@
locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
}
+static void GenNumberOfLeadingZeros(LocationSummary* locations,
+ Primitive::Type type,
+ ArmAssembler* assembler) {
+ Location in = locations->InAt(0);
+ Register out = locations->Out().AsRegister<Register>();
+
+ DCHECK((type == Primitive::kPrimInt) || (type == Primitive::kPrimLong));
+
+ if (type == Primitive::kPrimLong) {
+ Register in_reg_lo = in.AsRegisterPairLow<Register>();
+ Register in_reg_hi = in.AsRegisterPairHigh<Register>();
+ Label end;
+ __ clz(out, in_reg_hi);
+ __ CompareAndBranchIfNonZero(in_reg_hi, &end);
+ __ clz(out, in_reg_lo);
+ __ AddConstant(out, 32);
+ __ Bind(&end);
+ } else {
+ __ clz(out, in.AsRegister<Register>());
+ }
+}
+
+void IntrinsicLocationsBuilderARM::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
+ CreateIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
+ GenNumberOfLeadingZeros(invoke->GetLocations(), Primitive::kPrimInt, GetAssembler());
+}
+
+void IntrinsicLocationsBuilderARM::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorARM::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
+ GenNumberOfLeadingZeros(invoke->GetLocations(), Primitive::kPrimLong, GetAssembler());
+}
+
static void MathAbsFP(LocationSummary* locations, bool is64bit, ArmAssembler* assembler) {
Location in = locations->InAt(0);
Location out = locations->Out();
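The long variant leans on ARM CLZ returning 32 for a zero input: if the high
word is non-zero its CLZ is the answer; otherwise the answer is 32 plus the
CLZ of the low word (64 when both halves are zero). Reference semantics:

    // clz32 mirrors the ARM CLZ instruction, which yields 32 for input 0
    // (plain __builtin_clz(0) would be undefined behavior).
    static int clz32(uint32_t x) { return x == 0 ? 32 : __builtin_clz(x); }

    static int NumberOfLeadingZeros64(uint32_t hi, uint32_t lo) {
      return hi != 0 ? clz32(hi) : 32 + clz32(lo);
    }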
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 78ac167..2c93fea 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -260,6 +260,33 @@
GenReverseBytes(invoke->GetLocations(), Primitive::kPrimShort, GetVIXLAssembler());
}
+static void GenNumberOfLeadingZeros(LocationSummary* locations,
+ Primitive::Type type,
+ vixl::MacroAssembler* masm) {
+ DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong);
+
+ Location in = locations->InAt(0);
+ Location out = locations->Out();
+
+ __ Clz(RegisterFrom(out, type), RegisterFrom(in, type));
+}
+
+void IntrinsicLocationsBuilderARM64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
+ CreateIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
+ GenNumberOfLeadingZeros(invoke->GetLocations(), Primitive::kPrimInt, GetVIXLAssembler());
+}
+
+void IntrinsicLocationsBuilderARM64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
+ CreateIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
+ GenNumberOfLeadingZeros(invoke->GetLocations(), Primitive::kPrimLong, GetVIXLAssembler());
+}
+
static void GenReverse(LocationSummary* locations,
Primitive::Type type,
vixl::MacroAssembler* masm) {
diff --git a/compiler/optimizing/intrinsics_list.h b/compiler/optimizing/intrinsics_list.h
index 2c9248f..d28c5a3 100644
--- a/compiler/optimizing/intrinsics_list.h
+++ b/compiler/optimizing/intrinsics_list.h
@@ -27,8 +27,10 @@
V(FloatIntBitsToFloat, kStatic) \
V(IntegerReverse, kStatic) \
V(IntegerReverseBytes, kStatic) \
+ V(IntegerNumberOfLeadingZeros, kStatic) \
V(LongReverse, kStatic) \
V(LongReverseBytes, kStatic) \
+ V(LongNumberOfLeadingZeros, kStatic) \
V(ShortReverseBytes, kStatic) \
V(MathAbsDouble, kStatic) \
V(MathAbsFloat, kStatic) \
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 0d6ca09..993c005 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -1756,6 +1756,8 @@
UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck)
UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
+UNIMPLEMENTED_INTRINSIC(IntegerNumberOfLeadingZeros)
+UNIMPLEMENTED_INTRINSIC(LongNumberOfLeadingZeros)
#undef UNIMPLEMENTED_INTRINSIC
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index ea342e9..62cdb4c 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -690,7 +690,7 @@
LocationSummary::kNoCall,
kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
- locations->SetOut(Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresRegister());
locations->AddTemp(Location::RequiresFpuRegister());
return;
}
@@ -732,6 +732,9 @@
// And truncate to an integer.
__ roundss(inPlusPointFive, inPlusPointFive, Immediate(1));
+ // Load maxInt into out.
+ codegen_->Load64BitValue(out, kPrimIntMax);
+
// if inPlusPointFive >= maxInt goto done
__ comiss(inPlusPointFive, codegen_->LiteralFloatAddress(static_cast<float>(kPrimIntMax)));
__ j(kAboveEqual, &done);
@@ -776,6 +779,9 @@
// And truncate to an integer.
__ roundsd(inPlusPointFive, inPlusPointFive, Immediate(1));
+ // Load maxLong into out.
+ codegen_->Load64BitValue(out, kPrimLongMax);
+
// if inPlusPointFive >= maxLong goto done
__ comisd(inPlusPointFive, codegen_->LiteralDoubleAddress(static_cast<double>(kPrimLongMax)));
__ j(kAboveEqual, &done);
@@ -1615,6 +1621,8 @@
UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck)
UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
+UNIMPLEMENTED_INTRINSIC(IntegerNumberOfLeadingZeros)
+UNIMPLEMENTED_INTRINSIC(LongNumberOfLeadingZeros)
#undef UNIMPLEMENTED_INTRINSIC
diff --git a/compiler/optimizing/licm_test.cc b/compiler/optimizing/licm_test.cc
new file mode 100644
index 0000000..2fc66e6d
--- /dev/null
+++ b/compiler/optimizing/licm_test.cc
@@ -0,0 +1,195 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "base/arena_allocator.h"
+#include "builder.h"
+#include "gtest/gtest.h"
+#include "licm.h"
+#include "nodes.h"
+#include "optimizing_unit_test.h"
+#include "side_effects_analysis.h"
+
+namespace art {
+
+/**
+ * Fixture class for the LICM tests.
+ */
+class LICMTest : public testing::Test {
+ public:
+ LICMTest() : pool_(), allocator_(&pool_) {
+ graph_ = CreateGraph(&allocator_);
+ }
+
+ ~LICMTest() { }
+
+ // Builds a singly-nested loop structure in the CFG. Tests can further populate
+ // the basic blocks with instructions to set up interesting scenarios.
+ void BuildLoop() {
+ entry_ = new (&allocator_) HBasicBlock(graph_);
+ loop_preheader_ = new (&allocator_) HBasicBlock(graph_);
+ loop_header_ = new (&allocator_) HBasicBlock(graph_);
+ loop_body_ = new (&allocator_) HBasicBlock(graph_);
+ exit_ = new (&allocator_) HBasicBlock(graph_);
+
+ graph_->AddBlock(entry_);
+ graph_->AddBlock(loop_preheader_);
+ graph_->AddBlock(loop_header_);
+ graph_->AddBlock(loop_body_);
+ graph_->AddBlock(exit_);
+
+ graph_->SetEntryBlock(entry_);
+ graph_->SetExitBlock(exit_);
+
+ // Set up loop flow in CFG.
+ entry_->AddSuccessor(loop_preheader_);
+ loop_preheader_->AddSuccessor(loop_header_);
+ loop_header_->AddSuccessor(loop_body_);
+ loop_header_->AddSuccessor(exit_);
+ loop_body_->AddSuccessor(loop_header_);
+
+ // Provide boilerplate instructions.
+ parameter_ = new (&allocator_) HParameterValue(0, Primitive::kPrimNot);
+ entry_->AddInstruction(parameter_);
+ constant_ = new (&allocator_) HConstant(Primitive::kPrimInt);
+ loop_preheader_->AddInstruction(constant_);
+ loop_header_->AddInstruction(new (&allocator_) HIf(parameter_));
+ loop_body_->AddInstruction(new (&allocator_) HGoto());
+ exit_->AddInstruction(new (&allocator_) HExit());
+ }
+
+ // Performs LICM optimizations (after proper setup).
+ void PerformLICM() {
+ ASSERT_TRUE(graph_->TryBuildingSsa());
+ SideEffectsAnalysis side_effects(graph_);
+ side_effects.Run();
+ LICM licm(graph_, side_effects);
+ licm.Run();
+ }
+
+ // General building fields.
+ ArenaPool pool_;
+ ArenaAllocator allocator_;
+ HGraph* graph_;
+
+ // Specific basic blocks.
+ HBasicBlock* entry_;
+ HBasicBlock* loop_preheader_;
+ HBasicBlock* loop_header_;
+ HBasicBlock* loop_body_;
+ HBasicBlock* exit_;
+
+ HInstruction* parameter_; // "this"
+ HInstruction* constant_;
+};
+
+//
+// The actual LICM tests.
+//
+
+TEST_F(LICMTest, ConstantHoisting) {
+ BuildLoop();
+
+ // Populate the loop with instructions: set array to constant.
+ HInstruction* constant = new (&allocator_) HConstant(Primitive::kPrimDouble);
+ loop_body_->InsertInstructionBefore(constant, loop_body_->GetLastInstruction());
+ HInstruction* set_array = new (&allocator_) HArraySet(
+ parameter_, constant_, constant, Primitive::kPrimDouble, 0);
+ loop_body_->InsertInstructionBefore(set_array, loop_body_->GetLastInstruction());
+
+ EXPECT_EQ(constant->GetBlock(), loop_body_);
+ EXPECT_EQ(set_array->GetBlock(), loop_body_);
+ PerformLICM();
+ EXPECT_EQ(constant->GetBlock(), loop_preheader_);
+ EXPECT_EQ(set_array->GetBlock(), loop_body_);
+}
+
+TEST_F(LICMTest, FieldHoisting) {
+ BuildLoop();
+
+ // Populate the loop with instructions: set/get field with different types.
+ HInstruction* get_field = new (&allocator_) HInstanceFieldGet(
+ parameter_, Primitive::kPrimLong, MemberOffset(10),
+ false, kUnknownFieldIndex, graph_->GetDexFile());
+ loop_body_->InsertInstructionBefore(get_field, loop_body_->GetLastInstruction());
+ HInstruction* set_field = new (&allocator_) HInstanceFieldSet(
+ parameter_, constant_, Primitive::kPrimInt, MemberOffset(20),
+ false, kUnknownFieldIndex, graph_->GetDexFile());
+ loop_body_->InsertInstructionBefore(set_field, loop_body_->GetLastInstruction());
+
+ EXPECT_EQ(get_field->GetBlock(), loop_body_);
+ EXPECT_EQ(set_field->GetBlock(), loop_body_);
+ PerformLICM();
+ EXPECT_EQ(get_field->GetBlock(), loop_preheader_);
+ EXPECT_EQ(set_field->GetBlock(), loop_body_);
+}
+
+TEST_F(LICMTest, NoFieldHoisting) {
+ BuildLoop();
+
+ // Populate the loop with instructions: set/get field with same types.
+ HInstruction* get_field = new (&allocator_) HInstanceFieldGet(
+ parameter_, Primitive::kPrimLong, MemberOffset(10),
+ false, kUnknownFieldIndex, graph_->GetDexFile());
+ loop_body_->InsertInstructionBefore(get_field, loop_body_->GetLastInstruction());
+ HInstruction* set_field = new (&allocator_) HInstanceFieldSet(
+ parameter_, get_field, Primitive::kPrimLong, MemberOffset(10),
+ false, kUnknownFieldIndex, graph_->GetDexFile());
+ loop_body_->InsertInstructionBefore(set_field, loop_body_->GetLastInstruction());
+
+ EXPECT_EQ(get_field->GetBlock(), loop_body_);
+ EXPECT_EQ(set_field->GetBlock(), loop_body_);
+ PerformLICM();
+ EXPECT_EQ(get_field->GetBlock(), loop_body_);
+ EXPECT_EQ(set_field->GetBlock(), loop_body_);
+}
+
+TEST_F(LICMTest, ArrayHoisting) {
+ BuildLoop();
+
+ // Populate the loop with instructions: set/get array with different types.
+ HInstruction* get_array = new (&allocator_) HArrayGet(
+ parameter_, constant_, Primitive::kPrimLong);
+ loop_body_->InsertInstructionBefore(get_array, loop_body_->GetLastInstruction());
+ HInstruction* set_array = new (&allocator_) HArraySet(
+ parameter_, constant_, constant_, Primitive::kPrimInt, 0);
+ loop_body_->InsertInstructionBefore(set_array, loop_body_->GetLastInstruction());
+
+ EXPECT_EQ(get_array->GetBlock(), loop_body_);
+ EXPECT_EQ(set_array->GetBlock(), loop_body_);
+ PerformLICM();
+ EXPECT_EQ(get_array->GetBlock(), loop_preheader_);
+ EXPECT_EQ(set_array->GetBlock(), loop_body_);
+}
+
+TEST_F(LICMTest, NoArrayHoisting) {
+ BuildLoop();
+
+ // Populate the loop with instructions: set/get array with same types.
+ HInstruction* get_array = new (&allocator_) HArrayGet(
+ parameter_, constant_, Primitive::kPrimLong);
+ loop_body_->InsertInstructionBefore(get_array, loop_body_->GetLastInstruction());
+ HInstruction* set_array = new (&allocator_) HArraySet(
+ parameter_, get_array, constant_, Primitive::kPrimLong, 0);
+ loop_body_->InsertInstructionBefore(set_array, loop_body_->GetLastInstruction());
+
+ EXPECT_EQ(get_array->GetBlock(), loop_body_);
+ EXPECT_EQ(set_array->GetBlock(), loop_body_);
+ PerformLICM();
+ EXPECT_EQ(get_array->GetBlock(), loop_body_);
+ EXPECT_EQ(set_array->GetBlock(), loop_body_);
+}
+
+} // namespace art
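The tests above all exercise the same hoisting criterion: an instruction may move to the preheader only if its inputs are defined outside the loop and its reads do not alias anything the loop writes. A minimal sketch of that condition, with hypothetical helpers (InstructionSideEffects, LoopWrites) standing in for ART's SideEffects queries:

    // Sketch only: the condition the LICM tests above exercise.
    bool CanHoist(HInstruction* instruction, HLoopInformation* loop) {
      // Every input must be defined before the loop (loop invariance).
      for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
        if (loop->Contains(*instruction->InputAt(i)->GetBlock())) {
          return false;  // Produced inside the loop: not invariant.
        }
      }
      // A read may only be hoisted when no write in the loop can alias it;
      // this is why NoFieldHoisting/NoArrayHoisting keep their gets in place.
      return !InstructionSideEffects(instruction).MayDependOn(LoopWrites(loop));
    }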
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index efaf48c..d7795f9 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -564,6 +564,13 @@
return false;
}
+void HBasicBlock::AddExceptionalPredecessor(HInstruction* exceptional_predecessor) {
+ DCHECK(exceptional_predecessor->CanThrow());
+ DCHECK(exceptional_predecessor->GetBlock()->IsInTry());
+ DCHECK(exceptional_predecessor->GetBlock()->GetTryEntry()->HasExceptionHandler(*this));
+ exceptional_predecessors_.Add(exceptional_predecessor);
+}
+
static void UpdateInputsUsers(HInstruction* instruction) {
for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
instruction->InputAt(i)->AddUseAt(instruction, i);
@@ -1221,10 +1228,12 @@
return false;
}
- // Exception handler lists cannot contain duplicates, which makes it
- // sufficient to test inclusion only in one direction.
- for (HExceptionHandlerIterator it(other); !it.Done(); it.Advance()) {
- if (!HasExceptionHandler(*it.Current())) {
+ // Exception handlers need to be stored in the same order.
+ for (HExceptionHandlerIterator it1(*this), it2(other);
+ !it1.Done();
+ it1.Advance(), it2.Advance()) {
+ DCHECK(!it2.Done());
+ if (it1.Current() != it2.Current()) {
return false;
}
}
@@ -1481,7 +1490,7 @@
blocks_.Put(block->GetBlockId(), nullptr);
}
-void HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
+HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
DCHECK(HasExitBlock()) << "Unimplemented scenario";
// Update the environments in this graph to have the invoke's environment
// as parent.
@@ -1506,6 +1515,7 @@
outer_graph->SetHasBoundsChecks(true);
}
+ HInstruction* return_value = nullptr;
if (GetBlocks().Size() == 3) {
// Simple case of an entry block, a body block, and an exit block.
// Put the body block's instruction into `invoke`'s block.
@@ -1520,7 +1530,8 @@
// Replace the invoke with the return value of the inlined graph.
if (last->IsReturn()) {
- invoke->ReplaceWith(last->InputAt(0));
+ return_value = last->InputAt(0);
+ invoke->ReplaceWith(return_value);
} else {
DCHECK(last->IsReturnVoid());
}
@@ -1542,7 +1553,6 @@
// Update all predecessors of the exit block (now the `to` block)
// to not `HReturn` but `HGoto` instead.
- HInstruction* return_value = nullptr;
bool returns_void = to->GetPredecessors().Get(0)->GetLastInstruction()->IsReturnVoid();
if (to->GetPredecessors().Size() == 1) {
HBasicBlock* predecessor = to->GetPredecessors().Get(0);
@@ -1676,6 +1686,8 @@
// Finally remove the invoke from the caller.
invoke->GetBlock()->RemoveInstruction(invoke);
+
+ return return_value;
}
/*
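The InlineInto change above threads the replacement value back to the caller. A hedged illustration of how a caller such as the inliner can consume it (ProcessInlinedReturnValue is a hypothetical follow-up, not the actual HInliner code):

    // Illustrative only: consuming the new return value of InlineInto.
    HInstruction* return_value = callee_graph->InlineInto(caller_graph, invoke);
    if (return_value != nullptr) {
      // Non-void call: the invoke has been replaced by `return_value`, so
      // caller-side bookkeeping (e.g. reference type info) can attach to it.
      ProcessInlinedReturnValue(return_value);
    }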
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 1190fae..638ac95 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -58,6 +58,7 @@
static const int kDefaultNumberOfBlocks = 8;
static const int kDefaultNumberOfSuccessors = 2;
static const int kDefaultNumberOfPredecessors = 2;
+static const int kDefaultNumberOfExceptionalPredecessors = 0;
static const int kDefaultNumberOfDominatedBlocks = 1;
static const int kDefaultNumberOfBackEdges = 1;
@@ -210,7 +211,9 @@
void ComputeTryBlockInformation();
// Inline this graph in `outer_graph`, replacing the given `invoke` instruction.
- void InlineInto(HGraph* outer_graph, HInvoke* invoke);
+ // Returns the instruction used to replace the invoke expression or null if the
+ // invoke is for a void method.
+ HInstruction* InlineInto(HGraph* outer_graph, HInvoke* invoke);
// Need to add a couple of blocks to test if the loop body is entered and
// put deoptimization instructions, etc.
@@ -306,7 +309,12 @@
// already, it is created and inserted into the graph. This method is only for
// integral types.
HConstant* GetConstant(Primitive::Type type, int64_t value);
+
+ // TODO: This is problematic for the consistency of reference type propagation
+ // because it can be created anytime after the pass and thus it will be left
+ // with an invalid type.
HNullConstant* GetNullConstant();
+
HIntConstant* GetIntConstant(int32_t value) {
return CreateConstant(value, &cached_int_constants_);
}
@@ -557,6 +565,7 @@
explicit HBasicBlock(HGraph* graph, uint32_t dex_pc = kNoDexPc)
: graph_(graph),
predecessors_(graph->GetArena(), kDefaultNumberOfPredecessors),
+ exceptional_predecessors_(graph->GetArena(), kDefaultNumberOfExceptionalPredecessors),
successors_(graph->GetArena(), kDefaultNumberOfSuccessors),
loop_information_(nullptr),
dominator_(nullptr),
@@ -571,6 +580,10 @@
return predecessors_;
}
+ const GrowableArray<HInstruction*>& GetExceptionalPredecessors() const {
+ return exceptional_predecessors_;
+ }
+
const GrowableArray<HBasicBlock*>& GetSuccessors() const {
return successors_;
}
@@ -639,6 +652,8 @@
HInstruction* GetLastPhi() const { return phis_.last_instruction_; }
const HInstructionList& GetPhis() const { return phis_; }
+ void AddExceptionalPredecessor(HInstruction* exceptional_predecessor);
+
void AddSuccessor(HBasicBlock* block) {
successors_.Add(block);
block->predecessors_.Add(this);
@@ -678,6 +693,10 @@
predecessors_.Delete(block);
}
+ void RemoveExceptionalPredecessor(HInstruction* instruction) {
+ exceptional_predecessors_.Delete(instruction);
+ }
+
void RemoveSuccessor(HBasicBlock* block) {
successors_.Delete(block);
}
@@ -714,6 +733,15 @@
return -1;
}
+ size_t GetExceptionalPredecessorIndexOf(HInstruction* exceptional_predecessor) const {
+ for (size_t i = 0, e = exceptional_predecessors_.Size(); i < e; ++i) {
+ if (exceptional_predecessors_.Get(i) == exceptional_predecessor) {
+ return i;
+ }
+ }
+ return -1;
+ }
+
size_t GetSuccessorIndexOf(HBasicBlock* successor) const {
for (size_t i = 0, e = successors_.Size(); i < e; ++i) {
if (successors_.Get(i) == successor) {
@@ -874,6 +902,7 @@
private:
HGraph* graph_;
GrowableArray<HBasicBlock*> predecessors_;
+ GrowableArray<HInstruction*> exceptional_predecessors_;
GrowableArray<HBasicBlock*> successors_;
HInstructionList instructions_;
HInstructionList phis_;
@@ -932,6 +961,7 @@
M(BoundsCheck, Instruction) \
M(BoundType, Instruction) \
M(CheckCast, Instruction) \
+ M(ClearException, Instruction) \
M(ClinitCheck, Instruction) \
M(Compare, BinaryOperation) \
M(Condition, BinaryOperation) \
@@ -1467,26 +1497,27 @@
static ReferenceTypeInfo CreateInvalid() { return ReferenceTypeInfo(); }
- static bool IsValidHandle(TypeHandle handle) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ static bool IsValidHandle(TypeHandle handle) SHARED_REQUIRES(Locks::mutator_lock_) {
return handle.GetReference() != nullptr;
}
- bool IsValid() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsValid() const SHARED_REQUIRES(Locks::mutator_lock_) {
return IsValidHandle(type_handle_);
}
bool IsExact() const { return is_exact_; }
- bool IsObjectClass() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+
+ bool IsObjectClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(IsValid());
return GetTypeHandle()->IsObjectClass();
}
- bool IsInterface() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsInterface() const SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(IsValid());
return GetTypeHandle()->IsInterface();
}
Handle<mirror::Class> GetTypeHandle() const { return type_handle_; }
- bool IsSupertypeOf(ReferenceTypeInfo rti) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsSupertypeOf(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(IsValid());
DCHECK(rti.IsValid());
return GetTypeHandle()->IsAssignableFrom(rti.GetTypeHandle().Get());
@@ -1495,7 +1526,7 @@
// Returns true if the type information provide the same amount of details.
// Note that it does not mean that the instructions have the same actual type
// (because the type can be the result of a merge).
- bool IsEqual(ReferenceTypeInfo rti) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsEqual(ReferenceTypeInfo rti) SHARED_REQUIRES(Locks::mutator_lock_) {
if (!IsValid() && !rti.IsValid()) {
// Invalid types are equal.
return true;
@@ -3589,7 +3620,7 @@
const DexFile& dex_file)
: HExpression(
field_type,
- SideEffects::SideEffects::FieldReadOfType(field_type, is_volatile)),
+ SideEffects::FieldReadOfType(field_type, is_volatile)),
field_info_(field_offset, field_type, is_volatile, field_idx, dex_file) {
SetRawInputAt(0, value);
}
@@ -4036,7 +4067,7 @@
const DexFile& dex_file)
: HExpression(
field_type,
- SideEffects::SideEffects::FieldReadOfType(field_type, is_volatile)),
+ SideEffects::FieldReadOfType(field_type, is_volatile)),
field_info_(field_offset, field_type, is_volatile, field_idx, dex_file) {
SetRawInputAt(0, cls);
}
@@ -4112,6 +4143,18 @@
DISALLOW_COPY_AND_ASSIGN(HLoadException);
};
+// Implicit part of move-exception which clears thread-local exception storage.
+// Must not be removed because the runtime expects the TLS to get cleared.
+class HClearException : public HTemplateInstruction<0> {
+ public:
+ HClearException() : HTemplateInstruction(SideEffects::AllWrites()) {}
+
+ DECLARE_INSTRUCTION(ClearException);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HClearException);
+};
+
class HThrow : public HTemplateInstruction<1> {
public:
HThrow(HInstruction* exception, uint32_t dex_pc)
@@ -4184,7 +4227,8 @@
HBoundType(HInstruction* input, ReferenceTypeInfo upper_bound, bool upper_can_be_null)
: HExpression(Primitive::kPrimNot, SideEffects::None()),
upper_bound_(upper_bound),
- upper_can_be_null_(upper_can_be_null) {
+ upper_can_be_null_(upper_can_be_null),
+ can_be_null_(upper_can_be_null) {
DCHECK_EQ(input->GetType(), Primitive::kPrimNot);
SetRawInputAt(0, input);
SetReferenceTypeInfo(upper_bound_);
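The exceptional-predecessor API added to HBasicBlock above mirrors the ordinary predecessor accessors. A short sketch of walking a handler's throwing predecessors, assuming only the accessors introduced in this change:

    // Sketch: enumerate the throwing instructions that can reach `handler`.
    void DumpExceptionalPredecessors(HBasicBlock* handler) {
      const GrowableArray<HInstruction*>& preds =
          handler->GetExceptionalPredecessors();
      for (size_t i = 0, e = preds.Size(); i < e; ++i) {
        HInstruction* thrower = preds.Get(i);
        DCHECK(thrower->CanThrow());
        LOG(INFO) << "handler B" << handler->GetBlockId()
                  << " reachable from instruction " << thrower->GetId();
      }
    }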
diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc
index fe3bb1a..f455571 100644
--- a/compiler/optimizing/optimizing_cfi_test.cc
+++ b/compiler/optimizing/optimizing_cfi_test.cc
@@ -29,7 +29,7 @@
namespace art {
// Run the tests only on host.
-#ifndef HAVE_ANDROID_OS
+#ifndef __ANDROID__
class OptimizingCFITest : public CFITest {
public:
@@ -125,6 +125,6 @@
TEST_ISA(kX86)
TEST_ISA(kX86_64)
-#endif // HAVE_ANDROID_OS
+#endif // __ANDROID__
} // namespace art
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 601d668..6a50b7d 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -86,7 +86,7 @@
* Filter to apply to the visualizer. Methods whose name contain that filter will
* be dumped.
*/
-static const char* kStringFilter = "";
+static constexpr const char kStringFilter[] = "";
class PassScope;
@@ -105,12 +105,14 @@
visualizer_enabled_(!compiler_driver->GetDumpCfgFileName().empty()),
visualizer_(visualizer_output, graph, *codegen),
graph_in_bad_state_(false) {
- if (strstr(method_name, kStringFilter) == nullptr) {
- timing_logger_enabled_ = visualizer_enabled_ = false;
- }
- if (visualizer_enabled_) {
- visualizer_.PrintHeader(method_name_);
- codegen->SetDisassemblyInformation(&disasm_info_);
+ if (timing_logger_enabled_ || visualizer_enabled_) {
+ if (!IsVerboseMethod(compiler_driver, method_name)) {
+ timing_logger_enabled_ = visualizer_enabled_ = false;
+ }
+ if (visualizer_enabled_) {
+ visualizer_.PrintHeader(method_name_);
+ codegen->SetDisassemblyInformation(&disasm_info_);
+ }
}
}
@@ -169,6 +171,23 @@
}
}
+ static bool IsVerboseMethod(CompilerDriver* compiler_driver, const char* method_name) {
+ // Test an exact match to --verbose-methods. If verbose-methods is set, this overrides an
+ // empty kStringFilter matching all methods.
+ if (compiler_driver->GetCompilerOptions().HasVerboseMethods()) {
+ return compiler_driver->GetCompilerOptions().IsVerboseMethod(method_name);
+ }
+
+ // Test the kStringFilter sub-string. A constexpr helper variable silences the
+ // unreachable-code warning when the string is empty.
+ constexpr bool kStringFilterEmpty = arraysize(kStringFilter) <= 1;
+ if (kStringFilterEmpty || strstr(method_name, kStringFilter) != nullptr) {
+ return true;
+ }
+
+ return false;
+ }
+
HGraph* const graph_;
const char* method_name_;
@@ -237,7 +256,7 @@
}
uintptr_t GetEntryPointOf(ArtMethod* method) const OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCodePtrSize(
InstructionSetPointerSize(GetCompilerDriver()->GetInstructionSet())));
}
@@ -350,6 +369,36 @@
}
}
+static void MaybeRunInliner(HGraph* graph,
+ CompilerDriver* driver,
+ OptimizingCompilerStats* stats,
+ const DexCompilationUnit& dex_compilation_unit,
+ PassObserver* pass_observer,
+ StackHandleScopeCollection* handles) {
+ const CompilerOptions& compiler_options = driver->GetCompilerOptions();
+ bool should_inline = (compiler_options.GetInlineDepthLimit() > 0)
+ && (compiler_options.GetInlineMaxCodeUnits() > 0);
+ if (!should_inline) {
+ return;
+ }
+
+ ArenaAllocator* arena = graph->GetArena();
+ HInliner* inliner = new (arena) HInliner(
+ graph, dex_compilation_unit, dex_compilation_unit, driver, handles, stats);
+ ReferenceTypePropagation* type_propagation =
+ new (arena) ReferenceTypePropagation(graph, handles,
+ "reference_type_propagation_after_inlining");
+
+ HOptimization* optimizations[] = {
+ inliner,
+ // Run another type propagation phase: inlining will open up more opportunities
+ // to remove checkcast/instanceof and null checks.
+ type_propagation,
+ };
+
+ RunOptimizations(optimizations, arraysize(optimizations), pass_observer);
+}
+
static void RunOptimizations(HGraph* graph,
CompilerDriver* driver,
OptimizingCompilerStats* stats,
@@ -364,10 +413,6 @@
HConstantFolding* fold1 = new (arena) HConstantFolding(graph);
InstructionSimplifier* simplify1 = new (arena) InstructionSimplifier(graph, stats);
HBooleanSimplifier* boolean_simplify = new (arena) HBooleanSimplifier(graph);
-
- HInliner* inliner = new (arena) HInliner(
- graph, dex_compilation_unit, dex_compilation_unit, driver, handles, stats);
-
HConstantFolding* fold2 = new (arena) HConstantFolding(graph, "constant_folding_after_inlining");
SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects);
@@ -379,29 +424,29 @@
graph, stats, "instruction_simplifier_after_types");
InstructionSimplifier* simplify3 = new (arena) InstructionSimplifier(
graph, stats, "instruction_simplifier_after_bce");
- ReferenceTypePropagation* type_propagation2 =
- new (arena) ReferenceTypePropagation(
- graph, handles, "reference_type_propagation_after_inlining");
InstructionSimplifier* simplify4 = new (arena) InstructionSimplifier(
graph, stats, "instruction_simplifier_before_codegen");
IntrinsicsRecognizer* intrinsics = new (arena) IntrinsicsRecognizer(graph, driver);
- HOptimization* optimizations[] = {
+ HOptimization* optimizations1[] = {
intrinsics,
fold1,
simplify1,
type_propagation,
dce1,
- simplify2,
- inliner,
- // Run another type propagation phase: inlining will open up more opprotunities
- // to remove checkast/instanceof and null checks.
- type_propagation2,
+ simplify2
+ };
+
+ RunOptimizations(optimizations1, arraysize(optimizations1), pass_observer);
+
+ MaybeRunInliner(graph, driver, stats, dex_compilation_unit, pass_observer, handles);
+
+ HOptimization* optimizations2[] = {
// BooleanSimplifier depends on the InstructionSimplifier removing redundant
// suspend checks to recognize empty blocks.
boolean_simplify,
- fold2,
+ fold2, // TODO: if we don't inline we can also skip fold2.
side_effects,
gvn,
licm,
@@ -414,7 +459,7 @@
simplify4,
};
- RunOptimizations(optimizations, arraysize(optimizations), pass_observer);
+ RunOptimizations(optimizations2, arraysize(optimizations2), pass_observer);
}
// The stack map we generate must be 4-byte aligned on ARM. Since existing
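The pipeline split above makes the inlining gate explicit: both option limits must be positive, so either --inline-depth-limit=0 or --inline-max-code-units=0 disables inlining (and the follow-up type propagation) entirely. The predicate in isolation, as a sketch:

    // Sketch of the gate used by MaybeRunInliner above.
    bool ShouldRunInliner(const CompilerOptions& options) {
      return options.GetInlineDepthLimit() > 0 &&
             options.GetInlineMaxCodeUnits() > 0;
    }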
diff --git a/compiler/optimizing/parallel_move_resolver.cc b/compiler/optimizing/parallel_move_resolver.cc
index 54ea6f1..f9d812f 100644
--- a/compiler/optimizing/parallel_move_resolver.cc
+++ b/compiler/optimizing/parallel_move_resolver.cc
@@ -38,6 +38,20 @@
// Build up a worklist of moves.
BuildInitialMoveList(parallel_move);
+ // Perform stack-to-stack slot moves first, to take advantage of a free register on constrained machines.
+ for (size_t i = 0; i < moves_.Size(); ++i) {
+ const MoveOperands& move = *moves_.Get(i);
+ // Ignore constants and moves already eliminated.
+ if (move.IsEliminated() || move.GetSource().IsConstant()) {
+ continue;
+ }
+
+ if ((move.GetSource().IsStackSlot() || move.GetSource().IsDoubleStackSlot()) &&
+ (move.GetDestination().IsStackSlot() || move.GetDestination().IsDoubleStackSlot())) {
+ PerformMove(i);
+ }
+ }
+
for (size_t i = 0; i < moves_.Size(); ++i) {
const MoveOperands& move = *moves_.Get(i);
// Skip constants to perform them last. They don't block other moves
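The new first pass exists because memory-to-memory copies typically need a scratch register, and registers are most likely to be free before any register moves have been scheduled. A sketch of the ordering, with IsStackLocation as a hypothetical shorthand for the stack-slot checks above:

    // Sketch of the resolver's pass ordering after this change.
    for (size_t i = 0; i < moves.Size(); ++i) {
      const MoveOperands& move = *moves.Get(i);
      if (!move.IsEliminated() && !move.GetSource().IsConstant() &&
          IsStackLocation(move.GetSource()) &&
          IsStackLocation(move.GetDestination())) {
        PerformMove(i);  // Memory-to-memory first, while a register is free.
      }
    }
    // ...then the remaining non-constant moves, and constants last.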
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index d11a441..d1c1134 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -153,7 +153,7 @@
HInstruction* obj,
HLoadClass* load_class,
bool upper_can_be_null)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ReferenceTypeInfo obj_rti = obj->GetReferenceTypeInfo();
ReferenceTypeInfo class_rti = load_class->GetLoadedClassRTI();
HBoundType* bound_type = new (arena) HBoundType(obj, class_rti, upper_can_be_null);
@@ -180,7 +180,7 @@
ReferenceTypeInfo upper_bound,
HInstruction* dominator_instr,
HBasicBlock* dominator_block)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// If the position where we should insert the bound type is not already
// a bound type then we need to create one.
if (position == nullptr || !position->IsBoundType()) {
@@ -388,6 +388,8 @@
}
void RTPVisitor::VisitNullConstant(HNullConstant* instr) {
+ // TODO: The null constant could be bound contextually (e.g. based on return statements)
+ // to a more precise type.
instr->SetReferenceTypeInfo(
ReferenceTypeInfo::Create(object_class_handle_, /* is_exact */ false));
}
@@ -438,8 +440,10 @@
Runtime::Current()->GetClassLinker()->FindDexCache(instr->GetDexFile());
// Get type from dex cache assuming it was populated by the verifier.
mirror::Class* resolved_class = dex_cache->GetResolvedType(instr->GetTypeIndex());
- DCHECK(resolved_class != nullptr);
- ReferenceTypeInfo::TypeHandle handle = handles_->NewHandle(resolved_class);
+ // TODO: investigate why we are still getting unresolved classes: b/22821472.
+ ReferenceTypeInfo::TypeHandle handle = (resolved_class != nullptr)
+ ? handles_->NewHandle(resolved_class)
+ : object_class_handle_;
instr->SetLoadedClassRTI(ReferenceTypeInfo::Create(handle, /* is_exact */ true));
instr->SetReferenceTypeInfo(ReferenceTypeInfo::Create(class_class_handle_, /* is_exact */ true));
}
@@ -546,7 +550,7 @@
static void UpdateArrayGet(HArrayGet* instr,
StackHandleScopeCollection* handles,
ReferenceTypeInfo::TypeHandle object_class_handle)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK_EQ(Primitive::kPrimNot, instr->GetType());
ReferenceTypeInfo parent_rti = instr->InputAt(0)->GetReferenceTypeInfo();
diff --git a/compiler/optimizing/reference_type_propagation.h b/compiler/optimizing/reference_type_propagation.h
index 9196b56..14d4a82 100644
--- a/compiler/optimizing/reference_type_propagation.h
+++ b/compiler/optimizing/reference_type_propagation.h
@@ -41,8 +41,8 @@
private:
void VisitPhi(HPhi* phi);
void VisitBasicBlock(HBasicBlock* block);
- void UpdateBoundType(HBoundType* bound_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void UpdatePhi(HPhi* phi) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void UpdateBoundType(HBoundType* bound_type) SHARED_REQUIRES(Locks::mutator_lock_);
+ void UpdatePhi(HPhi* phi) SHARED_REQUIRES(Locks::mutator_lock_);
void BoundTypeForIfNotNull(HBasicBlock* block);
void BoundTypeForIfInstanceOf(HBasicBlock* block);
void ProcessWorklist();
@@ -53,7 +53,7 @@
bool UpdateReferenceTypeInfo(HInstruction* instr);
ReferenceTypeInfo MergeTypes(const ReferenceTypeInfo& a, const ReferenceTypeInfo& b)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
StackHandleScopeCollection* handles_;
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index ff2e6ad..2c34e4d 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -570,7 +570,9 @@
if (instruction->GetBlock()->IsInTry() && instruction->CanThrow()) {
HTryBoundary* try_block = instruction->GetBlock()->GetTryEntry();
for (HExceptionHandlerIterator it(*try_block); !it.Done(); it.Advance()) {
- GrowableArray<HInstruction*>* handler_locals = GetLocalsFor(it.Current());
+ HBasicBlock* handler = it.Current();
+ handler->AddExceptionalPredecessor(instruction);
+ GrowableArray<HInstruction*>* handler_locals = GetLocalsFor(handler);
for (size_t i = 0, e = current_locals_->Size(); i < e; ++i) {
HInstruction* local_value = current_locals_->Get(i);
if (local_value != nullptr) {
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index 701dbb0..40502c1 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -225,7 +225,7 @@
// SsaLivenessAnalysis.
for (size_t i = 0, e = environment->Size(); i < e; ++i) {
HInstruction* instruction = environment->GetInstructionAt(i);
- bool should_be_live = ShouldBeLiveForEnvironment(instruction);
+ bool should_be_live = ShouldBeLiveForEnvironment(current, instruction);
if (should_be_live) {
DCHECK(instruction->HasSsaIndex());
live_in->SetBit(instruction->GetSsaIndex());
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index 220ee6a..a7044de 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -1201,8 +1201,14 @@
// Update the live_out set of the block and returns whether it has changed.
bool UpdateLiveOut(const HBasicBlock& block);
- static bool ShouldBeLiveForEnvironment(HInstruction* instruction) {
+ // Returns whether `instruction` in an HEnvironment held by `env_holder`
+ // should be kept live by the HEnvironment.
+ static bool ShouldBeLiveForEnvironment(HInstruction* env_holder,
+ HInstruction* instruction) {
if (instruction == nullptr) return false;
+ // A value that's not live in compiled code may still be needed in interpreter,
+ // due to code motion, etc.
+ if (env_holder->IsDeoptimize()) return true;
if (instruction->GetBlock()->GetGraph()->IsDebuggable()) return true;
return instruction->GetType() == Primitive::kPrimNot;
}
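Restated, the policy above keeps an environment value alive in three cases; a compact sketch (debuggability folded into a parameter for illustration):

    // Sketch of ShouldBeLiveForEnvironment's decision, as changed above.
    bool KeepAlive(HInstruction* env_holder, HInstruction* value, bool debuggable) {
      if (value == nullptr) return false;
      if (env_holder->IsDeoptimize()) return true;  // Interpreter may read it.
      if (debuggable) return true;                  // Debugger may inspect it.
      return value->GetType() == Primitive::kPrimNot;  // Reference: GC root.
    }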
diff --git a/compiler/trampolines/trampoline_compiler.h b/compiler/trampolines/trampoline_compiler.h
index bdab279..9fb2245 100644
--- a/compiler/trampolines/trampoline_compiler.h
+++ b/compiler/trampolines/trampoline_compiler.h
@@ -27,10 +27,10 @@
// Create code that will invoke the function held in thread local storage.
const std::vector<uint8_t>* CreateTrampoline32(InstructionSet isa, EntryPointCallingConvention abi,
ThreadOffset<4> entry_point_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
const std::vector<uint8_t>* CreateTrampoline64(InstructionSet isa, EntryPointCallingConvention abi,
ThreadOffset<8> entry_point_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
} // namespace art
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index 413b9ea..b499ddd 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -133,14 +133,27 @@
AdjustFixupIfNeeded(&fixup, &current_code_size, &fixups_to_recalculate);
}
while (!fixups_to_recalculate.empty()) {
- // Pop the fixup.
- FixupId fixup_id = fixups_to_recalculate.front();
- fixups_to_recalculate.pop_front();
- Fixup* fixup = GetFixup(fixup_id);
- DCHECK_NE(buffer_.Load<int16_t>(fixup->GetLocation()), 0);
- buffer_.Store<int16_t>(fixup->GetLocation(), 0);
- // See if it needs adjustment.
- AdjustFixupIfNeeded(fixup, &current_code_size, &fixups_to_recalculate);
+ do {
+ // Pop the fixup.
+ FixupId fixup_id = fixups_to_recalculate.front();
+ fixups_to_recalculate.pop_front();
+ Fixup* fixup = GetFixup(fixup_id);
+ DCHECK_NE(buffer_.Load<int16_t>(fixup->GetLocation()), 0);
+ buffer_.Store<int16_t>(fixup->GetLocation(), 0);
+ // See if it needs adjustment.
+ AdjustFixupIfNeeded(fixup, &current_code_size, &fixups_to_recalculate);
+ } while (!fixups_to_recalculate.empty());
+
+ if ((current_code_size & 2) != 0 && !literals_.empty()) {
+ // If we need to add padding before literals, this may just push some out of range,
+ // so recalculate all load literals. This makes up for the fact that we don't mark
+ // load literal as a dependency of all previous Fixups even though it actually is.
+ for (Fixup& fixup : fixups_) {
+ if (fixup.IsLoadLiteral()) {
+ AdjustFixupIfNeeded(&fixup, &current_code_size, &fixups_to_recalculate);
+ }
+ }
+ }
}
if (kIsDebugBuild) {
// Check that no fixup is marked as being in fixups_to_recalculate anymore.
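The extra literal pass above guards against a second-order effect of alignment padding; a worked example with assumed sizes:

    // Worked example (assumed sizes) of the hazard handled above:
    //   current_code_size = 1022                  // not Aligned<4>(.)
    //   literal offset    = RoundUp(1022, 4) = 1024
    // A 16-bit load literal reaches roughly 1 KiB past the (aligned) PC, so
    // the 2 padding bytes push the literal just out of range and the fixup
    // must be widened to the 32-bit ldr.w form.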
diff --git a/compiler/utils/arm/assembler_thumb2.h b/compiler/utils/arm/assembler_thumb2.h
index 838554e..41eb5d3 100644
--- a/compiler/utils/arm/assembler_thumb2.h
+++ b/compiler/utils/arm/assembler_thumb2.h
@@ -489,6 +489,10 @@
return type_;
}
+ bool IsLoadLiteral() const {
+ return GetType() >= kLoadLiteralNarrow;
+ }
+
Size GetOriginalSize() const {
return original_size_;
}
diff --git a/compiler/utils/arm/assembler_thumb2_test.cc b/compiler/utils/arm/assembler_thumb2_test.cc
index 68b7931..84f5cb1 100644
--- a/compiler/utils/arm/assembler_thumb2_test.cc
+++ b/compiler/utils/arm/assembler_thumb2_test.cc
@@ -950,4 +950,73 @@
__ GetAdjustedPosition(label.Position()));
}
+TEST_F(AssemblerThumb2Test, LoadLiteralBeyondMax1KiBDueToAlignmentOnSecondPass) {
+ // First part: as TwoCbzBeyondMaxOffset but add one 16-bit instruction to the end,
+ // so that the size is not Aligned<4>(.). On the first pass, the assembler resizes
+ // the second CBZ because it's out of range, then it will resize the first CBZ
+ // which has been pushed out of range. Thus, after the first pass, the code size
+ // will appear Aligned<4>(.) but the final size will not be.
+ Label label0, label1, label2;
+ __ cbz(arm::R0, &label1);
+ constexpr size_t kLdrR0R0Count1 = 63;
+ for (size_t i = 0; i != kLdrR0R0Count1; ++i) {
+ __ ldr(arm::R0, arm::Address(arm::R0));
+ }
+ __ Bind(&label0);
+ __ cbz(arm::R0, &label2);
+ __ Bind(&label1);
+ constexpr size_t kLdrR0R0Count2 = 65;
+ for (size_t i = 0; i != kLdrR0R0Count2; ++i) {
+ __ ldr(arm::R0, arm::Address(arm::R0));
+ }
+ __ Bind(&label2);
+ __ ldr(arm::R0, arm::Address(arm::R0));
+
+ std::string expected_part1 =
+ "cmp r0, #0\n" // cbz r0, label1
+ "beq.n 1f\n" +
+ RepeatInsn(kLdrR0R0Count1, "ldr r0, [r0]\n") +
+ "0:\n"
+ "cmp r0, #0\n" // cbz r0, label2
+ "beq.n 2f\n"
+ "1:\n" +
+ RepeatInsn(kLdrR0R0Count2, "ldr r0, [r0]\n") +
+ "2:\n" // Here the offset is Aligned<4>(.).
+ "ldr r0, [r0]\n"; // Make the first part
+
+ // Second part: as LoadLiteralMax1KiB with the caveat that the offset of the load
+ // literal will not be Aligned<4>(.) but it will appear to be when we process the
+ // instruction during the first pass, so the literal will need padding, and it
+ // will push the literal out of range, so we shall end up with "ldr.w".
+ arm::Literal* literal = __ NewLiteral<int32_t>(0x12345678);
+ __ LoadLiteral(arm::R0, literal);
+ Label label;
+ __ Bind(&label);
+ constexpr size_t kLdrR0R0Count = 511;
+ for (size_t i = 0; i != kLdrR0R0Count; ++i) {
+ __ ldr(arm::R0, arm::Address(arm::R0));
+ }
+
+ std::string expected =
+ expected_part1 +
+ "1:\n"
+ "ldr.w r0, [pc, #((2f - 1b - 2) & ~2)]\n" +
+ RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") +
+ ".align 2, 0\n"
+ "2:\n"
+ ".word 0x12345678\n";
+ DriverStr(expected, "LoadLiteralMax1KiB");
+
+ EXPECT_EQ(static_cast<uint32_t>(label.Position()) + 6u,
+ __ GetAdjustedPosition(label.Position()));
+}
+
+TEST_F(AssemblerThumb2Test, Clz) {
+ __ clz(arm::R0, arm::R1);
+
+ const char* expected = "clz r0, r1\n";
+
+ DriverStr(expected, "clz");
+}
+
} // namespace art
diff --git a/compiler/utils/assembler_thumb_test.cc b/compiler/utils/assembler_thumb_test.cc
index 20f61f9..cb01cea 100644
--- a/compiler/utils/assembler_thumb_test.cc
+++ b/compiler/utils/assembler_thumb_test.cc
@@ -32,7 +32,7 @@
// Include results file (generated manually)
#include "assembler_thumb_test_expected.cc.inc"
-#ifndef HAVE_ANDROID_OS
+#ifndef __ANDROID__
// This controls whether the results are printed to the
// screen or compared against the expected output.
// To generate new expected output, set this to true and
@@ -72,7 +72,7 @@
}
std::string GetToolsDir() {
-#ifndef HAVE_ANDROID_OS
+#ifndef __ANDROID__
// This will only work on the host. There is no as, objcopy or objdump on the device.
static std::string toolsdir;
@@ -89,7 +89,7 @@
}
void DumpAndCheck(std::vector<uint8_t>& code, const char* testname, const char* const* results) {
-#ifndef HAVE_ANDROID_OS
+#ifndef __ANDROID__
static std::string toolsdir = GetToolsDir();
ScratchFile file;
diff --git a/compiler/utils/swap_space.cc b/compiler/utils/swap_space.cc
index 325ee4f..42ed881 100644
--- a/compiler/utils/swap_space.cc
+++ b/compiler/utils/swap_space.cc
@@ -143,7 +143,6 @@
LOG(ERROR) << "Unable to mmap new swap file chunk.";
LOG(ERROR) << "Current size: " << size_ << " requested: " << next_part << "/" << min_size;
LOG(ERROR) << "Free list:";
- MutexLock lock(Thread::Current(), lock_);
DumpFreeMap(free_by_size_);
LOG(ERROR) << "In free list: " << CollectFree(free_by_start_, free_by_size_);
LOG(FATAL) << "Aborting...";
diff --git a/compiler/utils/swap_space.h b/compiler/utils/swap_space.h
index 691df4a..f7c772d 100644
--- a/compiler/utils/swap_space.h
+++ b/compiler/utils/swap_space.h
@@ -60,15 +60,15 @@
public:
SwapSpace(int fd, size_t initial_size);
~SwapSpace();
- void* Alloc(size_t size) LOCKS_EXCLUDED(lock_);
- void Free(void* ptr, size_t size) LOCKS_EXCLUDED(lock_);
+ void* Alloc(size_t size) REQUIRES(!lock_);
+ void Free(void* ptr, size_t size) REQUIRES(!lock_);
size_t GetSize() {
return size_;
}
private:
- SpaceChunk NewFileChunk(size_t min_size);
+ SpaceChunk NewFileChunk(size_t min_size) REQUIRES(lock_);
int fd_;
size_t size_;
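The swap_space.cc deletion above removes a self-deadlock: NewFileChunk is only reached with lock_ already held, which the new REQUIRES(lock_) annotation encodes for the static analyzer. The bug pattern in miniature (sketch, with a hypothetical SwapSpaceLike class):

    // Sketch of the fixed bug: re-acquiring a non-recursive mutex the caller
    // already holds deadlocks (or aborts in debug builds).
    void SwapSpaceLike::Alloc(size_t size) {
      MutexLock lock(Thread::Current(), lock_);  // First acquisition.
      NewFileChunk(size);                        // Called with lock_ held.
    }
    void SwapSpaceLike::NewFileChunk(size_t min_size) /* REQUIRES(lock_) */ {
      // MutexLock lock(Thread::Current(), lock_);  // Would self-deadlock.
      // Error paths may touch lock_-guarded state directly instead.
    }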
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index fa85ada..8c2a3ed 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -145,6 +145,13 @@
EmitLabel(lbl, dst.length_ + 5);
}
+void X86Assembler::movntl(const Address& dst, Register src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0xC3);
+ EmitOperand(src, dst);
+}
+
void X86Assembler::bswapl(Register dst) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x0F);
@@ -1194,11 +1201,26 @@
}
-void X86Assembler::imull(Register reg, const Immediate& imm) {
+void X86Assembler::imull(Register dst, Register src, const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- EmitUint8(0x69);
- EmitOperand(reg, Operand(reg));
- EmitImmediate(imm);
+ // See whether imm can be represented as a sign-extended 8bit value.
+ int32_t v32 = static_cast<int32_t>(imm.value());
+ if (IsInt<8>(v32)) {
+ // Sign-extension works.
+ EmitUint8(0x6B);
+ EmitOperand(dst, Operand(src));
+ EmitUint8(static_cast<uint8_t>(v32 & 0xFF));
+ } else {
+ // Not representable, use full immediate.
+ EmitUint8(0x69);
+ EmitOperand(dst, Operand(src));
+ EmitImmediate(imm);
+ }
+}
+
+
+void X86Assembler::imull(Register reg, const Immediate& imm) {
+ imull(reg, reg, imm);
}
@@ -1515,6 +1537,21 @@
}
+void X86Assembler::repe_cmpsw() {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0xF3);
+ EmitUint8(0xA7);
+}
+
+
+void X86Assembler::repe_cmpsl() {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF3);
+ EmitUint8(0xA7);
+}
+
+
X86Assembler* X86Assembler::lock() {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xF0);
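The three-operand imull above picks between the two standard x86 immediate forms. Worked encodings for both paths (Intel SDM forms 6B /r ib and 69 /r id; bytes shown for illustration):

    // imull $12,      %ecx, %eax  ->  6B C1 0C           (sign-extended imm8)
    // imull $0x12345, %ecx, %eax  ->  69 C1 45 23 01 00  (full imm32)
    // ModRM 0xC1 encodes mod=11, reg=EAX (dst), r/m=ECX (src).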
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index d1b4e1d..d9c1b40 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -231,6 +231,8 @@
void movl(const Address& dst, const Immediate& imm);
void movl(const Address& dst, Label* lbl);
+ void movntl(const Address& dst, Register src);
+
void bswapl(Register dst);
void movzxb(Register dst, ByteRegister src);
@@ -409,6 +411,7 @@
void imull(Register dst, Register src);
void imull(Register reg, const Immediate& imm);
+ void imull(Register dst, Register src, const Immediate& imm);
void imull(Register reg, const Address& address);
void imull(Register reg);
@@ -465,6 +468,8 @@
void jmp(Label* label);
void repne_scasw();
+ void repe_cmpsw();
+ void repe_cmpsl();
X86Assembler* lock();
void cmpxchgl(const Address& address, Register reg);
diff --git a/compiler/utils/x86/assembler_x86_test.cc b/compiler/utils/x86/assembler_x86_test.cc
index aacc57b..b664d23 100644
--- a/compiler/utils/x86/assembler_x86_test.cc
+++ b/compiler/utils/x86/assembler_x86_test.cc
@@ -105,6 +105,16 @@
DriverStr(expected, "movl");
}
+TEST_F(AssemblerX86Test, Movntl) {
+ GetAssembler()->movntl(x86::Address(x86::EDI, x86::EBX, x86::TIMES_4, 12), x86::EAX);
+ GetAssembler()->movntl(x86::Address(x86::EDI, 0), x86::EAX);
+ const char* expected =
+ "movntil %EAX, 0xc(%EDI,%EBX,4)\n"
+ "movntil %EAX, (%EDI)\n";
+
+ DriverStr(expected, "movntl");
+}
+
TEST_F(AssemblerX86Test, psrlq) {
GetAssembler()->psrlq(x86::XMM0, CreateImmediate(32));
const char* expected = "psrlq $0x20, %xmm0\n";
@@ -196,4 +206,16 @@
DriverStr(expected, "Repnescasw");
}
+TEST_F(AssemblerX86Test, Repecmpsw) {
+ GetAssembler()->repe_cmpsw();
+ const char* expected = "repe cmpsw\n";
+ DriverStr(expected, "Repecmpsw");
+}
+
+TEST_F(AssemblerX86Test, Repecmpsl) {
+ GetAssembler()->repe_cmpsl();
+ const char* expected = "repe cmpsl\n";
+ DriverStr(expected, "Repecmpsl");
+}
+
} // namespace art
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index f35f51c..22e7b9b 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -194,6 +194,21 @@
EmitImmediate(imm);
}
+void X86_64Assembler::movntl(const Address& dst, CpuRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitOptionalRex32(src, dst);
+ EmitUint8(0x0F);
+ EmitUint8(0xC3);
+ EmitOperand(src.LowBits(), dst);
+}
+
+void X86_64Assembler::movntq(const Address& dst, CpuRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitRex64(src, dst);
+ EmitUint8(0x0F);
+ EmitUint8(0xC3);
+ EmitOperand(src.LowBits(), dst);
+}
void X86_64Assembler::cmov(Condition c, CpuRegister dst, CpuRegister src) {
cmov(c, dst, src, true);
@@ -1672,28 +1687,33 @@
EmitOperand(dst.LowBits(), Operand(src));
}
-void X86_64Assembler::imull(CpuRegister reg, const Immediate& imm) {
+void X86_64Assembler::imull(CpuRegister dst, CpuRegister src, const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
CHECK(imm.is_int32()); // imull only supports 32b immediate.
- EmitOptionalRex32(reg, reg);
+ EmitOptionalRex32(dst, src);
// See whether imm can be represented as a sign-extended 8bit value.
int32_t v32 = static_cast<int32_t>(imm.value());
if (IsInt<8>(v32)) {
// Sign-extension works.
EmitUint8(0x6B);
- EmitOperand(reg.LowBits(), Operand(reg));
+ EmitOperand(dst.LowBits(), Operand(src));
EmitUint8(static_cast<uint8_t>(v32 & 0xFF));
} else {
// Not representable, use full immediate.
EmitUint8(0x69);
- EmitOperand(reg.LowBits(), Operand(reg));
+ EmitOperand(dst.LowBits(), Operand(src));
EmitImmediate(imm);
}
}
+void X86_64Assembler::imull(CpuRegister reg, const Immediate& imm) {
+ imull(reg, reg, imm);
+}
+
+
void X86_64Assembler::imull(CpuRegister reg, const Address& address) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitOptionalRex32(reg, address);
@@ -2073,6 +2093,29 @@
}
+void X86_64Assembler::repe_cmpsw() {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0xF3);
+ EmitUint8(0xA7);
+}
+
+
+void X86_64Assembler::repe_cmpsl() {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF3);
+ EmitUint8(0xA7);
+}
+
+
+void X86_64Assembler::repe_cmpsq() {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF3);
+ EmitRex64();
+ EmitUint8(0xA7);
+}
+
+
void X86_64Assembler::LoadDoubleConstant(XmmRegister dst, double value) {
// TODO: Need to have a code constants table.
int64_t constant = bit_cast<int64_t, double>(value);
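For reference, the byte sequences the new x86-64 emitters above produce follow directly from the EmitUint8 calls:

    // repe cmpsw  ->  66 F3 A7        (operand-size prefix, REP, CMPS)
    // repe cmpsl  ->  F3 A7
    // repe cmpsq  ->  F3 48 A7        (REX.W widens the compare to 64 bits)
    // movntl      ->  [REX] 0F C3 /r  (non-temporal 32-bit store)
    // movntq      ->  REX.W 0F C3 /r  (non-temporal 64-bit store)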
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 61ffeab..b8e5fb6 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -326,6 +326,9 @@
void movq(CpuRegister dst, CpuRegister src);
void movl(CpuRegister dst, CpuRegister src);
+ void movntl(const Address& dst, CpuRegister src);
+ void movntq(const Address& dst, CpuRegister src);
+
void movq(CpuRegister dst, const Address& src);
void movl(CpuRegister dst, const Address& src);
void movq(const Address& dst, CpuRegister src);
@@ -539,6 +542,7 @@
void imull(CpuRegister dst, CpuRegister src);
void imull(CpuRegister reg, const Immediate& imm);
+ void imull(CpuRegister dst, CpuRegister src, const Immediate& imm);
void imull(CpuRegister reg, const Address& address);
void imulq(CpuRegister src);
@@ -603,6 +607,9 @@
void bswapq(CpuRegister dst);
void repne_scasw();
+ void repe_cmpsw();
+ void repe_cmpsl();
+ void repe_cmpsq();
//
// Macros for High-level operations.
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index 6da5c35..296487e 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -35,7 +35,7 @@
ASSERT_EQ(static_cast<size_t>(5), buffer.Size());
}
-#ifdef HAVE_ANDROID_OS
+#ifdef __ANDROID__
static constexpr size_t kRandomIterations = 1000; // Devices might be puny, don't stress them...
#else
static constexpr size_t kRandomIterations = 100000; // Hosts are pretty powerful.
@@ -674,6 +674,46 @@
DriverStr(expected, "movq");
}
+TEST_F(AssemblerX86_64Test, Movntl) {
+ GetAssembler()->movntl(x86_64::Address(
+ x86_64::CpuRegister(x86_64::RDI), x86_64::CpuRegister(x86_64::RBX), x86_64::TIMES_4, 12), x86_64::CpuRegister(x86_64::RAX));
+ GetAssembler()->movntl(x86_64::Address(
+ x86_64::CpuRegister(x86_64::RDI), x86_64::CpuRegister(x86_64::R9), x86_64::TIMES_4, 12), x86_64::CpuRegister(x86_64::RAX));
+ GetAssembler()->movntl(x86_64::Address(
+ x86_64::CpuRegister(x86_64::RDI), x86_64::CpuRegister(x86_64::R9), x86_64::TIMES_4, 12), x86_64::CpuRegister(x86_64::RAX));
+ GetAssembler()->movntl(x86_64::Address(x86_64::CpuRegister(x86_64::R13), 0), x86_64::CpuRegister(x86_64::RAX));
+ GetAssembler()->movntl(x86_64::Address(
+ x86_64::CpuRegister(x86_64::R13), x86_64::CpuRegister(x86_64::R9), x86_64::TIMES_1, 0), x86_64::CpuRegister(x86_64::R9));
+ const char* expected =
+ "movntil %EAX, 0xc(%RDI,%RBX,4)\n"
+ "movntil %EAX, 0xc(%RDI,%R9,4)\n"
+ "movntil %EAX, 0xc(%RDI,%R9,4)\n"
+ "movntil %EAX, (%R13)\n"
+ "movntil %R9d, (%R13,%R9,1)\n";
+
+ DriverStr(expected, "movntl");
+}
+
+TEST_F(AssemblerX86_64Test, Movntq) {
+ GetAssembler()->movntq(x86_64::Address(
+ x86_64::CpuRegister(x86_64::RDI), x86_64::CpuRegister(x86_64::RBX), x86_64::TIMES_4, 12), x86_64::CpuRegister(x86_64::RAX));
+ GetAssembler()->movntq(x86_64::Address(
+ x86_64::CpuRegister(x86_64::RDI), x86_64::CpuRegister(x86_64::R9), x86_64::TIMES_4, 12), x86_64::CpuRegister(x86_64::RAX));
+ GetAssembler()->movntq(x86_64::Address(
+ x86_64::CpuRegister(x86_64::RDI), x86_64::CpuRegister(x86_64::R9), x86_64::TIMES_4, 12), x86_64::CpuRegister(x86_64::RAX));
+ GetAssembler()->movntq(x86_64::Address(x86_64::CpuRegister(x86_64::R13), 0), x86_64::CpuRegister(x86_64::RAX));
+ GetAssembler()->movntq(x86_64::Address(
+ x86_64::CpuRegister(x86_64::R13), x86_64::CpuRegister(x86_64::R9), x86_64::TIMES_1, 0), x86_64::CpuRegister(x86_64::R9));
+ const char* expected =
+ "movntiq %RAX, 0xc(%RDI,%RBX,4)\n"
+ "movntiq %RAX, 0xc(%RDI,%R9,4)\n"
+ "movntiq %RAX, 0xc(%RDI,%R9,4)\n"
+ "movntiq %RAX, (%R13)\n"
+ "movntiq %R9, (%R13,%R9,1)\n";
+
+ DriverStr(expected, "movntq");
+}
+
TEST_F(AssemblerX86_64Test, Cvtsi2ssAddr) {
GetAssembler()->cvtsi2ss(x86_64::XmmRegister(x86_64::XMM0),
x86_64::Address(x86_64::CpuRegister(x86_64::RAX), 0),
@@ -1263,4 +1303,22 @@
DriverStr(expected, "Repnescasw");
}
+TEST_F(AssemblerX86_64Test, Repecmpsw) {
+ GetAssembler()->repe_cmpsw();
+ const char* expected = "repe cmpsw\n";
+ DriverStr(expected, "Repecmpsw");
+}
+
+TEST_F(AssemblerX86_64Test, Repecmpsl) {
+ GetAssembler()->repe_cmpsl();
+ const char* expected = "repe cmpsl\n";
+ DriverStr(expected, "Repecmpsl");
+}
+
+TEST_F(AssemblerX86_64Test, Repecmpsq) {
+ GetAssembler()->repe_cmpsq();
+ const char* expected = "repe cmpsq\n";
+ DriverStr(expected, "Repecmpsq");
+}
+
} // namespace art
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 74ec2ed..75d6137 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -280,6 +280,18 @@
UsageError(" Example: --num-dex-method=%d", CompilerOptions::kDefaultNumDexMethodsThreshold);
UsageError(" Default: %d", CompilerOptions::kDefaultNumDexMethodsThreshold);
UsageError("");
+ UsageError(" --inline-depth-limit=<depth-limit>: the depth limit of inlining for fine tuning");
+ UsageError(" the compiler. A zero value will disable inlining. Honored only by Optimizing.");
+ UsageError(" Example: --inline-depth-limit=%d", CompilerOptions::kDefaultInlineDepthLimit);
+ UsageError(" Default: %d", CompilerOptions::kDefaultInlineDepthLimit);
+ UsageError("");
+ UsageError(" --inline-max-code-units=<code-units-count>: the maximum code units that a method");
+ UsageError(" can have to be considered for inlining. A zero value will disable inlining.");
+ UsageError(" Honored only by Optimizing.");
+ UsageError(" Example: --inline-max-code-units=%d",
+ CompilerOptions::kDefaultInlineMaxCodeUnits);
+ UsageError(" Default: %d", CompilerOptions::kDefaultInlineMaxCodeUnits);
+ UsageError("");
UsageError(" --dump-timing: display a breakdown of where time was spent");
UsageError("");
UsageError(" --include-patch-information: Include patching information so the generated code");
@@ -550,6 +562,8 @@
int small_method_threshold = CompilerOptions::kDefaultSmallMethodThreshold;
int tiny_method_threshold = CompilerOptions::kDefaultTinyMethodThreshold;
int num_dex_methods_threshold = CompilerOptions::kDefaultNumDexMethodsThreshold;
+ int inline_depth_limit = CompilerOptions::kDefaultInlineDepthLimit;
+ int inline_max_code_units = CompilerOptions::kDefaultInlineMaxCodeUnits;
// Profile file to use
double top_k_profile_threshold = CompilerOptions::kDefaultTopKProfileThreshold;
@@ -720,6 +734,22 @@
if (num_dex_methods_threshold < 0) {
Usage("--num-dex-methods passed a negative value %s", num_dex_methods_threshold);
}
+ } else if (option.starts_with("--inline-depth-limit=")) {
+ const char* limit = option.substr(strlen("--inline-depth-limit=")).data();
+ if (!ParseInt(limit, &inline_depth_limit)) {
+ Usage("Failed to parse --inline-depth-limit '%s' as an integer", limit);
+ }
+ if (inline_depth_limit < 0) {
+ Usage("--inline-depth-limit passed a negative value %s", inline_depth_limit);
+ }
+ } else if (option.starts_with("--inline-max-code-units=")) {
+ const char* code_units = option.substr(strlen("--inline-max-code-units=")).data();
+ if (!ParseInt(code_units, &inline_max_code_units)) {
+ Usage("Failed to parse --inline-max-code-units '%s' as an integer", code_units);
+ }
+ if (inline_max_code_units < 0) {
+ Usage("--inline-max-code-units passed a negative value %s", inline_max_code_units);
+ }
} else if (option == "--host") {
is_host_ = true;
} else if (option == "--runtime-arg") {
@@ -992,6 +1022,8 @@
small_method_threshold,
tiny_method_threshold,
num_dex_methods_threshold,
+ inline_depth_limit,
+ inline_max_code_units,
include_patch_information,
top_k_profile_threshold,
debuggable,
@@ -1660,7 +1692,7 @@
// Let the ImageWriter write the image file. If we do not compile PIC, also fix up the oat file.
bool CreateImageFile()
- LOCKS_EXCLUDED(Locks::mutator_lock_) {
+ REQUIRES(!Locks::mutator_lock_) {
CHECK(image_writer_ != nullptr);
if (!image_writer_->Write(image_filename_, oat_unstripped_, oat_location_)) {
LOG(ERROR) << "Failed to create image file " << image_filename_;
diff --git a/disassembler/disassembler_arm.cc b/disassembler/disassembler_arm.cc
index 31e653b..d1d3481 100644
--- a/disassembler/disassembler_arm.cc
+++ b/disassembler/disassembler_arm.cc
@@ -1455,6 +1455,20 @@
} // else unknown instruction
break;
}
+ case 0x2B: { // 0101011
+ // CLZ - 111 11 0101011 mmmm 1111 dddd 1000 mmmm
+ if ((instr & 0xf0f0) == 0xf080) {
+ opcode << "clz";
+ ArmRegister Rm(instr, 0);
+ ArmRegister Rd(instr, 8);
+ args << Rd << ", " << Rm;
+ ArmRegister Rm2(instr, 16);
+ if (Rm.r != Rm2.r || Rm.r == 13 || Rm.r == 15 || Rd.r == 13 || Rd.r == 15) {
+ args << " (UNPREDICTABLE)";
+ }
+ }
+ break;
+ }
default: // more formats
if ((op2 >> 4) == 2) { // 010xxxx
// data processing (register)
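A worked decode for the new CLZ branch above (bytes worked out by hand from the T1 encoding; worth double-checking against an assembler):

    // "clz r0, r1" assembles to 0xFAB1F081
    // (T1 encoding: 11111 010 1011 Rm | 1111 Rd 1000 Rm).
    //   (0xFAB1F081 & 0xf0f0) == 0xf080  -> matched as CLZ
    //   Rm = bits 0-3 = 1, Rd = bits 8-11 = 0, Rm2 = bits 16-19 = 1
    // Rm == Rm2 and neither register is SP (13) or PC (15), so the
    // "(UNPREDICTABLE)" suffix is not appended.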
diff --git a/disassembler/disassembler_x86.cc b/disassembler/disassembler_x86.cc
index 2ead4a2..44787a7 100644
--- a/disassembler/disassembler_x86.cc
+++ b/disassembler/disassembler_x86.cc
@@ -1117,6 +1117,9 @@
opcode1 = opcode_tmp.c_str();
}
break;
+ case 0xA7:
+ opcode1 = (prefix[2] == 0x66 ? "cmpsw" : "cmpsl");
+ break;
case 0xAF:
opcode1 = (prefix[2] == 0x66 ? "scasw" : "scasl");
break;
diff --git a/imgdiag/imgdiag.cc b/imgdiag/imgdiag.cc
index dce5206..304d4e5 100644
--- a/imgdiag/imgdiag.cc
+++ b/imgdiag/imgdiag.cc
@@ -56,7 +56,7 @@
image_location_(image_location),
image_diff_pid_(image_diff_pid) {}
- bool Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool Dump() SHARED_REQUIRES(Locks::mutator_lock_) {
std::ostream& os = *os_;
os << "MAGIC: " << image_header_.GetMagic() << "\n\n";
@@ -92,7 +92,7 @@
return str.substr(idx + 1);
}
- bool DumpImageDiff(pid_t image_diff_pid) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool DumpImageDiff(pid_t image_diff_pid) SHARED_REQUIRES(Locks::mutator_lock_) {
std::ostream& os = *os_;
{
@@ -140,7 +140,7 @@
// Look at /proc/$pid/mem and only diff the things from there
bool DumpImageDiffMap(pid_t image_diff_pid, const backtrace_map_t& boot_map)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
std::ostream& os = *os_;
const size_t pointer_size = InstructionSetPointerSize(
Runtime::Current()->GetInstructionSet());
@@ -683,7 +683,7 @@
}
static std::string GetClassDescriptor(mirror::Class* klass)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
CHECK(klass != nullptr);
std::string descriptor;
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 9325454..99140d4 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -159,7 +159,7 @@
void WalkOatDexFile(const OatFile::OatDexFile* oat_dex_file, Callback callback) {
std::string error_msg;
- std::unique_ptr<const DexFile> dex_file(oat_dex_file->OpenDexFile(&error_msg));
+ std::unique_ptr<const DexFile> dex_file(oat_dex_file->OpenDexFile(outof(error_msg)));
if (dex_file.get() == nullptr) {
return;
}
@@ -499,12 +499,12 @@
return oat_file_.GetOatHeader().GetInstructionSet();
}
- const void* GetQuickOatCode(ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const void* GetQuickOatCode(ArtMethod* m) SHARED_REQUIRES(Locks::mutator_lock_) {
for (size_t i = 0; i < oat_dex_files_.size(); i++) {
const OatFile::OatDexFile* oat_dex_file = oat_dex_files_[i];
CHECK(oat_dex_file != nullptr);
std::string error_msg;
- std::unique_ptr<const DexFile> dex_file(oat_dex_file->OpenDexFile(&error_msg));
+ std::unique_ptr<const DexFile> dex_file(oat_dex_file->OpenDexFile(outof(error_msg)));
if (dex_file.get() == nullptr) {
LOG(WARNING) << "Failed to open dex file '" << oat_dex_file->GetDexFileLocation()
<< "': " << error_msg;
@@ -533,7 +533,7 @@
const OatFile::OatDexFile* oat_dex_file = oat_dex_files_[i];
CHECK(oat_dex_file != nullptr);
std::string error_msg;
- std::unique_ptr<const DexFile> dex_file(oat_dex_file->OpenDexFile(&error_msg));
+ std::unique_ptr<const DexFile> dex_file(oat_dex_file->OpenDexFile(outof(error_msg)));
if (dex_file.get() == nullptr) {
LOG(WARNING) << "Failed to open dex file '" << oat_dex_file->GetDexFileLocation()
<< "': " << error_msg;
@@ -593,7 +593,7 @@
// Create the verifier early.
std::string error_msg;
- std::unique_ptr<const DexFile> dex_file(oat_dex_file.OpenDexFile(&error_msg));
+ std::unique_ptr<const DexFile> dex_file(oat_dex_file.OpenDexFile(outof(error_msg)));
if (dex_file.get() == nullptr) {
os << "NOT FOUND: " << error_msg << "\n\n";
os << std::flush;
@@ -638,7 +638,7 @@
std::string error_msg;
std::string dex_file_location = oat_dex_file.GetDexFileLocation();
- std::unique_ptr<const DexFile> dex_file(oat_dex_file.OpenDexFile(&error_msg));
+ std::unique_ptr<const DexFile> dex_file(oat_dex_file.OpenDexFile(outof(error_msg)));
if (dex_file == nullptr) {
os << "Failed to open dex file '" << dex_file_location << "': " << error_msg;
return false;
@@ -1462,7 +1462,7 @@
image_header_(image_header),
oat_dumper_options_(oat_dumper_options) {}
- bool Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool Dump() SHARED_REQUIRES(Locks::mutator_lock_) {
std::ostream& os = *os_;
std::ostream& indent_os = vios_.Stream();
@@ -1553,7 +1553,7 @@
if (oat_file == nullptr) {
oat_file = OatFile::Open(oat_location, oat_location,
nullptr, nullptr, false, nullptr,
- &error_msg);
+ outof(error_msg));
if (oat_file == nullptr) {
os << "NOT FOUND: " << error_msg << "\n";
return false;
@@ -1664,7 +1664,7 @@
private:
static void PrettyObjectValue(std::ostream& os, mirror::Class* type, mirror::Object* value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
CHECK(type != nullptr);
if (value == nullptr) {
os << StringPrintf("null %s\n", PrettyDescriptor(type).c_str());
@@ -1681,7 +1681,7 @@
}
static void PrintField(std::ostream& os, ArtField* field, mirror::Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
os << StringPrintf("%s: ", field->GetName());
switch (field->GetTypeAsPrimitiveType()) {
case Primitive::kPrimLong:
@@ -1734,7 +1734,7 @@
}
static void DumpFields(std::ostream& os, mirror::Object* obj, mirror::Class* klass)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
mirror::Class* super = klass->GetSuperClass();
if (super != nullptr) {
DumpFields(os, obj, super);
@@ -1750,7 +1750,7 @@
}
const void* GetQuickOatCodeBegin(ArtMethod* m)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
const void* quick_code = m->GetEntryPointFromQuickCompiledCodePtrSize(
InstructionSetPointerSize(oat_dumper_->GetOatInstructionSet()));
if (Runtime::Current()->GetClassLinker()->IsQuickResolutionStub(quick_code)) {
@@ -1763,7 +1763,7 @@
}
uint32_t GetQuickOatCodeSize(ArtMethod* m)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
const uint32_t* oat_code_begin = reinterpret_cast<const uint32_t*>(GetQuickOatCodeBegin(m));
if (oat_code_begin == nullptr) {
return 0;
@@ -1772,7 +1772,7 @@
}
const void* GetQuickOatCodeEnd(ArtMethod* m)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
const uint8_t* oat_code_begin = reinterpret_cast<const uint8_t*>(GetQuickOatCodeBegin(m));
if (oat_code_begin == nullptr) {
return nullptr;
@@ -1780,7 +1780,7 @@
return oat_code_begin + GetQuickOatCodeSize(m);
}
- static void Callback(mirror::Object* obj, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ static void Callback(mirror::Object* obj, void* arg) SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(obj != nullptr);
DCHECK(arg != nullptr);
ImageDumper* state = reinterpret_cast<ImageDumper*>(arg);
@@ -1882,7 +1882,7 @@
}
void DumpMethod(ArtMethod* method, ImageDumper* state, std::ostream& indent_os)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(method != nullptr);
const auto image_pointer_size =
InstructionSetPointerSize(state->oat_dumper_->GetOatInstructionSet());
@@ -2070,7 +2070,7 @@
}
void DumpOutliers(std::ostream& os)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
size_t sum_of_sizes = 0;
size_t sum_of_sizes_squared = 0;
size_t sum_of_expansion = 0;
@@ -2171,7 +2171,7 @@
}
void Dump(std::ostream& os, std::ostream& indent_os)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
{
os << "art_file_bytes = " << PrettySize(file_bytes) << "\n\n"
<< "art_file_bytes = header_bytes + object_bytes + alignment_bytes\n";
@@ -2321,7 +2321,7 @@
std::vector<std::unique_ptr<const DexFile>> dex_files;
for (const OatFile::OatDexFile* odf : oat_file->GetOatDexFiles()) {
std::string error_msg;
- std::unique_ptr<const DexFile> dex_file = odf->OpenDexFile(&error_msg);
+ std::unique_ptr<const DexFile> dex_file = odf->OpenDexFile(outof(error_msg));
CHECK(dex_file != nullptr) << error_msg;
class_linker->RegisterDexFile(*dex_file);
dex_files.push_back(std::move(dex_file));
@@ -2361,7 +2361,7 @@
std::ostream* os) {
std::string error_msg;
OatFile* oat_file = OatFile::Open(oat_filename, oat_filename, nullptr, nullptr, false,
- nullptr, &error_msg);
+ nullptr, outof(error_msg));
if (oat_file == nullptr) {
fprintf(stderr, "Failed to open oat file from '%s': %s\n", oat_filename, error_msg.c_str());
return EXIT_FAILURE;
@@ -2377,7 +2377,7 @@
static int SymbolizeOat(const char* oat_filename, std::string& output_name) {
std::string error_msg;
OatFile* oat_file = OatFile::Open(oat_filename, oat_filename, nullptr, nullptr, false,
- nullptr, &error_msg);
+ nullptr, outof(error_msg));
if (oat_file == nullptr) {
fprintf(stderr, "Failed to open oat file from '%s': %s\n", oat_filename, error_msg.c_str());
return EXIT_FAILURE;
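The outof(error_msg) calls above adopt the out-parameter wrapper whose test (runtime/base/out_test.cc) is added to the gtest list earlier in this change. A minimal sketch of the idea; the real out<T>/outof in runtime/base/out.h may differ in checks and API:

    // Minimal sketch of an out-parameter wrapper in the spirit of outof().
    template <typename T>
    class out {
     public:
      explicit out(T& param) : param_(param) {}
      T& operator*() { return param_; }
      T* operator->() { return &param_; }
     private:
      T& param_;
    };

    template <typename T>
    out<T> outof(T& param) { return out<T>(param); }

    // Call sites read OpenDexFile(outof(error_msg)) rather than
    // OpenDexFile(&error_msg), making the output intent explicit.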
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index dbd1d23..1ed6597 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -445,7 +445,7 @@
}
void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
for (size_t i = 0; i < count; ++i) {
*roots[i] = patch_oat_->RelocatedAddressOfPointer(*roots[i]);
}
@@ -453,7 +453,7 @@
void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
for (size_t i = 0; i < count; ++i) {
roots[i]->Assign(patch_oat_->RelocatedAddressOfPointer(roots[i]->AsMirrorPtr()));
}
diff --git a/patchoat/patchoat.h b/patchoat/patchoat.h
index 23abca8..466dacb 100644
--- a/patchoat/patchoat.h
+++ b/patchoat/patchoat.h
@@ -94,16 +94,16 @@
bool new_oat_out); // Output oat was newly created?
static void BitmapCallback(mirror::Object* obj, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
reinterpret_cast<PatchOat*>(arg)->VisitObject(obj);
}
void VisitObject(mirror::Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void FixupMethod(ArtMethod* object, ArtMethod* copy)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void FixupNativePointerArray(mirror::PointerArray* object)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool InHeap(mirror::Object*);
// Patches oat in place, modifying the oat_file given to the constructor.
@@ -113,13 +113,13 @@
template <typename ElfFileImpl>
bool PatchOatHeader(ElfFileImpl* oat_file);
- bool PatchImage() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void PatchArtFields(const ImageHeader* image_header) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void PatchArtMethods(const ImageHeader* image_header) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool PatchImage() SHARED_REQUIRES(Locks::mutator_lock_);
+ void PatchArtFields(const ImageHeader* image_header) SHARED_REQUIRES(Locks::mutator_lock_);
+ void PatchArtMethods(const ImageHeader* image_header) SHARED_REQUIRES(Locks::mutator_lock_);
void PatchInternedStrings(const ImageHeader* image_header)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void PatchDexFileArrays(mirror::ObjectArray<mirror::Object>* img_roots)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool WriteElf(File* out);
bool WriteImage(File* out);
@@ -177,10 +177,15 @@
PatchVisitor(PatchOat* patcher, mirror::Object* copy) : patcher_(patcher), copy_(copy) {}
~PatchVisitor() {}
void operator() (mirror::Object* obj, MemberOffset off, bool b) const
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+ REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
// For reference classes.
void operator() (mirror::Class* cls, mirror::Reference* ref) const
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+ REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+ // TODO: Consider using these for updating native class roots?
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
+ const {}
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
+
private:
PatchOat* const patcher_;
mirror::Object* const copy_;
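Note: the empty VisitRootIfNonNull/VisitRoot overloads added above give PatchVisitor the full
shape that reference-visiting templates expect, while deliberately ignoring native class roots
for now (hence the TODO). A hedged sketch of the pattern, with illustrative names:

    // Sketch: a visitor that patches fields but intentionally no-ops on roots.
    class FieldPatchVisitor {
     public:
      void operator()(mirror::Object* obj, MemberOffset offset, bool is_static) const;
      // Required by the visiting template; intentionally empty:
      void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const {}
      void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const {}
    };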
diff --git a/runtime/Android.mk b/runtime/Android.mk
index fe79e72..8f70d30 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -39,6 +39,7 @@
base/unix_file/random_access_file_utils.cc \
check_jni.cc \
class_linker.cc \
+ class_table.cc \
common_throws.cc \
debugger.cc \
dex_file.cc \
@@ -340,10 +341,13 @@
LIBART_CFLAGS := -DBUILDING_LIBART=1
+LIBART_TARGET_CFLAGS :=
+LIBART_HOST_CFLAGS :=
+
ifeq ($(MALLOC_IMPL),dlmalloc)
- LIBART_CFLAGS += -DUSE_DLMALLOC
+ LIBART_TARGET_CFLAGS += -DUSE_DLMALLOC
else
- LIBART_CFLAGS += -DUSE_JEMALLOC
+ LIBART_TARGET_CFLAGS += -DUSE_JEMALLOC
endif
# Default dex2oat instruction set features.
@@ -389,13 +393,6 @@
art_static_or_shared := $(3)
include $$(CLEAR_VARS)
- # Clang assembler has problem with macros in asm_support_x86.S, http://b/17443165,
- # on linux. Yet sdk on mac needs integrated assembler.
- ifeq ($$(HOST_OS),darwin)
- LOCAL_CLANG_ASFLAGS += -integrated-as
- else
- LOCAL_CLANG_ASFLAGS += -no-integrated-as
- endif
LOCAL_CPP_EXTENSION := $$(ART_CPP_EXTENSION)
ifeq ($$(art_ndebug_or_debug),ndebug)
LOCAL_MODULE := libart
@@ -439,8 +436,10 @@
LOCAL_CFLAGS := $$(LIBART_CFLAGS)
LOCAL_LDFLAGS := $$(LIBART_LDFLAGS)
ifeq ($$(art_target_or_host),target)
+ LOCAL_CFLAGS += $$(LIBART_TARGET_CFLAGS)
LOCAL_LDFLAGS += $$(LIBART_TARGET_LDFLAGS)
else #host
+ LOCAL_CFLAGS += $$(LIBART_HOST_CFLAGS)
LOCAL_LDFLAGS += $$(LIBART_HOST_LDFLAGS)
ifeq ($$(art_static_or_shared),static)
LOCAL_LDFLAGS += -static
@@ -580,4 +579,6 @@
LIBART_HOST_SRC_FILES_64 :=
LIBART_ENUM_OPERATOR_OUT_HEADER_FILES :=
LIBART_CFLAGS :=
+LIBART_TARGET_CFLAGS :=
+LIBART_HOST_CFLAGS :=
build-libart :=
diff --git a/runtime/arch/arm/asm_support_arm.S b/runtime/arch/arm/asm_support_arm.S
index 665d2a3..44c7649 100644
--- a/runtime/arch/arm/asm_support_arm.S
+++ b/runtime/arch/arm/asm_support_arm.S
@@ -50,6 +50,11 @@
// generated at END.
.macro DEF_ENTRY thumb_or_arm, name
\thumb_or_arm
+// Clang ignores .thumb_func and requires an explicit .thumb. Investigate whether we should still
+// carry around the .thumb_func.
+ .ifc \thumb_or_arm, .thumb_func
+ .thumb
+ .endif
.type \name, #function
.hidden \name // Hide this as a global symbol, so we do not incur plt calls.
.global \name
diff --git a/runtime/arch/arm/context_arm.h b/runtime/arch/arm/context_arm.h
index a58aecb..77bb5c8 100644
--- a/runtime/arch/arm/context_arm.h
+++ b/runtime/arch/arm/context_arm.h
@@ -35,7 +35,7 @@
void Reset() OVERRIDE;
- void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
void SetSP(uintptr_t new_sp) OVERRIDE {
SetGPR(SP, new_sp);
diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc
index 2f2654d..be9af98 100644
--- a/runtime/arch/arm/entrypoints_init_arm.cc
+++ b/runtime/arch/arm/entrypoints_init_arm.cc
@@ -171,6 +171,7 @@
// Read barrier
qpoints->pReadBarrierJni = ReadBarrierJni;
+ qpoints->pReadBarrierSlow = artReadBarrierSlow;
}
} // namespace art
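Note: each architecture's InitEntryPoints now wires a read-barrier slow path into its
QuickEntryPoints table. The C entrypoint used by the assembly callers below has this shape
(inferred from the call sites in this diff; ref is currently unused by the callers):

    // artReadBarrierSlow(ref, obj, offset): returns the reference read from the field at
    // *(obj + offset), after any forwarding/unpoisoning the read barrier requires.
    extern "C" mirror::Object* artReadBarrierSlow(mirror::Object* ref,
                                                  mirror::Object* obj,
                                                  uint32_t offset);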
diff --git a/runtime/arch/arm/instruction_set_features_arm.cc b/runtime/arch/arm/instruction_set_features_arm.cc
index f8590d3..28d1942 100644
--- a/runtime/arch/arm/instruction_set_features_arm.cc
+++ b/runtime/arch/arm/instruction_set_features_arm.cc
@@ -16,7 +16,7 @@
#include "instruction_set_features_arm.h"
-#if defined(HAVE_ANDROID_OS) && defined(__arm__)
+#if defined(__ANDROID__) && defined(__arm__)
#include <sys/auxv.h>
#include <asm/hwcap.h>
#endif
@@ -166,7 +166,7 @@
bool has_div = false;
bool has_lpae = false;
-#if defined(HAVE_ANDROID_OS) && defined(__arm__)
+#if defined(__ANDROID__) && defined(__arm__)
uint64_t hwcaps = getauxval(AT_HWCAP);
LOG(INFO) << "hwcaps=" << hwcaps;
if ((hwcaps & HWCAP_IDIVT) != 0) {
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 2000110..f6d954f 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -51,7 +51,6 @@
sub sp, #12 @ 3 words of space, bottom word will hold Method*
.cfi_adjust_cfa_offset 12
RUNTIME_CURRENT1 \rTemp1, \rTemp2 @ Load Runtime::Current into rTemp1.
- THIS_LOAD_REQUIRES_READ_BARRIER
ldr \rTemp1, [\rTemp1, #RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET] @ rTemp1 is kSaveAll Method*.
str \rTemp1, [sp, #0] @ Place Method* at bottom of stack.
str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame.
@@ -79,7 +78,6 @@
sub sp, #4 @ bottom word will hold Method*
.cfi_adjust_cfa_offset 4
RUNTIME_CURRENT2 \rTemp1, \rTemp2 @ Load Runtime::Current into rTemp1.
- THIS_LOAD_REQUIRES_READ_BARRIER
ldr \rTemp1, [\rTemp1, #RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET] @ rTemp1 is kRefsOnly Method*.
str \rTemp1, [sp, #0] @ Place Method* at bottom of stack.
str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame.
@@ -139,7 +137,6 @@
.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME rTemp1, rTemp2
SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_REGISTERS_ONLY
RUNTIME_CURRENT3 \rTemp1, \rTemp2 @ Load Runtime::Current into rTemp1.
- THIS_LOAD_REQUIRES_READ_BARRIER
@ rTemp1 is kRefsAndArgs Method*.
ldr \rTemp1, [\rTemp1, #RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET]
str \rTemp1, [sp, #0] @ Place Method* at bottom of stack.
@@ -171,7 +168,6 @@
.cfi_adjust_cfa_offset -40
.endm
-
.macro RETURN_IF_RESULT_IS_ZERO
cbnz r0, 1f @ result non-zero branch over
bx lr @ return
@@ -588,6 +584,59 @@
bkpt
END art_quick_check_cast
+// Restore rReg's value from [sp, #offset] if rReg is not the same as rExclude.
+.macro POP_REG_NE rReg, offset, rExclude
+ .ifnc \rReg, \rExclude
+ ldr \rReg, [sp, #\offset] @ restore rReg
+ .cfi_restore \rReg
+ .endif
+.endm
+
+ /*
+ * Macro to insert a read barrier; only used in art_quick_aput_obj.
+ * rObj and rDest are registers; offset is a defined literal such as MIRROR_OBJECT_CLASS_OFFSET.
+ * TODO: When the read barrier has a fast path, add heap unpoisoning support for the fast path.
+ */
+.macro READ_BARRIER rDest, rObj, offset
+#ifdef USE_READ_BARRIER
+ push {r0-r3, ip, lr} @ 6 words for saved registers (used in art_quick_aput_obj)
+ .cfi_adjust_cfa_offset 24
+ .cfi_rel_offset r0, 0
+ .cfi_rel_offset r1, 4
+ .cfi_rel_offset r2, 8
+ .cfi_rel_offset r3, 12
+ .cfi_rel_offset ip, 16
+ .cfi_rel_offset lr, 20
+ sub sp, #8 @ push padding
+ .cfi_adjust_cfa_offset 8
+ @ mov r0, r0 @ pass ref in r0 (no-op for now since parameter ref is unused)
+ .ifnc \rObj, r1
+ mov r1, \rObj @ pass rObj
+ .endif
+ mov r2, #\offset @ pass offset
+ bl artReadBarrierSlow @ artReadBarrierSlow(ref, rObj, offset)
+ @ No need to unpoison the return value in r0; artReadBarrierSlow() will do the unpoisoning.
+ .ifnc \rDest, r0
+ mov \rDest, r0 @ save return value in rDest
+ .endif
+ add sp, #8 @ pop padding
+ .cfi_adjust_cfa_offset -8
+ POP_REG_NE r0, 0, \rDest @ conditionally restore saved registers
+ POP_REG_NE r1, 4, \rDest
+ POP_REG_NE r2, 8, \rDest
+ POP_REG_NE r3, 12, \rDest
+ POP_REG_NE ip, 16, \rDest
+ add sp, #20
+ .cfi_adjust_cfa_offset -20
+ pop {lr} @ restore lr
+ .cfi_adjust_cfa_offset -4
+ .cfi_restore lr
+#else
+ ldr \rDest, [\rObj, #\offset]
+ UNPOISON_HEAP_REF \rDest
+#endif // USE_READ_BARRIER
+.endm
+
/*
* Entry from managed code for array put operations of objects where the value being stored
* needs to be checked for compatibility.
@@ -609,15 +658,21 @@
b art_quick_throw_array_bounds
END art_quick_aput_obj_with_bound_check
+#ifdef USE_READ_BARRIER
+ .extern artReadBarrierSlow
+#endif
.hidden art_quick_aput_obj
ENTRY art_quick_aput_obj
+#ifdef USE_READ_BARRIER
+ @ The offset to .Ldo_aput_null is too large for cbz due to expansion from the READ_BARRIER macro.
+ tst r2, r2
+ beq .Ldo_aput_null
+#else
cbz r2, .Ldo_aput_null
- ldr r3, [r0, #MIRROR_OBJECT_CLASS_OFFSET]
- UNPOISON_HEAP_REF r3
- ldr ip, [r2, #MIRROR_OBJECT_CLASS_OFFSET]
- UNPOISON_HEAP_REF ip
- ldr r3, [r3, #MIRROR_CLASS_COMPONENT_TYPE_OFFSET]
- UNPOISON_HEAP_REF r3
+#endif // USE_READ_BARRIER
+ READ_BARRIER r3, r0, MIRROR_OBJECT_CLASS_OFFSET
+ READ_BARRIER ip, r2, MIRROR_OBJECT_CLASS_OFFSET
+ READ_BARRIER r3, r3, MIRROR_CLASS_COMPONENT_TYPE_OFFSET
cmp r3, ip @ value's type == array's component type - trivial assignability
bne .Lcheck_assignability
.Ldo_aput:
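Note: the three READ_BARRIER expansions above replace plain class-pointer loads in the
a[i] = value fast path. In C++ terms the check is roughly the following hedged sketch, where
ReadBarrierLoad is a hypothetical helper: under USE_READ_BARRIER it stands for the
artReadBarrierSlow call, otherwise for a plain load plus UNPOISON_HEAP_REF:

    // Fast-path assignability check performed by art_quick_aput_obj.
    mirror::Object* array_klass = ReadBarrierLoad(array, MIRROR_OBJECT_CLASS_OFFSET);
    mirror::Object* value_klass = ReadBarrierLoad(value, MIRROR_OBJECT_CLASS_OFFSET);
    mirror::Object* component = ReadBarrierLoad(array_klass, MIRROR_CLASS_COMPONENT_TYPE_OFFSET);
    if (component == value_klass) {
      // Trivially assignable: perform the store and the GC card-marking write barrier.
    } else {
      // Fall through to the slow artIsAssignableFromCode check.
    }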
diff --git a/runtime/arch/arm64/context_arm64.h b/runtime/arch/arm64/context_arm64.h
index 0383ad6..1c99f3c 100644
--- a/runtime/arch/arm64/context_arm64.h
+++ b/runtime/arch/arm64/context_arm64.h
@@ -35,7 +35,7 @@
void Reset() OVERRIDE;
- void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
void SetSP(uintptr_t new_sp) OVERRIDE {
SetGPR(SP, new_sp);
diff --git a/runtime/arch/arm64/entrypoints_init_arm64.cc b/runtime/arch/arm64/entrypoints_init_arm64.cc
index 2ce2a29..0f06727 100644
--- a/runtime/arch/arm64/entrypoints_init_arm64.cc
+++ b/runtime/arch/arm64/entrypoints_init_arm64.cc
@@ -155,6 +155,7 @@
// Read barrier
qpoints->pReadBarrierJni = ReadBarrierJni;
+ qpoints->pReadBarrierSlow = artReadBarrierSlow;
};
} // namespace art
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 6d9b44a..8ba3d43 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -31,8 +31,6 @@
ldr xIP0, [xIP0] // xIP0 = & (art::Runtime * art::Runtime.instance_) .
// xIP0 = (ArtMethod*) Runtime.instance_.callee_save_methods[kRefAndArgs] .
- THIS_LOAD_REQUIRES_READ_BARRIER
-
// Loads appropriate callee-save-method.
ldr xIP0, [xIP0, RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET ]
@@ -95,8 +93,6 @@
ldr xIP0, [xIP0] // xIP0 = & (art::Runtime * art::Runtime.instance_) .
// xIP0 = (ArtMethod*) Runtime.instance_.callee_save_methods[kRefOnly] .
- THIS_LOAD_REQUIRES_READ_BARRIER
-
// Loads appropriate callee-save-method.
ldr xIP0, [xIP0, RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET ]
@@ -251,7 +247,6 @@
ldr xIP0, [xIP0] // xIP0 = & (art::Runtime * art::Runtime.instance_) .
// xIP0 = (ArtMethod*) Runtime.instance_.callee_save_methods[kRefAndArgs] .
- THIS_LOAD_REQUIRES_READ_BARRIER
ldr xIP0, [xIP0, RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET ]
SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL
@@ -542,18 +537,18 @@
// W10 - temporary
add x9, sp, #8 // Destination address is bottom of stack + null.
- // Use \@ to differentiate between macro invocations.
-.LcopyParams\@:
+ // Copy parameters onto the stack. Use a numeric label, as this is a macro and Clang's
+ // assembler does not have unique-id variables.
+1:
cmp w2, #0
- beq .LendCopyParams\@
+ beq 2f
sub w2, w2, #4 // Need 65536 bytes of range.
ldr w10, [x1, x2]
str w10, [x9, x2]
- b .LcopyParams\@
+ b 1b
-.LendCopyParams\@:
-
+2:
// Store null into ArtMethod* at bottom of frame.
str xzr, [sp]
.endm
@@ -592,26 +587,29 @@
// Store result (w0/x0/s0/d0) appropriately, depending on resultType.
ldrb w10, [x5]
+ // Check the return type and store the correct register into the jvalue in memory.
+ // Use numeric labels, as this is a macro and Clang's assembler does not have unique-id variables.
+
// Don't set anything for a void type.
cmp w10, #'V'
- beq .Lexit_art_quick_invoke_stub\@
+ beq 3f
+ // Is it a double?
cmp w10, #'D'
- bne .Lreturn_is_float\@
+ bne 1f
str d0, [x4]
- b .Lexit_art_quick_invoke_stub\@
+ b 3f
-.Lreturn_is_float\@:
+1: // Is it a float?
cmp w10, #'F'
- bne .Lreturn_is_int\@
+ bne 2f
str s0, [x4]
- b .Lexit_art_quick_invoke_stub\@
+ b 3f
- // Just store x0. Doesn't matter if it is 64 or 32 bits.
-.Lreturn_is_int\@:
+2: // Just store x0. Doesn't matter if it is 64 or 32 bits.
str x0, [x4]
-.Lexit_art_quick_invoke_stub\@:
+3: // Finish up.
ldp x2, x19, [xFP, #32] // Restore stack pointer and x19.
.cfi_restore x19
mov sp, x2
@@ -1119,6 +1117,62 @@
brk 0 // We should not return here...
END art_quick_check_cast
+// Restore xReg's value from [sp, #offset] if xReg is not the same as xExclude.
+.macro POP_REG_NE xReg, offset, xExclude
+ .ifnc \xReg, \xExclude
+ ldr \xReg, [sp, #\offset] // restore xReg
+ .cfi_restore \xReg
+ .endif
+.endm
+
+ /*
+ * Macro to insert a read barrier; only used in art_quick_aput_obj.
+ * xDest, wDest and xObj are registers; offset is a defined literal such as
+ * MIRROR_OBJECT_CLASS_OFFSET. Dest needs both the x and w versions of the same register to
+ * handle the name mismatch between instructions. This macro uses the lower 32b of the
+ * register when possible.
+ * TODO: When the read barrier has a fast path, add heap unpoisoning support for the fast path.
+ */
+.macro READ_BARRIER xDest, wDest, xObj, offset
+#ifdef USE_READ_BARRIER
+ // Store the registers used in art_quick_aput_obj (x0-x4, LR); the stack is 16B aligned.
+ stp x0, x1, [sp, #-48]!
+ .cfi_adjust_cfa_offset 48
+ .cfi_rel_offset x0, 0
+ .cfi_rel_offset x1, 8
+ stp x2, x3, [sp, #16]
+ .cfi_rel_offset x2, 16
+ .cfi_rel_offset x3, 24
+ stp x4, xLR, [sp, #32]
+ .cfi_rel_offset x4, 32
+ .cfi_rel_offset x30, 40
+
+ // mov x0, x0 // pass ref in x0 (no-op for now since parameter ref is unused)
+ .ifnc \xObj, x1
+ mov x1, \xObj // pass xObj
+ .endif
+ mov w2, #\offset // pass offset
+ bl artReadBarrierSlow // artReadBarrierSlow(ref, xObj, offset)
+ // No need to unpoison the return value in w0; artReadBarrierSlow() will do the unpoisoning.
+ .ifnc \wDest, w0
+ mov \wDest, w0 // save return value in wDest
+ .endif
+
+ // Conditionally restore saved registers
+ POP_REG_NE x0, 0, \xDest
+ POP_REG_NE x1, 8, \xDest
+ POP_REG_NE x2, 16, \xDest
+ POP_REG_NE x3, 24, \xDest
+ POP_REG_NE x4, 32, \xDest
+ ldr xLR, [sp, #40]
+ .cfi_restore x30
+ add sp, sp, #48
+ .cfi_adjust_cfa_offset -48
+#else
+ ldr \wDest, [\xObj, #\offset] // Heap reference = 32b. This also zero-extends to \xDest.
+ UNPOISON_HEAP_REF \wDest
+#endif // USE_READ_BARRIER
+.endm
+
/*
* Entry from managed code for array put operations of objects where the value being stored
* needs to be checked for compatibility.
@@ -1146,17 +1200,17 @@
b art_quick_throw_array_bounds
END art_quick_aput_obj_with_bound_check
+#ifdef USE_READ_BARRIER
+ .extern artReadBarrierSlow
+#endif
ENTRY art_quick_aput_obj
cbz x2, .Ldo_aput_null
- ldr w3, [x0, #MIRROR_OBJECT_CLASS_OFFSET] // Heap reference = 32b
+ READ_BARRIER x3, w3, x0, MIRROR_OBJECT_CLASS_OFFSET // Heap reference = 32b
// This also zero-extends to x3
- UNPOISON_HEAP_REF w3
- ldr w4, [x2, #MIRROR_OBJECT_CLASS_OFFSET] // Heap reference = 32b
+ READ_BARRIER x4, w4, x2, MIRROR_OBJECT_CLASS_OFFSET // Heap reference = 32b
// This also zero-extends to x4
- UNPOISON_HEAP_REF w4
- ldr w3, [x3, #MIRROR_CLASS_COMPONENT_TYPE_OFFSET] // Heap reference = 32b
+ READ_BARRIER x3, w3, x3, MIRROR_CLASS_COMPONENT_TYPE_OFFSET // Heap reference = 32b
// This also zero-extends to x3
- UNPOISON_HEAP_REF w3
cmp w3, w4 // value's type == array's component type - trivial assignability
bne .Lcheck_assignability
.Ldo_aput:
diff --git a/runtime/arch/context.h b/runtime/arch/context.h
index f86f9ae..9ef761e 100644
--- a/runtime/arch/context.h
+++ b/runtime/arch/context.h
@@ -42,7 +42,7 @@
// Reads values from callee saves in the given frame. The frame also holds
// the method that holds the layout.
virtual void FillCalleeSaves(const StackVisitor& fr)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+ SHARED_REQUIRES(Locks::mutator_lock_) = 0;
// Sets the stack pointer value.
virtual void SetSP(uintptr_t new_sp) = 0;
diff --git a/runtime/arch/instruction_set_features_test.cc b/runtime/arch/instruction_set_features_test.cc
index e6f4e7a..99c2d4d 100644
--- a/runtime/arch/instruction_set_features_test.cc
+++ b/runtime/arch/instruction_set_features_test.cc
@@ -18,7 +18,7 @@
#include <gtest/gtest.h>
-#ifdef HAVE_ANDROID_OS
+#ifdef __ANDROID__
#include "cutils/properties.h"
#endif
@@ -26,7 +26,7 @@
namespace art {
-#ifdef HAVE_ANDROID_OS
+#ifdef __ANDROID__
#if defined(__aarch64__)
TEST(InstructionSetFeaturesTest, DISABLED_FeaturesFromSystemPropertyVariant) {
LOG(WARNING) << "Test disabled due to no CPP define for A53 erratum 835769";
@@ -111,7 +111,7 @@
}
#endif
-#ifndef HAVE_ANDROID_OS
+#ifndef __ANDROID__
TEST(InstructionSetFeaturesTest, HostFeaturesFromCppDefines) {
std::string error_msg;
std::unique_ptr<const InstructionSetFeatures> default_features(
diff --git a/runtime/arch/mips/context_mips.h b/runtime/arch/mips/context_mips.h
index d01b95e..38cf29a 100644
--- a/runtime/arch/mips/context_mips.h
+++ b/runtime/arch/mips/context_mips.h
@@ -34,7 +34,7 @@
void Reset() OVERRIDE;
- void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
void SetSP(uintptr_t new_sp) OVERRIDE {
SetGPR(SP, new_sp);
diff --git a/runtime/arch/mips/entrypoints_direct_mips.h b/runtime/arch/mips/entrypoints_direct_mips.h
index b1aa3ee..f9c5315 100644
--- a/runtime/arch/mips/entrypoints_direct_mips.h
+++ b/runtime/arch/mips/entrypoints_direct_mips.h
@@ -44,7 +44,8 @@
entrypoint == kQuickCmpgDouble ||
entrypoint == kQuickCmpgFloat ||
entrypoint == kQuickCmplDouble ||
- entrypoint == kQuickCmplFloat;
+ entrypoint == kQuickCmplFloat ||
+ entrypoint == kQuickReadBarrierSlow;
}
} // namespace art
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index 09a018e..4e4b91f 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -279,6 +279,8 @@
qpoints->pReadBarrierJni = ReadBarrierJni;
static_assert(!IsDirectEntrypoint(kQuickReadBarrierJni), "Non-direct C stub marked direct.");
+ qpoints->pReadBarrierSlow = artReadBarrierSlow;
+ static_assert(IsDirectEntrypoint(kQuickReadBarrierSlow), "Direct C stub not marked direct.");
};
} // namespace art
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index 2819f92..4d5004f 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -79,7 +79,6 @@
lw $t0, %got(_ZN3art7Runtime9instance_E)($gp)
lw $t0, 0($t0)
- THIS_LOAD_REQUIRES_READ_BARRIER
lw $t0, RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET($t0)
sw $t0, 0($sp) # Place Method* at bottom of stack.
sw $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
@@ -127,7 +126,6 @@
lw $t0, %got(_ZN3art7Runtime9instance_E)($gp)
lw $t0, 0($t0)
- THIS_LOAD_REQUIRES_READ_BARRIER
lw $t0, RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET($t0)
sw $t0, 0($sp) # Place Method* at bottom of stack.
sw $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
@@ -219,7 +217,6 @@
SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_REGISTERS_ONLY
lw $t0, %got(_ZN3art7Runtime9instance_E)($gp)
lw $t0, 0($t0)
- THIS_LOAD_REQUIRES_READ_BARRIER
lw $t0, RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET($t0)
sw $t0, 0($sp) # Place Method* at bottom of stack.
sw $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
@@ -627,6 +624,76 @@
END art_quick_check_cast
/*
+ * Restore rReg's value from offset($sp) if rReg is not the same as rExclude.
+ * nReg is the register number for rReg.
+ */
+.macro POP_REG_NE rReg, nReg, offset, rExclude
+ .ifnc \rReg, \rExclude
+ lw \rReg, \offset($sp) # restore rReg
+ .cfi_restore \nReg
+ .endif
+.endm
+
+ /*
+ * Macro to insert a read barrier; only used in art_quick_aput_obj.
+ * rObj and rDest are registers; offset is a defined literal such as MIRROR_OBJECT_CLASS_OFFSET.
+ * TODO: When the read barrier has a fast path, add heap unpoisoning support for the fast path.
+ */
+.macro READ_BARRIER rDest, rObj, offset
+#ifdef USE_READ_BARRIER
+ # saved registers used in art_quick_aput_obj: a0-a2, t0-t1, t9, ra. 8 words for 16B alignment.
+ addiu $sp, $sp, -32
+ .cfi_adjust_cfa_offset 32
+ sw $ra, 28($sp)
+ .cfi_rel_offset 31, 28
+ sw $t9, 24($sp)
+ .cfi_rel_offset 25, 24
+ sw $t1, 20($sp)
+ .cfi_rel_offset 9, 20
+ sw $t0, 16($sp)
+ .cfi_rel_offset 8, 16
+ sw $a2, 8($sp) # padding slot at offset 12 (padding can be any slot in the 32B)
+ .cfi_rel_offset 6, 8
+ sw $a1, 4($sp)
+ .cfi_rel_offset 5, 4
+ sw $a0, 0($sp)
+ .cfi_rel_offset 4, 0
+
+ # move $a0, $a0 # pass ref in a0 (no-op for now since parameter ref is unused)
+ .ifnc \rObj, $a1
+ move $a1, \rObj # pass rObj
+ .endif
+ addiu $a2, $zero, \offset # pass offset
+ jal artReadBarrierSlow # artReadBarrierSlow(ref, rObj, offset)
+ addiu $sp, $sp, -16 # Use branch delay slot to reserve argument slots on the stack
+ # before the call to artReadBarrierSlow.
+ addiu $sp, $sp, 16 # restore stack after call to artReadBarrierSlow
+ # No need to unpoison the return value in v0; artReadBarrierSlow() will do the unpoisoning.
+ move \rDest, $v0 # save return value in rDest
+ # (rDest cannot be v0 in art_quick_aput_obj)
+
+ lw $a0, 0($sp) # restore registers except rDest
+ # (rDest can only be t0 or t1 in art_quick_aput_obj)
+ .cfi_restore 4
+ lw $a1, 4($sp)
+ .cfi_restore 5
+ lw $a2, 8($sp)
+ .cfi_restore 6
+ POP_REG_NE $t0, 8, 16, \rDest
+ POP_REG_NE $t1, 9, 20, \rDest
+ lw $t9, 24($sp)
+ .cfi_restore 25
+ lw $ra, 28($sp) # restore $ra
+ .cfi_restore 31
+ addiu $sp, $sp, 32
+ .cfi_adjust_cfa_offset -32
+#else
+ lw \rDest, \offset(\rObj)
+ UNPOISON_HEAP_REF \rDest
+#endif // USE_READ_BARRIER
+.endm
+
+ /*
* Entry from managed code for array put operations of objects where the value being stored
* needs to be checked for compatibility.
* a0 = array, a1 = index, a2 = value
@@ -648,15 +715,15 @@
move $a1, $t0
END art_quick_aput_obj_with_bound_check
+#ifdef USE_READ_BARRIER
+ .extern artReadBarrierSlow
+#endif
ENTRY art_quick_aput_obj
beqz $a2, .Ldo_aput_null
nop
- lw $t0, MIRROR_OBJECT_CLASS_OFFSET($a0)
- UNPOISON_HEAP_REF $t0
- lw $t1, MIRROR_OBJECT_CLASS_OFFSET($a2)
- UNPOISON_HEAP_REF $t1
- lw $t0, MIRROR_CLASS_COMPONENT_TYPE_OFFSET($t0)
- UNPOISON_HEAP_REF $t0
+ READ_BARRIER $t0, $a0, MIRROR_OBJECT_CLASS_OFFSET
+ READ_BARRIER $t1, $a2, MIRROR_OBJECT_CLASS_OFFSET
+ READ_BARRIER $t0, $t0, MIRROR_CLASS_COMPONENT_TYPE_OFFSET
bne $t1, $t0, .Lcheck_assignability # value's type == array's component type - trivial assignability
nop
.Ldo_aput:
diff --git a/runtime/arch/mips64/context_mips64.h b/runtime/arch/mips64/context_mips64.h
index ebc036c..e4a144f 100644
--- a/runtime/arch/mips64/context_mips64.h
+++ b/runtime/arch/mips64/context_mips64.h
@@ -34,7 +34,7 @@
void Reset() OVERRIDE;
- void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
void SetSP(uintptr_t new_sp) OVERRIDE {
SetGPR(SP, new_sp);
diff --git a/runtime/arch/mips64/entrypoints_init_mips64.cc b/runtime/arch/mips64/entrypoints_init_mips64.cc
index 4904af9..ec02d5a 100644
--- a/runtime/arch/mips64/entrypoints_init_mips64.cc
+++ b/runtime/arch/mips64/entrypoints_init_mips64.cc
@@ -186,6 +186,7 @@
// Read barrier
qpoints->pReadBarrierJni = ReadBarrierJni;
+ qpoints->pReadBarrierSlow = artReadBarrierSlow;
};
} // namespace art
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index abca70b..c30e6ca 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -89,7 +89,6 @@
# load appropriate callee-save-method
ld $t1, %got(_ZN3art7Runtime9instance_E)($gp)
ld $t1, 0($t1)
- THIS_LOAD_REQUIRES_READ_BARRIER
ld $t1, RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET($t1)
sd $t1, 0($sp) # Place ArtMethod* at bottom of stack.
sd $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
@@ -132,7 +131,6 @@
# load appropriate callee-save-method
ld $t1, %got(_ZN3art7Runtime9instance_E)($gp)
ld $t1, 0($t1)
- THIS_LOAD_REQUIRES_READ_BARRIER
ld $t1, RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET($t1)
sd $t1, 0($sp) # Place Method* at bottom of stack.
sd $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
@@ -255,7 +253,6 @@
# load appropriate callee-save-method
ld $t1, %got(_ZN3art7Runtime9instance_E)($gp)
ld $t1, 0($t1)
- THIS_LOAD_REQUIRES_READ_BARRIER
ld $t1, RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET($t1)
sd $t1, 0($sp) # Place Method* at bottom of stack.
sd $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
@@ -888,6 +885,77 @@
move $a2, rSELF # pass Thread::Current
END art_quick_check_cast
+
+ /*
+ * Restore rReg's value from offset($sp) if rReg is not the same as rExclude.
+ * nReg is the register number for rReg.
+ */
+.macro POP_REG_NE rReg, nReg, offset, rExclude
+ .ifnc \rReg, \rExclude
+ ld \rReg, \offset($sp) # restore rReg
+ .cfi_restore \nReg
+ .endif
+.endm
+
+ /*
+ * Macro to insert a read barrier; only used in art_quick_aput_obj.
+ * rObj and rDest are registers; offset is a defined literal such as MIRROR_OBJECT_CLASS_OFFSET.
+ * TODO: When the read barrier has a fast path, add heap unpoisoning support for the fast path.
+ */
+.macro READ_BARRIER rDest, rObj, offset
+#ifdef USE_READ_BARRIER
+ # saved registers used in art_quick_aput_obj: a0-a2, t0-t1, t9, ra. 16B-aligned.
+ daddiu $sp, $sp, -64
+ .cfi_adjust_cfa_offset 64
+ sd $ra, 56($sp)
+ .cfi_rel_offset 31, 56
+ sd $t9, 48($sp)
+ .cfi_rel_offset 25, 48
+ sd $t1, 40($sp)
+ .cfi_rel_offset 13, 40
+ sd $t0, 32($sp)
+ .cfi_rel_offset 12, 32
+ sd $a2, 16($sp) # padding slot at offset 24 (padding can be any slot in the 64B)
+ .cfi_rel_offset 6, 16
+ sd $a1, 8($sp)
+ .cfi_rel_offset 5, 8
+ sd $a0, 0($sp)
+ .cfi_rel_offset 4, 0
+
+ # move $a0, $a0 # pass ref in a0 (no-op for now since parameter ref is unused)
+ .ifnc \rObj, $a1
+ move $a1, \rObj # pass rObj
+ .endif
+ daddiu $a2, $zero, \offset # pass offset
+ jal artReadBarrierSlow # artReadBarrierSlow(ref, rObj, offset)
+ .cpreturn # Restore gp from t8 in branch delay slot.
+ # t8 may be clobbered in artReadBarrierSlow.
+ # No need to unpoison the return value in v0; artReadBarrierSlow() will do the unpoisoning.
+ move \rDest, $v0 # save return value in rDest
+ # (rDest cannot be v0 in art_quick_aput_obj)
+
+ ld $a0, 0($sp) # restore registers except rDest
+ # (rDest can only be t0 or t1 in art_quick_aput_obj)
+ .cfi_restore 4
+ ld $a1, 8($sp)
+ .cfi_restore 5
+ ld $a2, 16($sp)
+ .cfi_restore 6
+ POP_REG_NE $t0, 12, 32, \rDest
+ POP_REG_NE $t1, 13, 40, \rDest
+ ld $t9, 48($sp)
+ .cfi_restore 25
+ ld $ra, 56($sp) # restore $ra
+ .cfi_restore 31
+ daddiu $sp, $sp, 64
+ .cfi_adjust_cfa_offset -64
+ SETUP_GP # set up gp because we are not returning
+#else
+ lwu \rDest, \offset(\rObj)
+ UNPOISON_HEAP_REF \rDest
+#endif // USE_READ_BARRIER
+.endm
+
/*
* Entry from managed code for array put operations of objects where the value being stored
* needs to be checked for compatibility.
@@ -913,12 +981,9 @@
ENTRY art_quick_aput_obj
beq $a2, $zero, .Ldo_aput_null
nop
- lwu $t0, MIRROR_OBJECT_CLASS_OFFSET($a0)
- UNPOISON_HEAP_REF $t0
- lwu $t1, MIRROR_OBJECT_CLASS_OFFSET($a2)
- UNPOISON_HEAP_REF $t1
- lwu $t0, MIRROR_CLASS_COMPONENT_TYPE_OFFSET($t0)
- UNPOISON_HEAP_REF $t0
+ READ_BARRIER $t0, $a0, MIRROR_OBJECT_CLASS_OFFSET
+ READ_BARRIER $t1, $a2, MIRROR_OBJECT_CLASS_OFFSET
+ READ_BARRIER $t0, $t0, MIRROR_CLASS_COMPONENT_TYPE_OFFSET
bne $t1, $t0, .Lcheck_assignability # value's type == array's component type - trivial assignability
nop
.Ldo_aput:
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index 05b42f5..195b3b3 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -126,7 +126,7 @@
// Use the result from r0
: [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
[referrer] "r"(referrer)
- : "memory"); // clobber.
+ : "r0", "memory"); // clobber.
#elif defined(__aarch64__)
__asm__ __volatile__(
// Spill x0-x7 which we say we don't clobber. May contain args.
@@ -479,7 +479,7 @@
// Use the result from r0
: [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
[referrer] "r"(referrer), [hidden] "r"(hidden)
- : "memory"); // clobber.
+ : "r0", "memory"); // clobber.
#elif defined(__aarch64__)
__asm__ __volatile__(
// Spill x0-x7 which we say we don't clobber. May contain args.
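Note: adding "r0" to the clobber lists in the __arm__ variants tells the compiler the inline
assembly overwrites r0 even though it is not a named output; without it, the compiler may keep
a live value in r0 across the statement and read a clobbered register afterwards. A minimal
standalone illustration for 32-bit ARM (hypothetical, not ART code):

    // The asm block calls through 'code'; the call leaves its result in r0, and a real
    // call clobbers all caller-saved registers, so they must appear in the clobber list.
    static inline int InvokeViaR0(int (*code)()) {
      int result;
      __asm__ __volatile__(
          "blx %[code]\n\t"         // call; return value lands in r0
          "mov %[result], r0\n\t"   // copy it into the compiler-chosen output register
          : [result] "=r"(result)
          : [code] "r"(code)
          : "r0", "r1", "r2", "r3", "ip", "lr", "memory");
      return result;
    }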
@@ -1124,8 +1124,6 @@
TEST_F(StubTest, APutObj) {
- TEST_DISABLED_FOR_READ_BARRIER();
-
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
Thread* self = Thread::Current();
@@ -1258,8 +1256,6 @@
}
TEST_F(StubTest, AllocObject) {
- TEST_DISABLED_FOR_READ_BARRIER();
-
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
// This will lead to OOM error messages in the log.
@@ -1385,8 +1381,6 @@
}
TEST_F(StubTest, AllocObjectArray) {
- TEST_DISABLED_FOR_READ_BARRIER();
-
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
// TODO: Check the "Unresolved" allocation stubs
@@ -1474,8 +1468,6 @@
TEST_F(StubTest, StringCompareTo) {
- TEST_DISABLED_FOR_READ_BARRIER();
-
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
// TODO: Check the "Unresolved" allocation stubs
@@ -1557,7 +1549,7 @@
static void GetSetBooleanStatic(ArtField* f, Thread* self,
ArtMethod* referrer, StubTest* test)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
constexpr size_t num_values = 5;
@@ -1588,7 +1580,7 @@
}
static void GetSetByteStatic(ArtField* f, Thread* self, ArtMethod* referrer,
StubTest* test)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
int8_t values[] = { -128, -64, 0, 64, 127 };
@@ -1619,7 +1611,7 @@
static void GetSetBooleanInstance(Handle<mirror::Object>* obj, ArtField* f, Thread* self,
ArtMethod* referrer, StubTest* test)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
uint8_t values[] = { 0, true, 2, 128, 0xFF };
@@ -1654,7 +1646,7 @@
}
static void GetSetByteInstance(Handle<mirror::Object>* obj, ArtField* f,
Thread* self, ArtMethod* referrer, StubTest* test)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
int8_t values[] = { -128, -64, 0, 64, 127 };
@@ -1689,7 +1681,7 @@
static void GetSetCharStatic(ArtField* f, Thread* self, ArtMethod* referrer,
StubTest* test)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
uint16_t values[] = { 0, 1, 2, 255, 32768, 0xFFFF };
@@ -1719,7 +1711,7 @@
}
static void GetSetShortStatic(ArtField* f, Thread* self,
ArtMethod* referrer, StubTest* test)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
int16_t values[] = { -0x7FFF, -32768, 0, 255, 32767, 0x7FFE };
@@ -1750,7 +1742,7 @@
static void GetSetCharInstance(Handle<mirror::Object>* obj, ArtField* f,
Thread* self, ArtMethod* referrer, StubTest* test)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
uint16_t values[] = { 0, 1, 2, 255, 32768, 0xFFFF };
@@ -1784,7 +1776,7 @@
}
static void GetSetShortInstance(Handle<mirror::Object>* obj, ArtField* f,
Thread* self, ArtMethod* referrer, StubTest* test)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
int16_t values[] = { -0x7FFF, -32768, 0, 255, 32767, 0x7FFE };
@@ -1819,7 +1811,7 @@
static void GetSet32Static(ArtField* f, Thread* self, ArtMethod* referrer,
StubTest* test)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
uint32_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };
@@ -1855,7 +1847,7 @@
static void GetSet32Instance(Handle<mirror::Object>* obj, ArtField* f,
Thread* self, ArtMethod* referrer, StubTest* test)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
uint32_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };
@@ -1896,7 +1888,7 @@
static void set_and_check_static(uint32_t f_idx, mirror::Object* val, Thread* self,
ArtMethod* referrer, StubTest* test)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
test->Invoke3WithReferrer(static_cast<size_t>(f_idx),
reinterpret_cast<size_t>(val),
0U,
@@ -1916,7 +1908,7 @@
static void GetSetObjStatic(ArtField* f, Thread* self, ArtMethod* referrer,
StubTest* test)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
set_and_check_static(f->GetDexFieldIndex(), nullptr, self, referrer, test);
@@ -1940,7 +1932,7 @@
static void set_and_check_instance(ArtField* f, mirror::Object* trg,
mirror::Object* val, Thread* self, ArtMethod* referrer,
StubTest* test)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
reinterpret_cast<size_t>(trg),
reinterpret_cast<size_t>(val),
@@ -1963,7 +1955,7 @@
static void GetSetObjInstance(Handle<mirror::Object>* obj, ArtField* f,
Thread* self, ArtMethod* referrer, StubTest* test)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
set_and_check_instance(f, obj->Get(), nullptr, self, referrer, test);
@@ -1986,7 +1978,7 @@
static void GetSet64Static(ArtField* f, Thread* self, ArtMethod* referrer,
StubTest* test)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
#if (defined(__x86_64__) && !defined(__APPLE__)) || (defined(__mips__) && defined(__LP64__)) || \
defined(__aarch64__)
uint64_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };
@@ -2017,7 +2009,7 @@
static void GetSet64Instance(Handle<mirror::Object>* obj, ArtField* f,
Thread* self, ArtMethod* referrer, StubTest* test)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
#if (defined(__x86_64__) && !defined(__APPLE__)) || (defined(__mips__) && defined(__LP64__)) || \
defined(__aarch64__)
uint64_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };
@@ -2152,8 +2144,6 @@
}
TEST_F(StubTest, Fields8) {
- TEST_DISABLED_FOR_READ_BARRIER();
-
Thread* self = Thread::Current();
self->TransitionFromSuspendedToRunnable();
@@ -2166,8 +2156,6 @@
}
TEST_F(StubTest, Fields16) {
- TEST_DISABLED_FOR_READ_BARRIER();
-
Thread* self = Thread::Current();
self->TransitionFromSuspendedToRunnable();
@@ -2180,8 +2168,6 @@
}
TEST_F(StubTest, Fields32) {
- TEST_DISABLED_FOR_READ_BARRIER();
-
Thread* self = Thread::Current();
self->TransitionFromSuspendedToRunnable();
@@ -2193,8 +2179,6 @@
}
TEST_F(StubTest, FieldsObj) {
- TEST_DISABLED_FOR_READ_BARRIER();
-
Thread* self = Thread::Current();
self->TransitionFromSuspendedToRunnable();
@@ -2206,8 +2190,6 @@
}
TEST_F(StubTest, Fields64) {
- TEST_DISABLED_FOR_READ_BARRIER();
-
Thread* self = Thread::Current();
self->TransitionFromSuspendedToRunnable();
@@ -2221,8 +2203,6 @@
TEST_F(StubTest, IMT) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
- TEST_DISABLED_FOR_READ_BARRIER();
-
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
@@ -2342,8 +2322,6 @@
TEST_F(StubTest, StringIndexOf) {
#if defined(__arm__) || defined(__aarch64__)
- TEST_DISABLED_FOR_READ_BARRIER();
-
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
// garbage is created during ClassLinker::Init
@@ -2416,4 +2394,40 @@
#endif
}
+TEST_F(StubTest, ReadBarrier) {
+#if defined(ART_USE_READ_BARRIER) && (defined(__i386__) || defined(__arm__) || \
+ defined(__aarch64__) || defined(__mips__) || (defined(__x86_64__) && !defined(__APPLE__)))
+ Thread* self = Thread::Current();
+
+ const uintptr_t readBarrierSlow = StubTest::GetEntrypoint(self, kQuickReadBarrierSlow);
+
+ // Create an object
+ ScopedObjectAccess soa(self);
+ // garbage is created during ClassLinker::Init
+
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::Class> c(
+ hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;")));
+
+ // Build an object instance
+ Handle<mirror::Object> obj(hs.NewHandle(c->AllocObject(soa.Self())));
+
+ EXPECT_FALSE(self->IsExceptionPending());
+
+ size_t result = Invoke3(0U, reinterpret_cast<size_t>(obj.Get()),
+ mirror::Object::ClassOffset().SizeValue(), readBarrierSlow, self);
+
+ EXPECT_FALSE(self->IsExceptionPending());
+ EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
+ mirror::Class* klass = reinterpret_cast<mirror::Class*>(result);
+ EXPECT_EQ(klass, obj->GetClass());
+
+ // Tests done.
+#else
+ LOG(INFO) << "Skipping read_barrier_slow";
+ // Force-print to std::cout so it's also outside the logcat.
+ std::cout << "Skipping read_barrier_slow" << std::endl;
+#endif
+}
+
} // namespace art
diff --git a/runtime/arch/x86/context_x86.h b/runtime/arch/x86/context_x86.h
index a783d48..c4a11d8 100644
--- a/runtime/arch/x86/context_x86.h
+++ b/runtime/arch/x86/context_x86.h
@@ -34,7 +34,7 @@
void Reset() OVERRIDE;
- void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
void SetSP(uintptr_t new_sp) OVERRIDE {
SetGPR(ESP, new_sp);
diff --git a/runtime/arch/x86/entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc
index 737f4d1..e2632c1 100644
--- a/runtime/arch/x86/entrypoints_init_x86.cc
+++ b/runtime/arch/x86/entrypoints_init_x86.cc
@@ -28,6 +28,9 @@
extern "C" uint32_t art_quick_is_assignable(const mirror::Class* klass,
const mirror::Class* ref_class);
+// Read barrier entrypoints.
+extern "C" mirror::Object* art_quick_read_barrier_slow(mirror::Object*, mirror::Object*, uint32_t);
+
void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
QuickEntryPoints* qpoints) {
// Interpreter
@@ -141,6 +144,7 @@
// Read barrier
qpoints->pReadBarrierJni = ReadBarrierJni;
+ qpoints->pReadBarrierSlow = art_quick_read_barrier_slow;
};
} // namespace art
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index ebfb3fa..1da5a2f 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -33,7 +33,6 @@
movl SYMBOL(_ZN3art7Runtime9instance_E)@GOT(REG_VAR(got_reg)), REG_VAR(temp_reg)
movl (REG_VAR(temp_reg)), REG_VAR(temp_reg)
// Push save all callee-save method.
- THIS_LOAD_REQUIRES_READ_BARRIER
pushl RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET(REG_VAR(temp_reg))
CFI_ADJUST_CFA_OFFSET(4)
// Store esp as the top quick frame.
@@ -60,7 +59,6 @@
movl SYMBOL(_ZN3art7Runtime9instance_E)@GOT(REG_VAR(got_reg)), REG_VAR(temp_reg)
movl (REG_VAR(temp_reg)), REG_VAR(temp_reg)
// Push save all callee-save method.
- THIS_LOAD_REQUIRES_READ_BARRIER
pushl RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET(REG_VAR(temp_reg))
CFI_ADJUST_CFA_OFFSET(4)
// Store esp as the top quick frame.
@@ -106,7 +104,6 @@
movl SYMBOL(_ZN3art7Runtime9instance_E)@GOT(REG_VAR(got_reg)), REG_VAR(temp_reg)
movl (REG_VAR(temp_reg)), REG_VAR(temp_reg)
// Push save all callee-save method.
- THIS_LOAD_REQUIRES_READ_BARRIER
pushl RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET(REG_VAR(temp_reg))
CFI_ADJUST_CFA_OFFSET(4)
// Store esp as the stop quick frame.
@@ -1126,6 +1123,53 @@
UNREACHABLE
END_FUNCTION art_quick_check_cast
+// Restore reg's value if reg is not the same as exclude_reg, otherwise just adjust stack.
+MACRO2(POP_REG_NE, reg, exclude_reg)
+ .ifc RAW_VAR(reg), RAW_VAR(exclude_reg)
+ addl MACRO_LITERAL(4), %esp
+ CFI_ADJUST_CFA_OFFSET(-4)
+ .else
+ POP RAW_VAR(reg)
+ .endif
+END_MACRO
+
+ /*
+ * Macro to insert a read barrier; only used in art_quick_aput_obj.
+ * obj_reg and dest_reg are registers; offset is a defined literal such as
+ * MIRROR_OBJECT_CLASS_OFFSET.
+ * pop_eax is a boolean flag indicating whether eax is popped after the call.
+ * TODO: When the read barrier has a fast path, add heap unpoisoning support for the fast path.
+ */
+MACRO4(READ_BARRIER, obj_reg, offset, dest_reg, pop_eax)
+#ifdef USE_READ_BARRIER
+ PUSH eax // save registers used in art_quick_aput_obj
+ PUSH ebx
+ PUSH edx
+ PUSH ecx
+ // Outgoing argument set up
+ pushl MACRO_LITERAL((RAW_VAR(offset))) // pass offset, double parentheses are necessary
+ CFI_ADJUST_CFA_OFFSET(4)
+ PUSH RAW_VAR(obj_reg) // pass obj_reg
+ PUSH eax // pass ref, just pass eax for now since parameter ref is unused
+ call SYMBOL(artReadBarrierSlow) // artReadBarrierSlow(ref, obj_reg, offset)
+ // No need to unpoison the return value in eax; artReadBarrierSlow() will do the unpoisoning.
+ .ifnc RAW_VAR(dest_reg), eax
+ movl %eax, REG_VAR(dest_reg) // save loaded ref in dest_reg
+ .endif
+ addl MACRO_LITERAL(12), %esp // pop arguments
+ CFI_ADJUST_CFA_OFFSET(-12)
+ POP_REG_NE ecx, RAW_VAR(dest_reg) // Restore args except dest_reg
+ POP_REG_NE edx, RAW_VAR(dest_reg)
+ POP_REG_NE ebx, RAW_VAR(dest_reg)
+ .ifc RAW_VAR(pop_eax), true
+ POP_REG_NE eax, RAW_VAR(dest_reg)
+ .endif
+#else
+ movl RAW_VAR(offset)(REG_VAR(obj_reg)), REG_VAR(dest_reg)
+ UNPOISON_HEAP_REF RAW_VAR(dest_reg)
+#endif // USE_READ_BARRIER
+END_MACRO
+
/*
* Entry from managed code for array put operations of objects where the value being stored
* needs to be checked for compatibility.
@@ -1149,17 +1193,20 @@
DEFINE_FUNCTION art_quick_aput_obj
test %edx, %edx // store of null
jz .Ldo_aput_null
- movl MIRROR_OBJECT_CLASS_OFFSET(%eax), %ebx
- UNPOISON_HEAP_REF ebx
- movl MIRROR_CLASS_COMPONENT_TYPE_OFFSET(%ebx), %ebx
- UNPOISON_HEAP_REF ebx
+ READ_BARRIER eax, MIRROR_OBJECT_CLASS_OFFSET, ebx, true
+ READ_BARRIER ebx, MIRROR_CLASS_COMPONENT_TYPE_OFFSET, ebx, true
// value's type == array's component type - trivial assignability
-#ifdef USE_HEAP_POISONING
- PUSH eax // save eax
+#if defined(USE_READ_BARRIER)
+ READ_BARRIER edx, MIRROR_OBJECT_CLASS_OFFSET, eax, false
+ cmpl %eax, %ebx
+ POP eax // restore eax from the push in the beginning of READ_BARRIER macro
+#elif defined(USE_HEAP_POISONING)
+ PUSH eax // save eax
+ // Cannot call the READ_BARRIER macro here, because the above push messes up stack alignment.
movl MIRROR_OBJECT_CLASS_OFFSET(%edx), %eax
UNPOISON_HEAP_REF eax
cmpl %eax, %ebx
- POP eax // restore eax
+ POP eax // restore eax
#else
cmpl MIRROR_OBJECT_CLASS_OFFSET(%edx), %ebx
#endif
@@ -1181,6 +1228,8 @@
subl LITERAL(8), %esp // alignment padding
CFI_ADJUST_CFA_OFFSET(8)
#ifdef USE_HEAP_POISONING
+ // This load does not need a read barrier, since edx is unchanged and there is no GC safe
+ // point since the last read of MIRROR_OBJECT_CLASS_OFFSET(%edx).
movl MIRROR_OBJECT_CLASS_OFFSET(%edx), %eax // pass arg2 - type of the value to be stored
UNPOISON_HEAP_REF eax
PUSH eax
@@ -1696,5 +1745,15 @@
UNREACHABLE
END_FUNCTION art_nested_signal_return
+DEFINE_FUNCTION art_quick_read_barrier_slow
+ PUSH edx // pass arg3 - offset
+ PUSH ecx // pass arg2 - obj
+ PUSH eax // pass arg1 - ref
+ call SYMBOL(artReadBarrierSlow) // artReadBarrierSlow(ref, obj, offset)
+ addl LITERAL(12), %esp // pop arguments
+ CFI_ADJUST_CFA_OFFSET(-12)
+ ret
+END_FUNCTION art_quick_read_barrier_slow
+
// TODO: implement these!
UNIMPLEMENTED art_quick_memcmp16
diff --git a/runtime/arch/x86_64/asm_support_x86_64.S b/runtime/arch/x86_64/asm_support_x86_64.S
index 706ae58..cf0039c 100644
--- a/runtime/arch/x86_64/asm_support_x86_64.S
+++ b/runtime/arch/x86_64/asm_support_x86_64.S
@@ -24,6 +24,7 @@
#define MACRO1(macro_name, macro_arg1) .macro macro_name macro_arg1
#define MACRO2(macro_name, macro_arg1, macro_arg2) .macro macro_name macro_arg1, macro_arg2
#define MACRO3(macro_name, macro_arg1, macro_arg2, macro_arg3) .macro macro_name macro_arg1, macro_arg2, macro_arg3
+#define MACRO4(macro_name, macro_arg1, macro_arg2, macro_arg3, macro_arg4) .macro macro_name macro_arg1, macro_arg2, macro_arg3, macro_arg4
#define END_MACRO .endm
#if defined(__clang__)
diff --git a/runtime/arch/x86_64/context_x86_64.h b/runtime/arch/x86_64/context_x86_64.h
index c9b0ff6..30bb9ec 100644
--- a/runtime/arch/x86_64/context_x86_64.h
+++ b/runtime/arch/x86_64/context_x86_64.h
@@ -34,7 +34,7 @@
void Reset() OVERRIDE;
- void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
void SetSP(uintptr_t new_sp) OVERRIDE {
SetGPR(RSP, new_sp);
diff --git a/runtime/arch/x86_64/entrypoints_init_x86_64.cc b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
index d0ab9d5..ef1bb5f 100644
--- a/runtime/arch/x86_64/entrypoints_init_x86_64.cc
+++ b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
@@ -29,6 +29,9 @@
extern "C" uint32_t art_quick_assignable_from_code(const mirror::Class* klass,
const mirror::Class* ref_class);
+// Read barrier entrypoints.
+extern "C" mirror::Object* art_quick_read_barrier_slow(mirror::Object*, mirror::Object*, uint32_t);
+
void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
QuickEntryPoints* qpoints) {
#if defined(__APPLE__)
@@ -145,6 +148,7 @@
// Read barrier
qpoints->pReadBarrierJni = ReadBarrierJni;
+ qpoints->pReadBarrierSlow = art_quick_read_barrier_slow;
#endif // __APPLE__
};
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 7e7d789..f4c9488 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -66,7 +66,6 @@
movq %xmm14, 24(%rsp)
movq %xmm15, 32(%rsp)
// R10 := ArtMethod* for save all callee save frame method.
- THIS_LOAD_REQUIRES_READ_BARRIER
movq RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET(%r10), %r10
// Store ArtMethod* to bottom of stack.
movq %r10, 0(%rsp)
@@ -109,7 +108,6 @@
movq %xmm14, 24(%rsp)
movq %xmm15, 32(%rsp)
// R10 := ArtMethod* for refs only callee save frame method.
- THIS_LOAD_REQUIRES_READ_BARRIER
movq RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET(%r10), %r10
// Store ArtMethod* to bottom of stack.
movq %r10, 0(%rsp)
@@ -168,7 +166,6 @@
subq MACRO_LITERAL(80 + 4 * 8), %rsp
CFI_ADJUST_CFA_OFFSET(80 + 4 * 8)
// R10 := ArtMethod* for ref and args callee save frame method.
- THIS_LOAD_REQUIRES_READ_BARRIER
movq RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET(%r10), %r10
// Save FPRs.
movq %xmm0, 16(%rsp)
@@ -920,8 +917,12 @@
// Fast path tlab allocation.
// RDI: uint32_t type_idx, RSI: ArtMethod*
// RDX, RCX, R8, R9: free. RAX: return val.
+ // TODO: Add read barrier when this function is used.
+ // Might need a special macro, since rsi and edx are 32b/64b mismatched.
movl ART_METHOD_DEX_CACHE_TYPES_OFFSET(%rsi), %edx // Load dex cache resolved types array
UNPOISON_HEAP_REF edx
+ // TODO: Add read barrier when this function is used.
+ // Might need to break this down into multiple instructions to get the base address in a register.
// Load the class
movl MIRROR_OBJECT_ARRAY_DATA_OFFSET(%rdx, %rdi, MIRROR_OBJECT_ARRAY_COMPONENT_SIZE), %edx
UNPOISON_HEAP_REF edx
@@ -1127,19 +1128,23 @@
DEFINE_FUNCTION art_quick_check_cast
PUSH rdi // Save args for exc
PUSH rsi
+ subq LITERAL(8), %rsp // Alignment padding.
+ CFI_ADJUST_CFA_OFFSET(8)
SETUP_FP_CALLEE_SAVE_FRAME
call SYMBOL(artIsAssignableFromCode) // (Class* klass, Class* ref_klass)
testq %rax, %rax
jz 1f // jump forward if not assignable
RESTORE_FP_CALLEE_SAVE_FRAME
- addq LITERAL(16), %rsp // pop arguments
- CFI_ADJUST_CFA_OFFSET(-16)
+ addq LITERAL(24), %rsp // pop arguments
+ CFI_ADJUST_CFA_OFFSET(-24)
ret
- CFI_ADJUST_CFA_OFFSET(16 + 4 * 8) // Reset unwind info so following code unwinds.
+ CFI_ADJUST_CFA_OFFSET(24 + 4 * 8) // Reset unwind info so following code unwinds.
1:
RESTORE_FP_CALLEE_SAVE_FRAME
+ addq LITERAL(8), %rsp // pop padding
+ CFI_ADJUST_CFA_OFFSET(-8)
POP rsi // Pop arguments
POP rdi
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
@@ -1149,6 +1154,60 @@
END_FUNCTION art_quick_check_cast
+// Restore reg's value if reg is not the same as exclude_reg, otherwise just adjust stack.
+MACRO2(POP_REG_NE, reg, exclude_reg)
+ .ifc RAW_VAR(reg), RAW_VAR(exclude_reg)
+ addq MACRO_LITERAL(8), %rsp
+ CFI_ADJUST_CFA_OFFSET(-8)
+ .else
+ POP RAW_VAR(reg)
+ .endif
+END_MACRO
+
+ /*
+ * Macro to insert a read barrier; used in art_quick_aput_obj and art_quick_alloc_object_tlab.
+ * obj_reg and dest_reg{32|64} are registers; offset is a defined literal such as
+ * MIRROR_OBJECT_CLASS_OFFSET. dest_reg needs two versions to handle the mismatch between the
+ * 64b PUSH/POP and the 32b argument.
+ * TODO: When the read barrier has a fast path, add heap unpoisoning support for the fast path.
+ *
+ * As with the art_quick_aput_obj* functions, the 64b versions are in comments.
+ */
+MACRO4(READ_BARRIER, obj_reg, offset, dest_reg32, dest_reg64)
+#ifdef USE_READ_BARRIER
+ PUSH rax // save registers that might be used
+ PUSH rdi
+ PUSH rsi
+ PUSH rdx
+ PUSH rcx
+ SETUP_FP_CALLEE_SAVE_FRAME
+ // Outgoing argument set up
+ // movl %edi, %edi // pass ref, no-op for now since parameter ref is unused
+ // // movq %rdi, %rdi
+ movl REG_VAR(obj_reg), %esi // pass obj_reg
+ // movq REG_VAR(obj_reg), %rsi
+ movl MACRO_LITERAL((RAW_VAR(offset))), %edx // pass offset, double parentheses are necessary
+ // movq MACRO_LITERAL((RAW_VAR(offset))), %rdx
+ call SYMBOL(artReadBarrierSlow) // artReadBarrierSlow(ref, obj_reg, offset)
+ // No need to unpoison the return value in rax; artReadBarrierSlow() will do the unpoisoning.
+ .ifnc RAW_VAR(dest_reg32), eax
+ // .ifnc RAW_VAR(dest_reg64), rax
+ movl %eax, REG_VAR(dest_reg32) // save loaded ref in dest_reg
+ // movq %rax, REG_VAR(dest_reg64)
+ .endif
+ RESTORE_FP_CALLEE_SAVE_FRAME
+ POP_REG_NE rcx, RAW_VAR(dest_reg64) // Restore registers except dest_reg
+ POP_REG_NE rdx, RAW_VAR(dest_reg64)
+ POP_REG_NE rsi, RAW_VAR(dest_reg64)
+ POP_REG_NE rdi, RAW_VAR(dest_reg64)
+ POP_REG_NE rax, RAW_VAR(dest_reg64)
+#else
+ movl RAW_VAR(offset)(REG_VAR(obj_reg)), REG_VAR(dest_reg32)
+ // movq RAW_VAR(offset)(REG_VAR(obj_reg)), REG_VAR(dest_reg64)
+ UNPOISON_HEAP_REF RAW_VAR(dest_reg32) // UNPOISON_HEAP_REF only takes a 32b register
+#endif // USE_READ_BARRIER
+END_MACRO
+
/*
* Entry from managed code for array put operations of objects where the value being stored
* needs to be checked for compatibility.
@@ -1193,15 +1252,13 @@
testl %edx, %edx // store of null
// test %rdx, %rdx
jz .Ldo_aput_null
- movl MIRROR_OBJECT_CLASS_OFFSET(%edi), %ecx
-// movq MIRROR_OBJECT_CLASS_OFFSET(%rdi), %rcx
- UNPOISON_HEAP_REF ecx
- movl MIRROR_CLASS_COMPONENT_TYPE_OFFSET(%ecx), %ecx
-// movq MIRROR_CLASS_COMPONENT_TYPE_OFFSET(%rcx), %rcx
- UNPOISON_HEAP_REF ecx
-#ifdef USE_HEAP_POISONING
- movl MIRROR_OBJECT_CLASS_OFFSET(%edx), %eax // rax is free.
- UNPOISON_HEAP_REF eax
+ READ_BARRIER edi, MIRROR_OBJECT_CLASS_OFFSET, ecx, rcx
+ // READ_BARRIER rdi, MIRROR_OBJECT_CLASS_OFFSET, ecx, rcx
+ READ_BARRIER ecx, MIRROR_CLASS_COMPONENT_TYPE_OFFSET, ecx, rcx
+ // READ_BARRIER rcx, MIRROR_CLASS_COMPONENT_TYPE_OFFSET, ecx, rcx
+#if defined(USE_HEAP_POISONING) || defined(USE_READ_BARRIER)
+ READ_BARRIER edx, MIRROR_OBJECT_CLASS_OFFSET, eax, rax // rax is free.
+ // READ_BARRIER rdx, MIRROR_OBJECT_CLASS_OFFSET, eax, rax
cmpl %eax, %ecx // value's type == array's component type - trivial assignability
#else
cmpl MIRROR_OBJECT_CLASS_OFFSET(%edx), %ecx // value's type == array's component type - trivial assignability
@@ -1226,13 +1283,16 @@
PUSH rdi
PUSH rsi
PUSH rdx
- subq LITERAL(8), %rsp // Alignment padding.
- CFI_ADJUST_CFA_OFFSET(8)
SETUP_FP_CALLEE_SAVE_FRAME
- // "Uncompress" = do nothing, as already zero-extended on load.
- movl MIRROR_OBJECT_CLASS_OFFSET(%edx), %esi // Pass arg2 = value's class.
- UNPOISON_HEAP_REF esi
+#if defined(USE_HEAP_POISONING) || defined(USE_READ_BARRIER)
+ // The load of MIRROR_OBJECT_CLASS_OFFSET(%edx) is redundant; eax still holds the value.
+ movl %eax, %esi // Pass arg2 = value's class.
+ // movq %rax, %rsi
+#else
+ // "Uncompress" = do nothing, as already zero-extended on load.
+ movl MIRROR_OBJECT_CLASS_OFFSET(%edx), %esi // Pass arg2 = value's class.
+#endif
movq %rcx, %rdi // Pass arg1 = array's component type.
call SYMBOL(artIsAssignableFromCode) // (Class* a, Class* b)
@@ -1243,8 +1303,6 @@
RESTORE_FP_CALLEE_SAVE_FRAME
// Restore arguments.
- addq LITERAL(8), %rsp
- CFI_ADJUST_CFA_OFFSET(-8)
POP rdx
POP rsi
POP rdi
@@ -1258,12 +1316,10 @@
movb %dl, (%rdx, %rdi) // Note: this assumes that top 32b of %rdi are zero
// movb %dl, (%rdx, %rdi)
ret
- CFI_ADJUST_CFA_OFFSET(32 + 4 * 8) // Reset unwind info so following code unwinds.
+ CFI_ADJUST_CFA_OFFSET(24 + 4 * 8) // Reset unwind info so following code unwinds.
.Lthrow_array_store_exception:
RESTORE_FP_CALLEE_SAVE_FRAME
// Restore arguments.
- addq LITERAL(8), %rsp
- CFI_ADJUST_CFA_OFFSET(-8)
POP rdx
POP rsi
POP rdi
@@ -1717,7 +1773,11 @@
DEFINE_FUNCTION art_quick_assignable_from_code
SETUP_FP_CALLEE_SAVE_FRAME
+ subq LITERAL(8), %rsp // Alignment padding.
+ CFI_ADJUST_CFA_OFFSET(8)
call SYMBOL(artIsAssignableFromCode) // (const mirror::Class*, const mirror::Class*)
+ addq LITERAL(8), %rsp
+ CFI_ADJUST_CFA_OFFSET(-8)
RESTORE_FP_CALLEE_SAVE_FRAME
ret
END_FUNCTION art_quick_assignable_from_code
@@ -1733,3 +1793,14 @@
call PLT_SYMBOL(longjmp)
UNREACHABLE
END_FUNCTION art_nested_signal_return
+
+DEFINE_FUNCTION art_quick_read_barrier_slow
+ SETUP_FP_CALLEE_SAVE_FRAME
+ subq LITERAL(8), %rsp // Alignment padding.
+ CFI_ADJUST_CFA_OFFSET(8)
+ call SYMBOL(artReadBarrierSlow) // artReadBarrierSlow(ref, obj, offset)
+ addq LITERAL(8), %rsp
+ CFI_ADJUST_CFA_OFFSET(-8)
+ RESTORE_FP_CALLEE_SAVE_FRAME
+ ret
+END_FUNCTION art_quick_read_barrier_slow
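
For context, a rough C++ sketch of the slow path the new stub calls into. Only the (ref, obj, offset) argument shape is taken from the call-site comment above; the body is an assumption about what the runtime-side helper does, not the actual implementation.

// Hedged sketch -- not ART's real artReadBarrierSlow, whose body lives in the
// runtime's read-barrier support code.
extern "C" mirror::Object* artReadBarrierSlow(mirror::Object* ref,
                                              mirror::Object* obj,
                                              uint32_t offset) {
  (void) ref;  // Stale reference as loaded by the fast path.
  // Assumed behavior: re-read the 32-bit heap reference at obj + offset and
  // let the collector return the up-to-date (possibly forwarded) pointer.
  uint32_t raw = *reinterpret_cast<uint32_t*>(
      reinterpret_cast<uint8_t*>(obj) + offset);
  return reinterpret_cast<mirror::Object*>(static_cast<uintptr_t>(raw));
}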
diff --git a/runtime/art_field-inl.h b/runtime/art_field-inl.h
index 73beb1f..5138cc9 100644
--- a/runtime/art_field-inl.h
+++ b/runtime/art_field-inl.h
@@ -253,7 +253,7 @@
SetObj<kTransactionActive>(object, l);
}
-inline const char* ArtField::GetName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+inline const char* ArtField::GetName() SHARED_REQUIRES(Locks::mutator_lock_) {
uint32_t field_index = GetDexFieldIndex();
if (UNLIKELY(GetDeclaringClass()->IsProxyClass())) {
DCHECK(IsStatic());
@@ -264,7 +264,7 @@
return dex_file->GetFieldName(dex_file->GetFieldId(field_index));
}
-inline const char* ArtField::GetTypeDescriptor() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+inline const char* ArtField::GetTypeDescriptor() SHARED_REQUIRES(Locks::mutator_lock_) {
uint32_t field_index = GetDexFieldIndex();
if (UNLIKELY(GetDeclaringClass()->IsProxyClass())) {
DCHECK(IsStatic());
@@ -278,11 +278,11 @@
}
inline Primitive::Type ArtField::GetTypeAsPrimitiveType()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return Primitive::GetType(GetTypeDescriptor()[0]);
}
-inline bool ArtField::IsPrimitiveType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+inline bool ArtField::IsPrimitiveType() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetTypeAsPrimitiveType() != Primitive::kPrimNot;
}
@@ -304,15 +304,15 @@
return type;
}
-inline size_t ArtField::FieldSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+inline size_t ArtField::FieldSize() SHARED_REQUIRES(Locks::mutator_lock_) {
return Primitive::ComponentSize(GetTypeAsPrimitiveType());
}
-inline mirror::DexCache* ArtField::GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+inline mirror::DexCache* ArtField::GetDexCache() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetDeclaringClass()->GetDexCache();
}
-inline const DexFile* ArtField::GetDexFile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+inline const DexFile* ArtField::GetDexFile() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetDexCache()->GetDexFile();
}
diff --git a/runtime/art_field.h b/runtime/art_field.h
index 7a03723..fa0694b 100644
--- a/runtime/art_field.h
+++ b/runtime/art_field.h
@@ -42,27 +42,27 @@
public:
ArtField();
- mirror::Class* GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Class* GetDeclaringClass() SHARED_REQUIRES(Locks::mutator_lock_);
void SetDeclaringClass(mirror::Class *new_declaring_class)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- uint32_t GetAccessFlags() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ uint32_t GetAccessFlags() SHARED_REQUIRES(Locks::mutator_lock_);
- void SetAccessFlags(uint32_t new_access_flags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetAccessFlags(uint32_t new_access_flags) SHARED_REQUIRES(Locks::mutator_lock_) {
// Not called within a transaction.
access_flags_ = new_access_flags;
}
- bool IsPublic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsPublic() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccPublic) != 0;
}
- bool IsStatic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsStatic() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccStatic) != 0;
}
- bool IsFinal() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsFinal() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccFinal) != 0;
}
@@ -76,115 +76,116 @@
}
// Offset to field within an Object.
- MemberOffset GetOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ MemberOffset GetOffset() SHARED_REQUIRES(Locks::mutator_lock_);
static MemberOffset OffsetOffset() {
return MemberOffset(OFFSETOF_MEMBER(ArtField, offset_));
}
- MemberOffset GetOffsetDuringLinking() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ MemberOffset GetOffsetDuringLinking() SHARED_REQUIRES(Locks::mutator_lock_);
- void SetOffset(MemberOffset num_bytes) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetOffset(MemberOffset num_bytes) SHARED_REQUIRES(Locks::mutator_lock_);
// field access, null object for static fields
- uint8_t GetBoolean(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ uint8_t GetBoolean(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive>
- void SetBoolean(mirror::Object* object, uint8_t z) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetBoolean(mirror::Object* object, uint8_t z) SHARED_REQUIRES(Locks::mutator_lock_);
- int8_t GetByte(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ int8_t GetByte(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive>
- void SetByte(mirror::Object* object, int8_t b) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetByte(mirror::Object* object, int8_t b) SHARED_REQUIRES(Locks::mutator_lock_);
- uint16_t GetChar(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ uint16_t GetChar(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive>
- void SetChar(mirror::Object* object, uint16_t c) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetChar(mirror::Object* object, uint16_t c) SHARED_REQUIRES(Locks::mutator_lock_);
- int16_t GetShort(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ int16_t GetShort(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive>
- void SetShort(mirror::Object* object, int16_t s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetShort(mirror::Object* object, int16_t s) SHARED_REQUIRES(Locks::mutator_lock_);
- int32_t GetInt(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ int32_t GetInt(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive>
- void SetInt(mirror::Object* object, int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetInt(mirror::Object* object, int32_t i) SHARED_REQUIRES(Locks::mutator_lock_);
- int64_t GetLong(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ int64_t GetLong(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive>
- void SetLong(mirror::Object* object, int64_t j) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetLong(mirror::Object* object, int64_t j) SHARED_REQUIRES(Locks::mutator_lock_);
- float GetFloat(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ float GetFloat(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive>
- void SetFloat(mirror::Object* object, float f) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetFloat(mirror::Object* object, float f) SHARED_REQUIRES(Locks::mutator_lock_);
- double GetDouble(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ double GetDouble(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive>
- void SetDouble(mirror::Object* object, double d) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetDouble(mirror::Object* object, double d) SHARED_REQUIRES(Locks::mutator_lock_);
- mirror::Object* GetObject(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Object* GetObject(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive>
void SetObject(mirror::Object* object, mirror::Object* l)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Raw field accesses.
- uint32_t Get32(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ uint32_t Get32(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive>
void Set32(mirror::Object* object, uint32_t new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- uint64_t Get64(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ uint64_t Get64(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive>
- void Set64(mirror::Object* object, uint64_t new_value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void Set64(mirror::Object* object, uint64_t new_value) SHARED_REQUIRES(Locks::mutator_lock_);
- mirror::Object* GetObj(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Object* GetObj(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive>
void SetObj(mirror::Object* object, mirror::Object* new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ // NO_THREAD_SAFETY_ANALYSIS since we don't know what the callback requires.
template<typename RootVisitorType>
- void VisitRoots(RootVisitorType& visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void VisitRoots(RootVisitorType& visitor) NO_THREAD_SAFETY_ANALYSIS;
- bool IsVolatile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsVolatile() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccVolatile) != 0;
}
// Returns an instance field with this offset in the given class or null if not found.
static ArtField* FindInstanceFieldWithOffset(mirror::Class* klass, uint32_t field_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Returns a static field with this offset in the given class or null if not found.
static ArtField* FindStaticFieldWithOffset(mirror::Class* klass, uint32_t field_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- const char* GetName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const char* GetName() SHARED_REQUIRES(Locks::mutator_lock_);
// Resolves / returns the name from the dex cache.
mirror::String* GetStringName(Thread* self, bool resolve)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- const char* GetTypeDescriptor() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const char* GetTypeDescriptor() SHARED_REQUIRES(Locks::mutator_lock_);
- Primitive::Type GetTypeAsPrimitiveType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ Primitive::Type GetTypeAsPrimitiveType() SHARED_REQUIRES(Locks::mutator_lock_);
- bool IsPrimitiveType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsPrimitiveType() SHARED_REQUIRES(Locks::mutator_lock_);
template <bool kResolve>
- mirror::Class* GetType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Class* GetType() SHARED_REQUIRES(Locks::mutator_lock_);
- size_t FieldSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ size_t FieldSize() SHARED_REQUIRES(Locks::mutator_lock_);
- mirror::DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::DexCache* GetDexCache() SHARED_REQUIRES(Locks::mutator_lock_);
- const DexFile* GetDexFile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const DexFile* GetDexFile() SHARED_REQUIRES(Locks::mutator_lock_);
GcRoot<mirror::Class>& DeclaringClassRoot() {
return declaring_class_;
@@ -192,11 +193,11 @@
private:
mirror::Class* ProxyFindSystemClass(const char* descriptor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::Class* ResolveGetType(uint32_t type_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::Class* ResolveGetType(uint32_t type_idx) SHARED_REQUIRES(Locks::mutator_lock_);
mirror::String* ResolveGetStringName(Thread* self, const DexFile& dex_file, uint32_t string_idx,
mirror::DexCache* dex_cache)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
GcRoot<mirror::Class> declaring_class_;
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 7673418..f37e040 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -19,6 +19,7 @@
#include "arch/context.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
+#include "base/out.h"
#include "base/stringpiece.h"
#include "dex_file-inl.h"
#include "dex_instruction.h"
@@ -94,7 +95,7 @@
}
static bool HasSameNameAndSignature(ArtMethod* method1, ArtMethod* method2)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedAssertNoThreadSuspension ants(Thread::Current(), "HasSameNameAndSignature");
const DexFile* dex_file = method1->GetDexFile();
const DexFile::MethodId& mid = dex_file->GetMethodId(method1->GetDexMethodIndex());
@@ -455,7 +456,7 @@
// Counts the number of references in the parameter list of the corresponding method.
// Note: this count does _not_ include "this" for non-static methods.
static uint32_t GetNumberOfReferenceArgsWithoutReceiver(ArtMethod* method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
uint32_t shorty_len;
const char* shorty = method->GetShorty(&shorty_len);
uint32_t refs = 0;
@@ -565,7 +566,7 @@
const uint8_t* ArtMethod::GetQuickenedInfo() {
bool found = false;
OatFile::OatMethod oat_method =
- Runtime::Current()->GetClassLinker()->FindOatMethodFor(this, &found);
+ Runtime::Current()->GetClassLinker()->FindOatMethodFor(this, outof(found));
if (!found || (oat_method.GetQuickCode() != nullptr)) {
return nullptr;
}
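
The outof(found) rewrite above uses the out-parameter wrapper introduced by the new base/out.h later in this diff. A minimal sketch of the pattern; the FindOatMethodFor declarations are illustrative, while out<>, outof() and the pointer-decay behavior come from that header.

// Alternative callee signatures (not overloads): the value returned by
// outof() converts to out<bool> and also decays to bool* for legacy code.
OatFile::OatMethod FindOatMethodFor(ArtMethod* method, out<bool> found);
OatFile::OatMethod FindOatMethodFor(ArtMethod* method, bool* found);

bool found = false;
OatFile::OatMethod oat_method =
    class_linker->FindOatMethodFor(method, outof(found));
if (!found) {
  // 'found' was written through the wrapper, exactly as with &found before.
}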
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 4169c5e..90352b7 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -54,24 +54,24 @@
static ArtMethod* FromReflectedMethod(const ScopedObjectAccessAlreadyRunnable& soa,
jobject jlr_method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- ALWAYS_INLINE mirror::Class* GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE mirror::Class* GetDeclaringClass() SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE mirror::Class* GetDeclaringClassNoBarrier()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE mirror::Class* GetDeclaringClassUnchecked()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void SetDeclaringClass(mirror::Class *new_declaring_class)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static MemberOffset DeclaringClassOffset() {
return MemberOffset(OFFSETOF_MEMBER(ArtMethod, declaring_class_));
}
- ALWAYS_INLINE uint32_t GetAccessFlags() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE uint32_t GetAccessFlags() SHARED_REQUIRES(Locks::mutator_lock_);
void SetAccessFlags(uint32_t new_access_flags) {
// Not called within a transaction.
@@ -79,35 +79,35 @@
}
// Approximate what kind of method call would be used for this method.
- InvokeType GetInvokeType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ InvokeType GetInvokeType() SHARED_REQUIRES(Locks::mutator_lock_);
// Returns true if the method is declared public.
- bool IsPublic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsPublic() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccPublic) != 0;
}
// Returns true if the method is declared private.
- bool IsPrivate() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsPrivate() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccPrivate) != 0;
}
// Returns true if the method is declared static.
- bool IsStatic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsStatic() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccStatic) != 0;
}
// Returns true if the method is a constructor.
- bool IsConstructor() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsConstructor() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccConstructor) != 0;
}
// Returns true if the method is a class initializer.
- bool IsClassInitializer() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsClassInitializer() SHARED_REQUIRES(Locks::mutator_lock_) {
return IsConstructor() && IsStatic();
}
// Returns true if the method is static, private, or a constructor.
- bool IsDirect() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsDirect() SHARED_REQUIRES(Locks::mutator_lock_) {
return IsDirect(GetAccessFlags());
}
@@ -116,56 +116,56 @@
}
// Returns true if the method is declared synchronized.
- bool IsSynchronized() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsSynchronized() SHARED_REQUIRES(Locks::mutator_lock_) {
uint32_t synchronized = kAccSynchronized | kAccDeclaredSynchronized;
return (GetAccessFlags() & synchronized) != 0;
}
- bool IsFinal() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsFinal() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccFinal) != 0;
}
- bool IsMiranda() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsMiranda() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccMiranda) != 0;
}
- bool IsNative() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsNative() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccNative) != 0;
}
- bool ShouldNotInline() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool ShouldNotInline() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccDontInline) != 0;
}
- void SetShouldNotInline() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetShouldNotInline() SHARED_REQUIRES(Locks::mutator_lock_) {
SetAccessFlags(GetAccessFlags() | kAccDontInline);
}
- bool IsFastNative() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsFastNative() SHARED_REQUIRES(Locks::mutator_lock_) {
uint32_t mask = kAccFastNative | kAccNative;
return (GetAccessFlags() & mask) == mask;
}
- bool IsAbstract() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsAbstract() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccAbstract) != 0;
}
- bool IsSynthetic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsSynthetic() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccSynthetic) != 0;
}
- bool IsProxyMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsProxyMethod() SHARED_REQUIRES(Locks::mutator_lock_);
- bool IsPreverified() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsPreverified() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccPreverified) != 0;
}
- void SetPreverified() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetPreverified() SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(!IsPreverified());
SetAccessFlags(GetAccessFlags() | kAccPreverified);
}
- bool IsOptimized(size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsOptimized(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_) {
// Temporary solution for detecting if a method has been optimized: the compiler
// does not create a GC map. Instead, the vmap table contains the stack map
// (as in stack_map.h).
@@ -175,18 +175,18 @@
&& GetNativeGcMap(pointer_size) == nullptr;
}
- bool CheckIncompatibleClassChange(InvokeType type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool CheckIncompatibleClassChange(InvokeType type) SHARED_REQUIRES(Locks::mutator_lock_);
- uint16_t GetMethodIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ uint16_t GetMethodIndex() SHARED_REQUIRES(Locks::mutator_lock_);
// Doesn't do erroneous / unresolved class checks.
- uint16_t GetMethodIndexDuringLinking() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ uint16_t GetMethodIndexDuringLinking() SHARED_REQUIRES(Locks::mutator_lock_);
- size_t GetVtableIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ size_t GetVtableIndex() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetMethodIndex();
}
- void SetMethodIndex(uint16_t new_method_index) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetMethodIndex(uint16_t new_method_index) SHARED_REQUIRES(Locks::mutator_lock_) {
// Not called within a transaction.
method_index_ = new_method_index;
}
@@ -211,7 +211,7 @@
// Number of 32bit registers that would be required to hold all the arguments
static size_t NumArgRegisters(const StringPiece& shorty);
- ALWAYS_INLINE uint32_t GetDexMethodIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE uint32_t GetDexMethodIndex() SHARED_REQUIRES(Locks::mutator_lock_);
void SetDexMethodIndex(uint32_t new_idx) {
// Not called within a transaction.
@@ -227,36 +227,36 @@
}
ALWAYS_INLINE mirror::PointerArray* GetDexCacheResolvedMethods()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE ArtMethod* GetDexCacheResolvedMethod(uint16_t method_idx, size_t ptr_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE void SetDexCacheResolvedMethod(uint16_t method_idx, ArtMethod* new_method,
size_t ptr_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE void SetDexCacheResolvedMethods(mirror::PointerArray* new_dex_cache_methods)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool HasDexCacheResolvedMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ bool HasDexCacheResolvedMethods() SHARED_REQUIRES(Locks::mutator_lock_);
bool HasSameDexCacheResolvedMethods(ArtMethod* other)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool HasSameDexCacheResolvedMethods(mirror::PointerArray* other_cache)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template <bool kWithCheck = true>
mirror::Class* GetDexCacheResolvedType(uint32_t type_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void SetDexCacheResolvedTypes(mirror::ObjectArray<mirror::Class>* new_dex_cache_types)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool HasDexCacheResolvedTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool HasSameDexCacheResolvedTypes(ArtMethod* other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ bool HasDexCacheResolvedTypes() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool HasSameDexCacheResolvedTypes(ArtMethod* other) SHARED_REQUIRES(Locks::mutator_lock_);
bool HasSameDexCacheResolvedTypes(mirror::ObjectArray<mirror::Class>* other_cache)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Get the Class* from the type index into this method's dex cache.
mirror::Class* GetClassFromTypeIndex(uint16_t type_idx, bool resolve)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Find the method that this method overrides.
- ArtMethod* FindOverriddenMethod(size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtMethod* FindOverriddenMethod(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
// Find the method index for this method within other_dexfile. If this method isn't present then
// return DexFile::kDexNoIndex. The name_and_signature_idx MUST refer to a MethodId with the same
@@ -264,10 +264,10 @@
// in the other_dexfile.
uint32_t FindDexMethodIndexInOtherDexFile(const DexFile& other_dexfile,
uint32_t name_and_signature_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue* result, const char* shorty)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
const void* GetEntryPointFromQuickCompiledCode() {
return GetEntryPointFromQuickCompiledCodePtrSize(sizeof(void*));
@@ -287,7 +287,7 @@
entry_point_from_quick_compiled_code, pointer_size);
}
- uint32_t GetCodeSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ uint32_t GetCodeSize() SHARED_REQUIRES(Locks::mutator_lock_);
// Check whether the given PC is within the quick compiled code associated with this method's
// quick entrypoint. This code isn't robust for instrumentation, etc. and is only used for
@@ -297,12 +297,12 @@
reinterpret_cast<uintptr_t>(GetEntryPointFromQuickCompiledCode()), pc);
}
- void AssertPcIsWithinQuickCode(uintptr_t pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void AssertPcIsWithinQuickCode(uintptr_t pc) SHARED_REQUIRES(Locks::mutator_lock_);
// Returns true if the entrypoint points to the interpreter, as
// opposed to the compiled code, that is, this method will be
// interpreted on invocation.
- bool IsEntrypointInterpreter() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsEntrypointInterpreter() SHARED_REQUIRES(Locks::mutator_lock_);
uint32_t GetQuickOatCodeOffset();
void SetQuickOatCodeOffset(uint32_t code_offset);
@@ -317,37 +317,37 @@
// Actual entry point pointer to compiled oat code or null.
const void* GetQuickOatEntryPoint(size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Actual pointer to compiled oat code or null.
const void* GetQuickOatCodePointer(size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return EntryPointToCodePointer(GetQuickOatEntryPoint(pointer_size));
}
// Callers should wrap the uint8_t* in a MappingTable instance for convenient access.
const uint8_t* GetMappingTable(size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
const uint8_t* GetMappingTable(const void* code_pointer, size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Callers should wrap the uint8_t* in a VmapTable instance for convenient access.
const uint8_t* GetVmapTable(size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
const uint8_t* GetVmapTable(const void* code_pointer, size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- const uint8_t* GetQuickenedInfo() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const uint8_t* GetQuickenedInfo() SHARED_REQUIRES(Locks::mutator_lock_);
- CodeInfo GetOptimizedCodeInfo() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ CodeInfo GetOptimizedCodeInfo() SHARED_REQUIRES(Locks::mutator_lock_);
// Callers should wrap the uint8_t* in a GcMap instance for convenient access.
const uint8_t* GetNativeGcMap(size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
const uint8_t* GetNativeGcMap(const void* code_pointer, size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template <bool kCheckFrameSize = true>
- uint32_t GetFrameSizeInBytes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uint32_t GetFrameSizeInBytes() SHARED_REQUIRES(Locks::mutator_lock_) {
uint32_t result = GetQuickFrameInfo().FrameSizeInBytes();
if (kCheckFrameSize) {
DCHECK_LE(static_cast<size_t>(kStackAlignment), result);
@@ -355,30 +355,30 @@
return result;
}
- QuickMethodFrameInfo GetQuickFrameInfo() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ QuickMethodFrameInfo GetQuickFrameInfo() SHARED_REQUIRES(Locks::mutator_lock_);
QuickMethodFrameInfo GetQuickFrameInfo(const void* code_pointer)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- FrameOffset GetReturnPcOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ FrameOffset GetReturnPcOffset() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetReturnPcOffset(GetFrameSizeInBytes());
}
FrameOffset GetReturnPcOffset(uint32_t frame_size_in_bytes)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK_EQ(frame_size_in_bytes, GetFrameSizeInBytes());
return FrameOffset(frame_size_in_bytes - sizeof(void*));
}
- FrameOffset GetHandleScopeOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ FrameOffset GetHandleScopeOffset() SHARED_REQUIRES(Locks::mutator_lock_) {
constexpr size_t handle_scope_offset = sizeof(ArtMethod*);
DCHECK_LT(handle_scope_offset, GetFrameSizeInBytes());
return FrameOffset(handle_scope_offset);
}
void RegisterNative(const void* native_method, bool is_fast)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- void UnregisterNative() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void UnregisterNative() SHARED_REQUIRES(Locks::mutator_lock_);
static MemberOffset EntryPointFromJniOffset(size_t pointer_size) {
return MemberOffset(PtrSizedFieldsOffset(pointer_size) + OFFSETOF_MEMBER(
@@ -397,7 +397,7 @@
return GetEntryPoint<void*>(EntryPointFromJniOffset(pointer_size), pointer_size);
}
- void SetEntryPointFromJni(const void* entrypoint) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetEntryPointFromJni(const void* entrypoint) SHARED_REQUIRES(Locks::mutator_lock_) {
SetEntryPointFromJniPtrSize(entrypoint, sizeof(void*));
}
ALWAYS_INLINE void SetEntryPointFromJniPtrSize(const void* entrypoint, size_t pointer_size) {
@@ -409,34 +409,34 @@
ALWAYS_INLINE bool IsRuntimeMethod();
// Is this a hand crafted method used for something like describing callee saves?
- bool IsCalleeSaveMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsCalleeSaveMethod() SHARED_REQUIRES(Locks::mutator_lock_);
- bool IsResolutionMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsResolutionMethod() SHARED_REQUIRES(Locks::mutator_lock_);
- bool IsImtConflictMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsImtConflictMethod() SHARED_REQUIRES(Locks::mutator_lock_);
- bool IsImtUnimplementedMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsImtUnimplementedMethod() SHARED_REQUIRES(Locks::mutator_lock_);
- uintptr_t NativeQuickPcOffset(const uintptr_t pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ uintptr_t NativeQuickPcOffset(const uintptr_t pc) SHARED_REQUIRES(Locks::mutator_lock_);
#ifdef NDEBUG
uintptr_t NativeQuickPcOffset(const uintptr_t pc, const void* quick_entry_point)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return pc - reinterpret_cast<uintptr_t>(quick_entry_point);
}
#else
uintptr_t NativeQuickPcOffset(const uintptr_t pc, const void* quick_entry_point)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
#endif
// Converts a native PC to a dex PC.
uint32_t ToDexPc(const uintptr_t pc, bool abort_on_failure = true)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Converts a dex PC to a native PC.
uintptr_t ToNativeQuickPc(const uint32_t dex_pc, bool abort_on_failure = true)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- MethodReference ToMethodReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ MethodReference ToMethodReference() SHARED_REQUIRES(Locks::mutator_lock_) {
return MethodReference(GetDexFile(), GetDexMethodIndex());
}
@@ -445,63 +445,64 @@
// a move-exception instruction is present.
uint32_t FindCatchBlock(Handle<mirror::Class> exception_type, uint32_t dex_pc,
bool* has_no_move_exception)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ // NO_THREAD_SAFETY_ANALYSIS since we don't know what the callback requires.
template<typename RootVisitorType>
- void VisitRoots(RootVisitorType& visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void VisitRoots(RootVisitorType& visitor) NO_THREAD_SAFETY_ANALYSIS;
- const DexFile* GetDexFile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const DexFile* GetDexFile() SHARED_REQUIRES(Locks::mutator_lock_);
- const char* GetDeclaringClassDescriptor() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const char* GetDeclaringClassDescriptor() SHARED_REQUIRES(Locks::mutator_lock_);
- const char* GetShorty() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const char* GetShorty() SHARED_REQUIRES(Locks::mutator_lock_) {
uint32_t unused_length;
return GetShorty(&unused_length);
}
- const char* GetShorty(uint32_t* out_length) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const char* GetShorty(uint32_t* out_length) SHARED_REQUIRES(Locks::mutator_lock_);
- const Signature GetSignature() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const Signature GetSignature() SHARED_REQUIRES(Locks::mutator_lock_);
- ALWAYS_INLINE const char* GetName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE const char* GetName() SHARED_REQUIRES(Locks::mutator_lock_);
- mirror::String* GetNameAsString(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::String* GetNameAsString(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
- const DexFile::CodeItem* GetCodeItem() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const DexFile::CodeItem* GetCodeItem() SHARED_REQUIRES(Locks::mutator_lock_);
- bool IsResolvedTypeIdx(uint16_t type_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsResolvedTypeIdx(uint16_t type_idx) SHARED_REQUIRES(Locks::mutator_lock_);
- int32_t GetLineNumFromDexPC(uint32_t dex_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ int32_t GetLineNumFromDexPC(uint32_t dex_pc) SHARED_REQUIRES(Locks::mutator_lock_);
- const DexFile::ProtoId& GetPrototype() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const DexFile::ProtoId& GetPrototype() SHARED_REQUIRES(Locks::mutator_lock_);
- const DexFile::TypeList* GetParameterTypeList() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const DexFile::TypeList* GetParameterTypeList() SHARED_REQUIRES(Locks::mutator_lock_);
- const char* GetDeclaringClassSourceFile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const char* GetDeclaringClassSourceFile() SHARED_REQUIRES(Locks::mutator_lock_);
- uint16_t GetClassDefIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ uint16_t GetClassDefIndex() SHARED_REQUIRES(Locks::mutator_lock_);
- const DexFile::ClassDef& GetClassDef() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const DexFile::ClassDef& GetClassDef() SHARED_REQUIRES(Locks::mutator_lock_);
- const char* GetReturnTypeDescriptor() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const char* GetReturnTypeDescriptor() SHARED_REQUIRES(Locks::mutator_lock_);
const char* GetTypeDescriptorFromTypeIdx(uint16_t type_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// May cause thread suspension due to GetClassFromTypeIdx calling ResolveType; this has caused a
// large number of bugs at call sites.
- mirror::Class* GetReturnType(bool resolve = true) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Class* GetReturnType(bool resolve = true) SHARED_REQUIRES(Locks::mutator_lock_);
- mirror::ClassLoader* GetClassLoader() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::ClassLoader* GetClassLoader() SHARED_REQUIRES(Locks::mutator_lock_);
- mirror::DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::DexCache* GetDexCache() SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE ArtMethod* GetInterfaceMethodIfProxy(size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// May cause thread suspension due to class resolution.
bool EqualParameters(Handle<mirror::ObjectArray<mirror::Class>> params)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Size of an instance of this object.
static size_t ObjectSize(size_t pointer_size) {
@@ -510,10 +511,10 @@
}
void CopyFrom(const ArtMethod* src, size_t image_pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE mirror::ObjectArray<mirror::Class>* GetDexCacheResolvedTypes()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
protected:
// Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index b1d0841..350a0d4 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -103,17 +103,20 @@
ADD_TEST_EQ(THREAD_TOP_QUICK_FRAME_OFFSET,
art::Thread::TopOfManagedStackOffset<__SIZEOF_POINTER__>().Int32Value())
-// Offset of field Thread::tlsPtr_.managed_stack.top_quick_frame_.
+// Offset of field Thread::tlsPtr_.self.
#define THREAD_SELF_OFFSET (THREAD_CARD_TABLE_OFFSET + (9 * __SIZEOF_POINTER__))
ADD_TEST_EQ(THREAD_SELF_OFFSET,
art::Thread::SelfOffset<__SIZEOF_POINTER__>().Int32Value())
-#define THREAD_LOCAL_POS_OFFSET (THREAD_CARD_TABLE_OFFSET + 150 * __SIZEOF_POINTER__)
+// Offset of field Thread::tlsPtr_.thread_local_pos.
+#define THREAD_LOCAL_POS_OFFSET (THREAD_CARD_TABLE_OFFSET + 151 * __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_LOCAL_POS_OFFSET,
art::Thread::ThreadLocalPosOffset<__SIZEOF_POINTER__>().Int32Value())
+// Offset of field Thread::tlsPtr_.thread_local_end.
#define THREAD_LOCAL_END_OFFSET (THREAD_LOCAL_POS_OFFSET + __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_LOCAL_END_OFFSET,
art::Thread::ThreadLocalEndOffset<__SIZEOF_POINTER__>().Int32Value())
+// Offset of field Thread::tlsPtr_.thread_local_objects.
#define THREAD_LOCAL_OBJECTS_OFFSET (THREAD_LOCAL_POS_OFFSET + 2 * __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_LOCAL_OBJECTS_OFFSET,
art::Thread::ThreadLocalObjectsOffset<__SIZEOF_POINTER__>().Int32Value())
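
Illustrative arithmetic for the constant bump, assuming a 64-bit build (__SIZEOF_POINTER__ == 8) and that the 150 -> 151 change reflects one new tlsPtr_ slot landing ahead of thread_local_pos:

// old: THREAD_LOCAL_POS_OFFSET = THREAD_CARD_TABLE_OFFSET + 150 * 8
// new: THREAD_LOCAL_POS_OFFSET = THREAD_CARD_TABLE_OFFSET + 151 * 8
// THREAD_LOCAL_END_OFFSET      = THREAD_LOCAL_POS_OFFSET + 8
// THREAD_LOCAL_OBJECTS_OFFSET  = THREAD_LOCAL_POS_OFFSET + 16
// Each ADD_TEST_EQ pairs the #define with the C++-side Thread::*Offset<8>()
// value, so a wrong bump trips the offset consistency checks.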
diff --git a/runtime/barrier.h b/runtime/barrier.h
index 0e7f61e..02f9f58 100644
--- a/runtime/barrier.h
+++ b/runtime/barrier.h
@@ -39,10 +39,10 @@
virtual ~Barrier();
// Pass through the barrier, decrement the count but do not block.
- void Pass(Thread* self);
+ void Pass(Thread* self) REQUIRES(!lock_);
// Wait on the barrier, decrement the count.
- void Wait(Thread* self);
+ void Wait(Thread* self) REQUIRES(!lock_);
// The following three calls are only safe if we somehow know that no other thread both
// - has been woken up, and
@@ -51,18 +51,18 @@
// to sleep, resulting in a deadlock.
// Increment the count by delta, wait on condition if count is non zero.
- void Increment(Thread* self, int delta) LOCKS_EXCLUDED(lock_);
+ void Increment(Thread* self, int delta) REQUIRES(!lock_);
// Increment the count by delta, wait on condition if count is non zero, with a timeout. Returns
// true if time out occurred.
- bool Increment(Thread* self, int delta, uint32_t timeout_ms) LOCKS_EXCLUDED(lock_);
+ bool Increment(Thread* self, int delta, uint32_t timeout_ms) REQUIRES(!lock_);
// Set the count to a new value. This should only be used if there is no possibility that
// another thread is still in Wait(). See above.
- void Init(Thread* self, int count);
+ void Init(Thread* self, int count) REQUIRES(!lock_);
private:
- void SetCountLocked(Thread* self, int count) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ void SetCountLocked(Thread* self, int count) REQUIRES(lock_);
// Counter; when it reaches 0, all threads blocked on the barrier are signalled.
int count_ GUARDED_BY(lock_);
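
REQUIRES(!lock_) is a negative capability: the caller must not already hold lock_, because the annotated member acquires it itself; clang's -Wthread-safety-negative analysis can then verify callers. A minimal illustration with a hypothetical class built on the same primitives:

class Counter {
 public:
  Counter() : lock_("Counter lock"), value_(0) {}
  // Caller must not hold lock_; the method takes it for the duration.
  void Add(int delta) REQUIRES(!lock_) {
    MutexLock mu(Thread::Current(), lock_);
    value_ += delta;
  }
 private:
  Mutex lock_;
  int value_ GUARDED_BY(lock_);
};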
diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h
index d977941..c4b36ee 100644
--- a/runtime/base/arena_allocator.h
+++ b/runtime/base/arena_allocator.h
@@ -182,12 +182,12 @@
public:
explicit ArenaPool(bool use_malloc = true, bool low_4gb = false);
~ArenaPool();
- Arena* AllocArena(size_t size) LOCKS_EXCLUDED(lock_);
- void FreeArenaChain(Arena* first) LOCKS_EXCLUDED(lock_);
- size_t GetBytesAllocated() const LOCKS_EXCLUDED(lock_);
+ Arena* AllocArena(size_t size) REQUIRES(!lock_);
+ void FreeArenaChain(Arena* first) REQUIRES(!lock_);
+ size_t GetBytesAllocated() const REQUIRES(!lock_);
// Trim the maps in arenas by madvising, used by JIT to reduce memory usage. This only works
// when use_malloc is false.
- void TrimMaps() LOCKS_EXCLUDED(lock_);
+ void TrimMaps() REQUIRES(!lock_);
private:
const bool use_malloc_;
diff --git a/runtime/base/hash_set.h b/runtime/base/hash_set.h
index 709d9ae..d110fe3 100644
--- a/runtime/base/hash_set.h
+++ b/runtime/base/hash_set.h
@@ -470,31 +470,31 @@
}
void DeallocateStorage() {
- if (num_buckets_ != 0) {
- if (owns_data_) {
- for (size_t i = 0; i < NumBuckets(); ++i) {
- allocfn_.destroy(allocfn_.address(data_[i]));
- }
- allocfn_.deallocate(data_, NumBuckets());
- owns_data_ = false;
+ if (owns_data_) {
+ for (size_t i = 0; i < NumBuckets(); ++i) {
+ allocfn_.destroy(allocfn_.address(data_[i]));
}
- data_ = nullptr;
- num_buckets_ = 0;
+ if (data_ != nullptr) {
+ allocfn_.deallocate(data_, NumBuckets());
+ }
+ owns_data_ = false;
}
+ data_ = nullptr;
+ num_buckets_ = 0;
}
// Expand the set based on the load factors.
void Expand() {
size_t min_index = static_cast<size_t>(Size() / min_load_factor_);
- if (min_index < kMinBuckets) {
- min_index = kMinBuckets;
- }
// Resize based on the minimum load factor.
Resize(min_index);
}
// Expand / shrink the table to the new specified size.
void Resize(size_t new_size) {
+ if (new_size < kMinBuckets) {
+ new_size = kMinBuckets;
+ }
DCHECK_GE(new_size, Size());
T* const old_data = data_;
size_t old_num_buckets = num_buckets_;
diff --git a/runtime/base/logging.cc b/runtime/base/logging.cc
index 859de4b..7a620e3 100644
--- a/runtime/base/logging.cc
+++ b/runtime/base/logging.cc
@@ -26,7 +26,7 @@
#include "utils.h"
// Headers for LogMessage::LogLine.
-#ifdef HAVE_ANDROID_OS
+#ifdef __ANDROID__
#include "cutils/log.h"
#else
#include <sys/types.h>
@@ -47,7 +47,7 @@
// Print INTERNAL_FATAL messages directly instead of at destruction time. This only works on the
// host right now: for the device, a stream buf collating output into lines and calling LogLine or
// lower-level logging is necessary.
-#ifdef HAVE_ANDROID_OS
+#ifdef __ANDROID__
static constexpr bool kPrintInternalFatalDirectly = false;
#else
static constexpr bool kPrintInternalFatalDirectly = !kIsTargetBuild;
@@ -234,7 +234,7 @@
return data_->GetBuffer();
}
-#ifdef HAVE_ANDROID_OS
+#ifdef __ANDROID__
static const android_LogPriority kLogSeverityToAndroidLogPriority[] = {
ANDROID_LOG_VERBOSE, ANDROID_LOG_DEBUG, ANDROID_LOG_INFO, ANDROID_LOG_WARN,
ANDROID_LOG_ERROR, ANDROID_LOG_FATAL, ANDROID_LOG_FATAL
@@ -245,7 +245,7 @@
void LogMessage::LogLine(const char* file, unsigned int line, LogSeverity log_severity,
const char* message) {
-#ifdef HAVE_ANDROID_OS
+#ifdef __ANDROID__
const char* tag = ProgramInvocationShortName();
int priority = kLogSeverityToAndroidLogPriority[log_severity];
if (priority == ANDROID_LOG_FATAL) {
@@ -264,7 +264,7 @@
void LogMessage::LogLineLowStack(const char* file, unsigned int line, LogSeverity log_severity,
const char* message) {
-#ifdef HAVE_ANDROID_OS
+#ifdef __ANDROID__
// Use android_writeLog() to avoid stack-based buffers used by android_printLog().
const char* tag = ProgramInvocationShortName();
int priority = kLogSeverityToAndroidLogPriority[log_severity];
diff --git a/runtime/base/logging.h b/runtime/base/logging.h
index 93d4edc..2cd1a4d 100644
--- a/runtime/base/logging.h
+++ b/runtime/base/logging.h
@@ -237,7 +237,7 @@
public:
LogMessage(const char* file, unsigned int line, LogSeverity severity, int error);
- ~LogMessage(); // TODO: enable LOCKS_EXCLUDED(Locks::logging_lock_).
+ ~LogMessage(); // TODO: enable REQUIRES(!Locks::logging_lock_).
// Returns the stream associated with the message, the LogMessage performs output when it goes
// out of scope.
diff --git a/runtime/base/macros.h b/runtime/base/macros.h
index 5c59647..1d5dee2 100644
--- a/runtime/base/macros.h
+++ b/runtime/base/macros.h
@@ -244,18 +244,14 @@
#define ACQUIRED_AFTER(...) THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__))
#define ACQUIRED_BEFORE(...) THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__))
-#define EXCLUSIVE_LOCKS_REQUIRED(...) THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(__VA_ARGS__))
#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x))
#define GUARDED_VAR THREAD_ANNOTATION_ATTRIBUTE__(guarded)
-#define LOCKABLE THREAD_ANNOTATION_ATTRIBUTE__(lockable)
#define LOCK_RETURNED(x) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))
-#define LOCKS_EXCLUDED(...) THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__))
#define NO_THREAD_SAFETY_ANALYSIS THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis)
#define PT_GUARDED_BY(x)
// THREAD_ANNOTATION_ATTRIBUTE__(point_to_guarded_by(x))
#define PT_GUARDED_VAR THREAD_ANNOTATION_ATTRIBUTE__(point_to_guarded)
#define SCOPED_LOCKABLE THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
-#define SHARED_LOCKS_REQUIRED(...) THREAD_ANNOTATION_ATTRIBUTE__(shared_locks_required(__VA_ARGS__))
#if defined(__clang__)
#define EXCLUSIVE_LOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock_function(__VA_ARGS__))
@@ -263,12 +259,43 @@
#define SHARED_LOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(shared_lock_function(__VA_ARGS__))
#define SHARED_TRYLOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock_function(__VA_ARGS__))
#define UNLOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(unlock_function(__VA_ARGS__))
+#define REQUIRES(...) THREAD_ANNOTATION_ATTRIBUTE__(requires_capability(__VA_ARGS__))
+#define SHARED_REQUIRES(...) THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))
+#define CAPABILITY(...) THREAD_ANNOTATION_ATTRIBUTE__(capability(__VA_ARGS__))
+#define SHARED_CAPABILITY(...) THREAD_ANNOTATION_ATTRIBUTE__(shared_capability(__VA_ARGS__))
+#define ASSERT_CAPABILITY(...) THREAD_ANNOTATION_ATTRIBUTE__(assert_capability(__VA_ARGS__))
+#define ASSERT_SHARED_CAPABILITY(...) THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_capability(__VA_ARGS__))
+#define RETURN_CAPABILITY(...) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(__VA_ARGS__))
+#define TRY_ACQUIRE(...) THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_capability(__VA_ARGS__))
+#define TRY_ACQUIRE_SHARED(...) THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_shared_capability(__VA_ARGS__))
+#define ACQUIRE(...) THREAD_ANNOTATION_ATTRIBUTE__(acquire_capability(__VA_ARGS__))
+#define ACQUIRE_SHARED(...) THREAD_ANNOTATION_ATTRIBUTE__(acquire_shared_capability(__VA_ARGS__))
+#define RELEASE(...) THREAD_ANNOTATION_ATTRIBUTE__(release_capability(__VA_ARGS__))
+#define RELEASE_SHARED(...) THREAD_ANNOTATION_ATTRIBUTE__(release_shared_capability(__VA_ARGS__))
+#define SCOPED_CAPABILITY THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
#else
#define EXCLUSIVE_LOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock(__VA_ARGS__))
#define EXCLUSIVE_TRYLOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(exclusive_trylock(__VA_ARGS__))
#define SHARED_LOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(shared_lock(__VA_ARGS__))
#define SHARED_TRYLOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock(__VA_ARGS__))
#define UNLOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(unlock(__VA_ARGS__))
+#define REQUIRES(...)
+#define SHARED_REQUIRES(...)
+#define CAPABILITY(...)
+#define SHARED_CAPABILITY(...)
+#define ASSERT_CAPABILITY(...)
+#define ASSERT_SHARED_CAPABILITY(...)
+#define RETURN_CAPABILITY(...)
+#define TRY_ACQUIRE(...)
+#define TRY_ACQUIRE_SHARED(...)
+#define ACQUIRE(...)
+#define ACQUIRE_SHARED(...)
+#define RELEASE(...)
+#define RELEASE_SHARED(...)
+#define SCOPED_CAPABILITY
#endif
+#define LOCKABLE CAPABILITY("mutex")
+#define SHARED_LOCKABLE SHARED_CAPABILITY("mutex")
+
#endif // ART_RUNTIME_BASE_MACROS_H_
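
Taken together, the macro rework maps the old lock annotations onto clang's capability vocabulary. The mapping below is inferred from the replacements made throughout this change (under the non-clang branch the new macros expand to nothing):

// EXCLUSIVE_LOCKS_REQUIRED(mu)  ->  REQUIRES(mu)
// SHARED_LOCKS_REQUIRED(mu)     ->  SHARED_REQUIRES(mu)
// LOCKS_EXCLUDED(mu)            ->  REQUIRES(!mu)      (negative capability)
// EXCLUSIVE_LOCK_FUNCTION(mu)   ->  ACQUIRE(mu)
// SHARED_LOCK_FUNCTION(mu)      ->  ACQUIRE_SHARED(mu)
// UNLOCK_FUNCTION(mu)           ->  RELEASE(mu) or RELEASE_SHARED(mu)
// LOCKABLE                      ->  CAPABILITY("mutex")
// SHARED_LOCKABLE (new)         ->  SHARED_CAPABILITY("mutex")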
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index c591a51..62cfb52 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -62,6 +62,7 @@
Mutex* Locks::trace_lock_ = nullptr;
Mutex* Locks::unexpected_signal_lock_ = nullptr;
Mutex* Locks::lambda_table_lock_ = nullptr;
+Uninterruptible Roles::uninterruptible_;
struct AllMutexData {
// A guard for all_mutexes_ that's not a mutex (Mutexes must CAS to acquire and busy wait).
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 5b258e5..d0504d9 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -43,8 +43,8 @@
namespace art {
-class LOCKABLE ReaderWriterMutex;
-class LOCKABLE MutatorMutex;
+class SHARED_LOCKABLE ReaderWriterMutex;
+class SHARED_LOCKABLE MutatorMutex;
class ScopedContentionRecorder;
class Thread;
@@ -214,35 +214,37 @@
virtual bool IsMutex() const { return true; }
// Block until mutex is free then acquire exclusive access.
- void ExclusiveLock(Thread* self) EXCLUSIVE_LOCK_FUNCTION();
- void Lock(Thread* self) EXCLUSIVE_LOCK_FUNCTION() { ExclusiveLock(self); }
+ void ExclusiveLock(Thread* self) ACQUIRE();
+ void Lock(Thread* self) ACQUIRE() { ExclusiveLock(self); }
// Returns true if acquires exclusive access, false otherwise.
- bool ExclusiveTryLock(Thread* self) EXCLUSIVE_TRYLOCK_FUNCTION(true);
- bool TryLock(Thread* self) EXCLUSIVE_TRYLOCK_FUNCTION(true) { return ExclusiveTryLock(self); }
+ bool ExclusiveTryLock(Thread* self) TRY_ACQUIRE(true);
+ bool TryLock(Thread* self) TRY_ACQUIRE(true) { return ExclusiveTryLock(self); }
// Release exclusive access.
- void ExclusiveUnlock(Thread* self) UNLOCK_FUNCTION();
- void Unlock(Thread* self) UNLOCK_FUNCTION() { ExclusiveUnlock(self); }
+ void ExclusiveUnlock(Thread* self) RELEASE();
+ void Unlock(Thread* self) RELEASE() { ExclusiveUnlock(self); }
// Is the current thread the exclusive holder of the Mutex.
bool IsExclusiveHeld(const Thread* self) const;
// Assert that the Mutex is exclusively held by the current thread.
- void AssertExclusiveHeld(const Thread* self) {
+ void AssertExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(this) {
if (kDebugLocking && (gAborting == 0)) {
CHECK(IsExclusiveHeld(self)) << *this;
}
}
- void AssertHeld(const Thread* self) { AssertExclusiveHeld(self); }
+ void AssertHeld(const Thread* self) ASSERT_CAPABILITY(this) { AssertExclusiveHeld(self); }
// Assert that the Mutex is not held by the current thread.
- void AssertNotHeldExclusive(const Thread* self) {
+ void AssertNotHeldExclusive(const Thread* self) ASSERT_CAPABILITY(!*this) {
if (kDebugLocking && (gAborting == 0)) {
CHECK(!IsExclusiveHeld(self)) << *this;
}
}
- void AssertNotHeld(const Thread* self) { AssertNotHeldExclusive(self); }
+ void AssertNotHeld(const Thread* self) ASSERT_CAPABILITY(!*this) {
+ AssertNotHeldExclusive(self);
+ }
// Id associated with exclusive owner. No memory ordering semantics if called from a thread other
// than the owner.
@@ -255,6 +257,9 @@
virtual void Dump(std::ostream& os) const;
+ // For negative capabilities in clang annotations.
+ const Mutex& operator!() const { return *this; }
+
private:
#if ART_USE_FUTEXES
// 0 is unheld, 1 is held.
@@ -290,7 +295,7 @@
// Shared(n) | Block | error | SharedLock(n+1)* | Shared(n-1) or Free
// * for large values of n the SharedLock may block.
std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu);
-class LOCKABLE ReaderWriterMutex : public BaseMutex {
+class SHARED_LOCKABLE ReaderWriterMutex : public BaseMutex {
public:
explicit ReaderWriterMutex(const char* name, LockLevel level = kDefaultMutexLevel);
~ReaderWriterMutex();
@@ -298,12 +303,12 @@
virtual bool IsReaderWriterMutex() const { return true; }
// Block until ReaderWriterMutex is free then acquire exclusive access.
- void ExclusiveLock(Thread* self) EXCLUSIVE_LOCK_FUNCTION();
- void WriterLock(Thread* self) EXCLUSIVE_LOCK_FUNCTION() { ExclusiveLock(self); }
+ void ExclusiveLock(Thread* self) ACQUIRE();
+ void WriterLock(Thread* self) ACQUIRE() { ExclusiveLock(self); }
// Release exclusive access.
- void ExclusiveUnlock(Thread* self) UNLOCK_FUNCTION();
- void WriterUnlock(Thread* self) UNLOCK_FUNCTION() { ExclusiveUnlock(self); }
+ void ExclusiveUnlock(Thread* self) RELEASE();
+ void WriterUnlock(Thread* self) RELEASE() { ExclusiveUnlock(self); }
// Block until ReaderWriterMutex is free and acquire exclusive access. Returns true on success
// or false if timeout is reached.
@@ -313,15 +318,15 @@
#endif
// Block until ReaderWriterMutex is shared or free then acquire a share on the access.
- void SharedLock(Thread* self) SHARED_LOCK_FUNCTION() ALWAYS_INLINE;
- void ReaderLock(Thread* self) SHARED_LOCK_FUNCTION() { SharedLock(self); }
+ void SharedLock(Thread* self) ACQUIRE_SHARED() ALWAYS_INLINE;
+ void ReaderLock(Thread* self) ACQUIRE_SHARED() { SharedLock(self); }
// Try to acquire share of ReaderWriterMutex.
- bool SharedTryLock(Thread* self) EXCLUSIVE_TRYLOCK_FUNCTION(true);
+ bool SharedTryLock(Thread* self) SHARED_TRYLOCK_FUNCTION(true);
// Release a share of the access.
- void SharedUnlock(Thread* self) UNLOCK_FUNCTION() ALWAYS_INLINE;
- void ReaderUnlock(Thread* self) UNLOCK_FUNCTION() { SharedUnlock(self); }
+ void SharedUnlock(Thread* self) RELEASE_SHARED() ALWAYS_INLINE;
+ void ReaderUnlock(Thread* self) RELEASE_SHARED() { SharedUnlock(self); }
// Is the current thread the exclusive holder of the ReaderWriterMutex.
bool IsExclusiveHeld(const Thread* self) const;
@@ -368,6 +373,9 @@
virtual void Dump(std::ostream& os) const;
+ // For negative capabilities in clang annotations.
+ const ReaderWriterMutex& operator!() const { return *this; }
+
private:
#if ART_USE_FUTEXES
// Out-of-inline path for handling contention for a SharedLock.
@@ -402,7 +410,7 @@
// suspended states before exclusive ownership of the mutator mutex is sought.
//
std::ostream& operator<<(std::ostream& os, const MutatorMutex& mu);
-class LOCKABLE MutatorMutex : public ReaderWriterMutex {
+class SHARED_LOCKABLE MutatorMutex : public ReaderWriterMutex {
public:
explicit MutatorMutex(const char* name, LockLevel level = kDefaultMutexLevel)
: ReaderWriterMutex(name, level) {}
@@ -410,6 +418,9 @@
virtual bool IsMutatorMutex() const { return true; }
+ // For negative capabilities in clang annotations.
+ const MutatorMutex& operator!() const { return *this; }
+
private:
friend class Thread;
void TransitionFromRunnableToSuspended(Thread* self) UNLOCK_FUNCTION() ALWAYS_INLINE;
@@ -458,13 +469,13 @@
// Scoped locker/unlocker for a regular Mutex that acquires mu upon construction and releases it
// upon destruction.
-class SCOPED_LOCKABLE MutexLock {
+class SCOPED_CAPABILITY MutexLock {
public:
- explicit MutexLock(Thread* self, Mutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) : self_(self), mu_(mu) {
+ explicit MutexLock(Thread* self, Mutex& mu) ACQUIRE(mu) : self_(self), mu_(mu) {
mu_.ExclusiveLock(self_);
}
- ~MutexLock() UNLOCK_FUNCTION() {
+ ~MutexLock() RELEASE() {
mu_.ExclusiveUnlock(self_);
}
@@ -478,14 +489,14 @@
// Scoped locker/unlocker for a ReaderWriterMutex that acquires read access to mu upon
// construction and releases it upon destruction.
-class SCOPED_LOCKABLE ReaderMutexLock {
+class SCOPED_CAPABILITY ReaderMutexLock {
public:
- explicit ReaderMutexLock(Thread* self, ReaderWriterMutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) :
+ explicit ReaderMutexLock(Thread* self, ReaderWriterMutex& mu) ACQUIRE(mu) :
self_(self), mu_(mu) {
mu_.SharedLock(self_);
}
- ~ReaderMutexLock() UNLOCK_FUNCTION() {
+ ~ReaderMutexLock() RELEASE() {
mu_.SharedUnlock(self_);
}
@@ -500,7 +511,7 @@
// Scoped locker/unlocker for a ReaderWriterMutex that acquires write access to mu upon
// construction and releases it upon destruction.
-class SCOPED_LOCKABLE WriterMutexLock {
+class SCOPED_CAPABILITY WriterMutexLock {
public:
explicit WriterMutexLock(Thread* self, ReaderWriterMutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) :
self_(self), mu_(mu) {
@@ -520,6 +531,17 @@
// "WriterMutexLock mu(lock)".
#define WriterMutexLock(x) static_assert(0, "WriterMutexLock declaration missing variable name")
+// For StartNoThreadSuspension and EndNoThreadSuspension.
+class CAPABILITY("role") Role {
+ public:
+ void Acquire() ACQUIRE() {}
+ void Release() RELEASE() {}
+ const Role& operator!() const { return *this; }
+};
+
+class Uninterruptible : public Role {
+};
+
// Global mutexes corresponding to the levels above.
class Locks {
public:
@@ -655,6 +677,11 @@
static Mutex* lambda_table_lock_ ACQUIRED_AFTER(mutator_lock_);
};
+class Roles {
+ public:
+ static Uninterruptible uninterruptible_;
+};
+
} // namespace art
#endif // ART_RUNTIME_BASE_MUTEX_H_
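
A hedged sketch of how the new pieces compose: the Role capability lets "no thread suspension" regions be checked like a lock, and the operator!() overloads exist so a negated expression such as !Locks::mutator_lock_ is a well-formed attribute argument. Function names below are invented for illustration; only Roles::uninterruptible_ and the annotation macros come from this change.

// Must run inside a no-suspension region: callers have to hold the role,
// e.g. via Start/EndNoThreadSuspension-style helpers annotated with
// ACQUIRE(Roles::uninterruptible_) / RELEASE(Roles::uninterruptible_).
void DoWorkThatMustNotSuspend() REQUIRES(Roles::uninterruptible_);

// Must be entered without the mutator lock; operator!() returns the mutex
// itself, which is what lets clang parse and track this negative form.
void DoWorkWithoutMutatorLock() REQUIRES(!Locks::mutator_lock_);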
diff --git a/runtime/base/mutex_test.cc b/runtime/base/mutex_test.cc
index 3750c81..340550f 100644
--- a/runtime/base/mutex_test.cc
+++ b/runtime/base/mutex_test.cc
@@ -101,18 +101,18 @@
: mu("test mutex", kDefaultMutexLevel, true), cv("test condition variable", mu) {
}
- static void* Callback(void* arg) {
- RecursiveLockWait* state = reinterpret_cast<RecursiveLockWait*>(arg);
- state->mu.Lock(Thread::Current());
- state->cv.Signal(Thread::Current());
- state->mu.Unlock(Thread::Current());
- return nullptr;
- }
-
Mutex mu;
ConditionVariable cv;
};
+static void* RecursiveLockWaitCallback(void* arg) {
+ RecursiveLockWait* state = reinterpret_cast<RecursiveLockWait*>(arg);
+ state->mu.Lock(Thread::Current());
+ state->cv.Signal(Thread::Current());
+ state->mu.Unlock(Thread::Current());
+ return nullptr;
+}
+
// GCC has trouble with our mutex tests, so we have to turn off thread safety analysis.
static void RecursiveLockWaitTest() NO_THREAD_SAFETY_ANALYSIS {
RecursiveLockWait state;
@@ -120,8 +120,7 @@
state.mu.Lock(Thread::Current());
pthread_t pthread;
- int pthread_create_result = pthread_create(&pthread, nullptr, RecursiveLockWait::Callback,
- &state);
+ int pthread_create_result = pthread_create(&pthread, nullptr, RecursiveLockWaitCallback, &state);
ASSERT_EQ(0, pthread_create_result);
state.cv.Wait(Thread::Current());
diff --git a/runtime/base/out.h b/runtime/base/out.h
new file mode 100644
index 0000000..7b4bc12
--- /dev/null
+++ b/runtime/base/out.h
@@ -0,0 +1,279 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_BASE_OUT_H_
+#define ART_RUNTIME_BASE_OUT_H_
+
+#include <base/macros.h>
+#include <base/logging.h>
+
+#include <memory>
+// A zero-overhead abstraction marker that indicates this value is meant to be used as an out
+// parameter for functions. It mimics the semantics of a pointer that the function will
+// dereference and write its output value into.
+//
+// Inspired by the 'out' language keyword in C#.
+//
+// Declaration example:
+// int do_work(size_t args, out<int> result);
+// // returns 0 on success, sets result, otherwise error code
+//
+// Use-site example:
+// // (1) -- out of a local variable or field
+// int res;
+// if (do_work(1, outof(res))) {
+// cout << "success: " << res;
+// }
+// // (2) -- out of an iterator
+// std::vector<int> list = {1};
+// std::vector<int>::iterator it = list.begin();
+// if (do_work(2, outof_iterator(it))) {
+// cout << "success: " << list[0];
+// }
+// // (3) -- out of a pointer
+// int* array = &some_other_value;
+// if (do_work(3, outof_ptr(array))) {
+// cout << "success: " << *array;
+// }
+//
+// The type will also automatically decay into a C-style pointer for compatibility
+// with legacy code that expects pointers.
+//
+// Declaration example:
+// void write_data(int* res) { *res = 5; }
+//
+// Use-site example:
+// int data;
+// write_data(outof(data));
+// // data is now '5'
+// (The other outof_* functions can be used analogously when the callee expects a C-style pointer.)
+//
+// ---------------
+//
+// Other typical pointer operations such as addition, subtraction, etc are banned
+// since there is exactly one value being output.
+//
+namespace art {
+
+// Forward declarations. See below for specific functions.
+template <typename T>
+struct out_convertible; // Implicitly converts to out<T> or T*.
+
+// Helper function that automatically infers 'T'
+//
+// Returns a type that is implicitly convertible to either out<T> or T* depending
+// on the call site.
+//
+// Example:
+// int do_work(size_t args, out<int> result);
+// // returns 0 on success, sets result, otherwise error code
+//
+// Usage:
+// int res;
+// if (do_work(1, outof(res))) {
+// cout << "success: " << res;
+// }
+template <typename T>
+out_convertible<T> outof(T& param) ALWAYS_INLINE;
+
+// Helper function that automatically infers 'T' from a container<T>::iterator.
+// To use when the argument is already inside an iterator.
+//
+// Returns a type that is implicitly convertible to either out<T> or T* depending
+// on the call site.
+//
+// Example:
+// int do_work(size_t args, out<int> result);
+// // returns 0 on success, sets result, otherwise error code
+//
+// Usage:
+// std::vector<int> list = {1};
+// std::vector<int>::iterator it = list.begin();
+// if (do_work(2, outof_iterator(it))) {
+// cout << "success: " << list[0];
+// }
+template <typename It>
+auto ALWAYS_INLINE outof_iterator(It iter)
+ -> out_convertible<typename std::remove_reference<decltype(*iter)>::type>;
+
+// Helper function that automatically infers 'T'.
+// To use when the argument is already a pointer.
+//
+// ptr must be non-null; otherwise a DCHECK failure will occur.
+//
+// Returns a type that is implicitly convertible to either out<T> or T* depending
+// on the call site.
+//
+// Example:
+// int do_work(size_t args, out<int> result);
+// // returns 0 on success, sets result, otherwise error code
+//
+// Usage:
+// int* array = &some_other_value;
+// if (do_work(3, outof_ptr(array))) {
+// cout << "success: " << *array;
+// }
+template <typename T>
+out_convertible<T> outof_ptr(T* ptr) ALWAYS_INLINE;
+
+// Zero-overhead wrapper around a non-null, non-const pointer, meant for returning results
+// through out parameters. There are no other guarantees.
+//
+// The most common use case is to treat this like a typical pointer argument, for example:
+//
+// void write_out_5(out<int> x) {
+// *x = 5;
+// }
+//
+// The following operations are supported:
+// operator* -> use like a pointer (guaranteed to be non-null)
+// == and != -> compare against other pointers for (in)equality
+// begin/end -> use in standard C++ algorithms as if it was an iterator
+template <typename T>
+struct out {
+ // Has to be a mutable lvalue reference; otherwise there would be no way to write the output into it.
+ explicit inline out(T& param)
+ : param_(param) {}
+
+ // Model a single-element iterator (or pointer) to the parameter.
+ inline T& operator *() {
+ return param_;
+ }
+
+ // Model dereferencing fields/methods on a pointer.
+ inline T* operator->() {
+ return std::addressof(param_);
+ }
+
+ //
+ // Comparison against this or other pointers.
+ //
+ template <typename T2>
+ inline bool operator==(const T2* other) const {
+ return std::addressof(param_) == other;
+ }
+
+ template <typename T2>
+ inline bool operator==(const out<T>& other) const {
+ return std::addressof(param_) == std::addressof(other.param_);
+ }
+
+ // An out-parameter is never null.
+ inline bool operator==(std::nullptr_t) const {
+ return false;
+ }
+
+ template <typename T2>
+ inline bool operator!=(const T2* other) const {
+ return std::addressof(param_) != other;
+ }
+
+ template <typename T2>
+ inline bool operator!=(const out<T>& other) const {
+ return std::addressof(param_) != std::addressof(other.param_);
+ }
+
+ // An out-parameter is never null.
+ inline bool operator!=(std::nullptr_t) const {
+ return true;
+ }
+
+ //
+ // Iterator interface implementation. Use with standard algorithms.
+ // TODO: (add items in iterator_traits if this is truly useful).
+ //
+
+ inline T* begin() {
+ return std::addressof(param_);
+ }
+
+ inline const T* begin() const {
+ return std::addressof(param_);
+ }
+
+ inline T* end() {
+ return std::addressof(param_) + 1;
+ }
+
+ inline const T* end() const {
+ return std::addressof(param_) + 1;
+ }
+
+ private:
+ T& param_;
+};
+
+//
+// IMPLEMENTATION DETAILS
+//
+
+//
+// This intermediate type should not be used directly by user code.
+//
+// It enables 'outof(x)' to be passed into functions that expect either
+// an out<T> **or** a regular C-style pointer (T*).
+//
+template <typename T>
+struct out_convertible {
+ explicit inline out_convertible(T& param)
+ : param_(param) {
+ }
+
+ // Implicitly convert into an out<T> for standard usage.
+ inline operator out<T>() {
+ return out<T>(param_);
+ }
+
+ // Implicitly convert into a '*' for legacy usage.
+ inline operator T*() {
+ return std::addressof(param_);
+ }
+ private:
+ T& param_;
+};
+
+// Helper function that automatically infers 'T'
+template <typename T>
+inline out_convertible<T> outof(T& param) {
+ return out_convertible<T>(param);
+}
+
+// Helper function that automatically infers 'T'.
+// To use when the argument is already inside an iterator.
+template <typename It>
+inline auto outof_iterator(It iter)
+ -> out_convertible<typename std::remove_reference<decltype(*iter)>::type> {
+ return outof(*iter);
+}
+
+// Helper function that automatically infers 'T'.
+// To use when the argument is already a pointer.
+template <typename T>
+inline out_convertible<T> outof_ptr(T* ptr) {
+ DCHECK(ptr != nullptr);
+ return outof(*ptr);
+}
+
+// Helper function that automatically infers 'T'.
+// Forwards an out parameter from one function into another.
+template <typename T>
+inline out_convertible<T> outof_forward(out<T>& out_param) {
+ T& param = *out_param;
+ return out_convertible<T>(param);
+}
+
+} // namespace art
+#endif // ART_RUNTIME_BASE_OUT_H_
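
Because out_convertible<T> implicitly converts to both out<T> and T*, one outof() call site can feed either signature. A minimal sketch of that dual decay (the two callees and Demo are hypothetical, not part of this patch):

    #include "base/out.h"

    namespace art {

    void WriteFive(out<int> x) { *x = 5; }   // new-style out-parameter callee
    void WriteSixLegacy(int* x) { *x = 6; }  // legacy pointer callee

    void Demo() {
      int value = 0;
      WriteFive(outof(value));       // out_convertible<int> -> out<int>
      WriteSixLegacy(outof(value));  // out_convertible<int> -> int*
    }

    }  // namespace art
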
diff --git a/runtime/base/out_fwd.h b/runtime/base/out_fwd.h
new file mode 100644
index 0000000..6b2f926
--- /dev/null
+++ b/runtime/base/out_fwd.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_BASE_OUT_FWD_H_
+#define ART_RUNTIME_BASE_OUT_FWD_H_
+
+// Forward declaration for "out<T>". See <out.h> for more information.
+// Other headers use only the forward declaration.
+
+// Callers of functions that take an out<T> parameter should #include <out.h> to get outof(),
+// which constructs out<T> through type inference.
+namespace art {
+template <typename T>
+struct out;
+} // namespace art
+
+#endif // ART_RUNTIME_BASE_OUT_FWD_H_
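
A quick sketch of the intended split (widget.h and its functions are hypothetical): a header can declare functions taking out<T> against out_fwd.h alone; only code that constructs the argument needs the full out.h.

    // widget.h -- declaration only; the forward declaration suffices.
    #include "base/out_fwd.h"
    namespace art {
    void GetWidgetCount(out<int> count);
    }  // namespace art

    // caller.cc -- constructing the argument needs out.h for outof().
    #include "base/out.h"
    #include "widget.h"
    namespace art {
    void Caller() {
      int count;
      GetWidgetCount(outof(count));
    }
    }  // namespace art
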
diff --git a/runtime/base/out_test.cc b/runtime/base/out_test.cc
new file mode 100644
index 0000000..4274200
--- /dev/null
+++ b/runtime/base/out_test.cc
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "out.h"
+
+#include <algorithm>
+#include <gtest/gtest.h>
+
+namespace art {
+
+struct OutTest : public testing::Test {
+ // Multiplies values less than 10 by two, stores the result and returns 0.
+ // Returns -1 if the original value was not multiplied by two.
+ static int multiply_small_values_by_two(size_t args, out<int> result) {
+ if (args < 10) {
+ *result = args * 2;
+ return 0;
+ } else {
+ return -1;
+ }
+ }
+};
+
+extern "C" int multiply_small_values_by_two_legacy(size_t args, int* result) {
+ if (args < 10) {
+ *result = args * 2;
+ return 0;
+ } else {
+ return -1;
+ }
+}
+
+TEST_F(OutTest, TraditionalCall) {
+ // For calling traditional C++ functions.
+ int res;
+ EXPECT_EQ(multiply_small_values_by_two(1, outof(res)), 0);
+ EXPECT_EQ(2, res);
+}
+
+TEST_F(OutTest, LegacyCall) {
+ // For calling legacy, e.g. C-style functions.
+ int res2;
+ EXPECT_EQ(0, multiply_small_values_by_two_legacy(1, outof(res2)));
+ EXPECT_EQ(2, res2);
+}
+
+TEST_F(OutTest, CallFromIterator) {
+ // For calling a function with a parameter originating as an iterator.
+ std::vector<int> list = {1, 2, 3}; // NOLINT [whitespace/labels] [4]
+ std::vector<int>::iterator it = list.begin();
+
+ EXPECT_EQ(0, multiply_small_values_by_two(2, outof_iterator(it)));
+ EXPECT_EQ(4, list[0]);
+}
+
+TEST_F(OutTest, CallFromPointer) {
+ // For calling a function with a parameter originating as a C-pointer.
+ std::vector<int> list = {1, 2, 3}; // NOLINT [whitespace/labels] [4]
+
+ int* list_ptr = &list[2]; // 3
+
+ EXPECT_EQ(0, multiply_small_values_by_two(2, outof_ptr(list_ptr)));
+ EXPECT_EQ(4, list[2]);
+}
+
+TEST_F(OutTest, OutAsIterator) {
+ // For using the out<T> parameter as an iterator inside of the callee.
+ std::vector<int> list;
+ int x = 100;
+ out<int> out_from_x = outof(x);
+
+ for (const int& val : out_from_x) {
+ list.push_back(val);
+ }
+
+ ASSERT_EQ(1u, list.size());
+ EXPECT_EQ(100, list[0]);
+
+ // A more typical use-case would be to use std algorithms
+ EXPECT_NE(out_from_x.end(),
+ std::find(out_from_x.begin(),
+ out_from_x.end(),
+ 100)); // Search for '100' in out.
+}
+
+} // namespace art
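
One helper the tests above leave unexercised is outof_forward. A hedged sketch of its intended use, forwarding an out<T> received by one function into a nested call (ComputeInner and ComputeOuter are hypothetical names):

    #include "base/out.h"

    namespace art {

    int ComputeInner(out<int> result) {
      *result = 42;
      return 0;
    }

    int ComputeOuter(out<int> result) {
      // Re-wraps the referent so the nested call can take out<int> (or a
      // legacy int*) without the caller exposing a raw address.
      return ComputeInner(outof_forward(result));
    }

    }  // namespace art
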
diff --git a/runtime/base/timing_logger.h b/runtime/base/timing_logger.h
index b300109..e10cd24 100644
--- a/runtime/base/timing_logger.h
+++ b/runtime/base/timing_logger.h
@@ -33,17 +33,17 @@
explicit CumulativeLogger(const std::string& name);
~CumulativeLogger();
void Start();
- void End() LOCKS_EXCLUDED(lock_);
- void Reset() LOCKS_EXCLUDED(lock_);
- void Dump(std::ostream& os) const LOCKS_EXCLUDED(lock_);
+ void End() REQUIRES(!lock_);
+ void Reset() REQUIRES(!lock_);
+ void Dump(std::ostream& os) const REQUIRES(!lock_);
uint64_t GetTotalNs() const {
return GetTotalTime() * kAdjust;
}
// Allow the name to be modified, particularly when the cumulative logger is a field within a
// parent class that is unable to determine the "name" of a sub-class.
- void SetName(const std::string& name) LOCKS_EXCLUDED(lock_);
- void AddLogger(const TimingLogger& logger) LOCKS_EXCLUDED(lock_);
- size_t GetIterations() const;
+ void SetName(const std::string& name) REQUIRES(!lock_);
+ void AddLogger(const TimingLogger& logger) REQUIRES(!lock_);
+ size_t GetIterations() const REQUIRES(!lock_);
private:
class HistogramComparator {
@@ -58,8 +58,8 @@
static constexpr size_t kInitialBucketSize = 50; // 50 microseconds.
void AddPair(const std::string &label, uint64_t delta_time)
- EXCLUSIVE_LOCKS_REQUIRED(lock_);
- void DumpHistogram(std::ostream &os) const EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ REQUIRES(lock_);
+ void DumpHistogram(std::ostream &os) const REQUIRES(lock_);
uint64_t GetTotalTime() const {
return total_time_;
}
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index 0ae32f4..38bc818 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -16,6 +16,7 @@
#include "check_jni.h"
+#include <iomanip>
#include <sys/mman.h>
#include <zlib.h>
@@ -155,7 +156,7 @@
* Assumes "jobj" has already been validated.
*/
bool CheckInstanceFieldID(ScopedObjectAccess& soa, jobject java_object, jfieldID fid)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
mirror::Object* o = soa.Decode<mirror::Object*>(java_object);
if (o == nullptr) {
AbortF("field operation on NULL object: %p", java_object);
@@ -199,7 +200,7 @@
*/
bool CheckMethodAndSig(ScopedObjectAccess& soa, jobject jobj, jclass jc,
jmethodID mid, Primitive::Type type, InvokeType invoke)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ArtMethod* m = CheckMethodID(soa, mid);
if (m == nullptr) {
return false;
@@ -246,7 +247,7 @@
* Assumes "java_class" has already been validated.
*/
bool CheckStaticFieldID(ScopedObjectAccess& soa, jclass java_class, jfieldID fid)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
mirror::Class* c = soa.Decode<mirror::Class*>(java_class);
ArtField* f = CheckFieldID(soa, fid);
if (f == nullptr) {
@@ -269,7 +270,7 @@
* Instances of "java_class" must be instances of the method's declaring class.
*/
bool CheckStaticMethod(ScopedObjectAccess& soa, jclass java_class, jmethodID mid)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ArtMethod* m = CheckMethodID(soa, mid);
if (m == nullptr) {
return false;
@@ -290,7 +291,7 @@
* will be handled automatically by the instanceof check.)
*/
bool CheckVirtualMethod(ScopedObjectAccess& soa, jobject java_object, jmethodID mid)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ArtMethod* m = CheckMethodID(soa, mid);
if (m == nullptr) {
return false;
@@ -343,7 +344,7 @@
* Use the kFlag_NullableUtf flag where 'u' field(s) are nullable.
*/
bool Check(ScopedObjectAccess& soa, bool entry, const char* fmt, JniValueType* args)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ArtMethod* traceMethod = nullptr;
if (has_method_ && soa.Vm()->IsTracingEnabled()) {
// We need to guard some of the invocation interface's calls: a bad caller might
@@ -443,7 +444,7 @@
}
bool CheckReflectedMethod(ScopedObjectAccess& soa, jobject jmethod)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
mirror::Object* method = soa.Decode<mirror::Object*>(jmethod);
if (method == nullptr) {
AbortF("expected non-null method");
@@ -461,7 +462,7 @@
}
bool CheckConstructor(ScopedObjectAccess& soa, jmethodID mid)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ArtMethod* method = soa.DecodeMethod(mid);
if (method == nullptr) {
AbortF("expected non-null constructor");
@@ -475,7 +476,7 @@
}
bool CheckReflectedField(ScopedObjectAccess& soa, jobject jfield)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
mirror::Object* field = soa.Decode<mirror::Object*>(jfield);
if (field == nullptr) {
AbortF("expected non-null java.lang.reflect.Field");
@@ -491,7 +492,7 @@
}
bool CheckThrowable(ScopedObjectAccess& soa, jthrowable jobj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
mirror::Object* obj = soa.Decode<mirror::Object*>(jobj);
if (!obj->GetClass()->IsThrowableClass()) {
AbortF("expected java.lang.Throwable but got object of type "
@@ -502,7 +503,7 @@
}
bool CheckThrowableClass(ScopedObjectAccess& soa, jclass jc)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
mirror::Class* c = soa.Decode<mirror::Class*>(jc);
if (!c->IsThrowableClass()) {
AbortF("expected java.lang.Throwable class but got object of "
@@ -533,7 +534,7 @@
}
bool CheckInstantiableNonArray(ScopedObjectAccess& soa, jclass jc)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
mirror::Class* c = soa.Decode<mirror::Class*>(jc);
if (!c->IsInstantiableNonArray()) {
AbortF("can't make objects of type %s: %p", PrettyDescriptor(c).c_str(), c);
@@ -543,7 +544,7 @@
}
bool CheckPrimitiveArrayType(ScopedObjectAccess& soa, jarray array, Primitive::Type type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (!CheckArray(soa, array)) {
return false;
}
@@ -558,7 +559,7 @@
bool CheckFieldAccess(ScopedObjectAccess& soa, jobject obj, jfieldID fid, bool is_static,
Primitive::Type type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (is_static && !CheckStaticFieldID(soa, down_cast<jclass>(obj), fid)) {
return false;
}
@@ -619,7 +620,7 @@
* to "running" mode before doing the checks.
*/
bool CheckInstance(ScopedObjectAccess& soa, InstanceKind kind, jobject java_object, bool null_ok)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
const char* what = nullptr;
switch (kind) {
case kClass:
@@ -715,7 +716,7 @@
}
bool CheckPossibleHeapValue(ScopedObjectAccess& soa, char fmt, JniValueType arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
switch (fmt) {
case 'a': // jarray
return CheckArray(soa, arg.a);
@@ -785,7 +786,7 @@
void TracePossibleHeapValue(ScopedObjectAccess& soa, bool entry, char fmt, JniValueType arg,
std::string* msg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
switch (fmt) {
case 'L': // jobject fall-through.
case 'a': // jarray fall-through.
@@ -946,7 +947,7 @@
* Since we're dealing with objects, switch to "running" mode.
*/
bool CheckArray(ScopedObjectAccess& soa, jarray java_array)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (UNLIKELY(java_array == nullptr)) {
AbortF("jarray was NULL");
return false;
@@ -983,7 +984,7 @@
}
ArtField* CheckFieldID(ScopedObjectAccess& soa, jfieldID fid)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (fid == nullptr) {
AbortF("jfieldID was NULL");
return nullptr;
@@ -999,7 +1000,7 @@
}
ArtMethod* CheckMethodID(ScopedObjectAccess& soa, jmethodID mid)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (mid == nullptr) {
AbortF("jmethodID was NULL");
return nullptr;
@@ -1014,7 +1015,7 @@
return m;
}
- bool CheckThread(JNIEnv* env) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool CheckThread(JNIEnv* env) SHARED_REQUIRES(Locks::mutator_lock_) {
Thread* self = Thread::Current();
if (self == nullptr) {
AbortF("a thread (tid %d) is making JNI calls without being attached", GetTid());
@@ -1083,10 +1084,29 @@
}
const char* errorKind = nullptr;
- uint8_t utf8 = CheckUtfBytes(bytes, &errorKind);
+ const uint8_t* utf8 = CheckUtfBytes(bytes, &errorKind);
if (errorKind != nullptr) {
+ // This is an expensive loop that reallocates often, but it is not expected to be hit in
+ // practice anyway.
+ std::ostringstream oss;
+ oss << std::hex;
+ const uint8_t* tmp = reinterpret_cast<const uint8_t*>(bytes);
+ while (*tmp != 0) {
+ if (tmp == utf8) {
+ oss << "<";
+ }
+ oss << "0x" << std::setfill('0') << std::setw(2) << static_cast<uint32_t>(*tmp);
+ if (tmp == utf8) {
+ oss << '>';
+ }
+ tmp++;
+ if (*tmp != 0) {
+ oss << ' ';
+ }
+ }
+
AbortF("input is not valid Modified UTF-8: illegal %s byte %#x\n"
- " string: '%s'", errorKind, utf8, bytes);
+ " string: '%s'\n input: '%s'", errorKind, *utf8, bytes, oss.str().c_str());
return false;
}
return true;
@@ -1094,11 +1114,11 @@
// Checks whether |bytes| is valid modified UTF-8. We also accept 4 byte UTF
// sequences in place of encoded surrogate pairs.
- static uint8_t CheckUtfBytes(const char* bytes, const char** errorKind) {
+ static const uint8_t* CheckUtfBytes(const char* bytes, const char** errorKind) {
while (*bytes != '\0') {
- uint8_t utf8 = *(bytes++);
+ const uint8_t* utf8 = reinterpret_cast<const uint8_t*>(bytes++);
// Switch on the high four bits.
- switch (utf8 >> 4) {
+ switch (*utf8 >> 4) {
case 0x00:
case 0x01:
case 0x02:
@@ -1118,11 +1138,11 @@
return utf8;
case 0x0f:
// Bit pattern 1111, which might be the start of a 4 byte sequence.
- if ((utf8 & 0x08) == 0) {
+ if ((*utf8 & 0x08) == 0) {
// Bit pattern 1111 0xxx, which is the start of a 4 byte sequence.
// We consume one continuation byte here, and fall through to consume two more.
- utf8 = *(bytes++);
- if ((utf8 & 0xc0) != 0x80) {
+ utf8 = reinterpret_cast<const uint8_t*>(bytes++);
+ if ((*utf8 & 0xc0) != 0x80) {
*errorKind = "continuation";
return utf8;
}
@@ -1135,8 +1155,8 @@
FALLTHROUGH_INTENDED;
case 0x0e:
// Bit pattern 1110, so there are two additional bytes.
- utf8 = *(bytes++);
- if ((utf8 & 0xc0) != 0x80) {
+ utf8 = reinterpret_cast<const uint8_t*>(bytes++);
+ if ((*utf8 & 0xc0) != 0x80) {
*errorKind = "continuation";
return utf8;
}
@@ -1146,8 +1166,8 @@
case 0x0c:
case 0x0d:
// Bit pattern 110x, so there is one additional byte.
- utf8 = *(bytes++);
- if ((utf8 & 0xc0) != 0x80) {
+ utf8 = reinterpret_cast<const uint8_t*>(bytes++);
+ if ((*utf8 & 0xc0) != 0x80) {
*errorKind = "continuation";
return utf8;
}
@@ -2670,7 +2690,7 @@
static bool CheckCallArgs(ScopedObjectAccess& soa, ScopedCheck& sc, JNIEnv* env, jobject obj,
jclass c, jmethodID mid, InvokeType invoke)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
bool checked;
switch (invoke) {
case kVirtual: {
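
To make the new failure output concrete, here is a standalone sketch (not the patch code) of the dump format it produces: every byte of the rejected string in hex, with the byte returned by CheckUtfBytes bracketed by '<' and '>'.

    #include <cstdint>
    #include <iomanip>
    #include <iostream>
    #include <sstream>
    #include <string>

    // Renders every byte of 'bytes' in hex, bracketing the byte at 'bad'.
    std::string DumpWithMarker(const char* bytes, const char* bad) {
      std::ostringstream oss;
      oss << std::hex;
      for (const char* tmp = bytes; *tmp != 0; ++tmp) {
        if (tmp == bad) oss << '<';
        oss << "0x" << std::setfill('0') << std::setw(2)
            << static_cast<uint32_t>(static_cast<uint8_t>(*tmp));
        if (tmp == bad) oss << '>';
        if (*(tmp + 1) != 0) oss << ' ';
      }
      return oss.str();
    }

    int main() {
      // "a\xc0z": 0xc0 starts a two-byte sequence, but 'z' is not a valid
      // continuation byte, so the checker flags the byte at offset 2.
      const char bytes[] = "a\xc0z";
      std::cout << DumpWithMarker(bytes, bytes + 2) << "\n";  // 0x61 0xc0 <0x7a>
    }
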
diff --git a/runtime/check_reference_map_visitor.h b/runtime/check_reference_map_visitor.h
index 504b753..3155b51 100644
--- a/runtime/check_reference_map_visitor.h
+++ b/runtime/check_reference_map_visitor.h
@@ -28,10 +28,10 @@
// holding references.
class CheckReferenceMapVisitor : public StackVisitor {
public:
- explicit CheckReferenceMapVisitor(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ explicit CheckReferenceMapVisitor(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_)
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
- bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
if (m->IsCalleeSaveMethod() || m->IsNative()) {
CHECK_EQ(GetDexPc(), DexFile::kDexNoIndex);
@@ -52,7 +52,7 @@
}
void CheckReferences(int* registers, int number_of_references, uint32_t native_pc_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (GetMethod()->IsOptimized(sizeof(void*))) {
CheckOptimizedMethod(registers, number_of_references, native_pc_offset);
} else {
@@ -62,7 +62,7 @@
private:
void CheckOptimizedMethod(int* registers, int number_of_references, uint32_t native_pc_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
CodeInfo code_info = m->GetOptimizedCodeInfo();
StackMapEncoding encoding = code_info.ExtractEncoding();
@@ -104,7 +104,7 @@
}
void CheckQuickMethod(int* registers, int number_of_references, uint32_t native_pc_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
NativePcOffsetToReferenceMap map(m->GetNativeGcMap(sizeof(void*)));
const uint8_t* ref_bitmap = map.FindBitMap(native_pc_offset);
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index 21b63c6..c08417f 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -117,8 +117,10 @@
return resolved_method;
}
-inline ArtMethod* ClassLinker::ResolveMethod(Thread* self, uint32_t method_idx,
- ArtMethod* referrer, InvokeType type) {
+inline ArtMethod* ClassLinker::ResolveMethod(Thread* self,
+ uint32_t method_idx,
+ ArtMethod* referrer,
+ InvokeType type) {
ArtMethod* resolved_method = GetResolvedMethod(method_idx, referrer);
if (UNLIKELY(resolved_method == nullptr)) {
mirror::Class* declaring_class = referrer->GetDeclaringClass();
@@ -143,7 +145,8 @@
return GetResolvedField(field_idx, field_declaring_class->GetDexCache());
}
-inline ArtField* ClassLinker::ResolveField(uint32_t field_idx, ArtMethod* referrer,
+inline ArtField* ClassLinker::ResolveField(uint32_t field_idx,
+ ArtMethod* referrer,
bool is_static) {
mirror::Class* declaring_class = referrer->GetDeclaringClass();
ArtField* resolved_field = GetResolvedField(field_idx, declaring_class);
@@ -187,7 +190,7 @@
}
inline mirror::Class* ClassLinker::GetClassRoot(ClassRoot class_root)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(!class_roots_.IsNull());
mirror::ObjectArray<mirror::Class>* class_roots = class_roots_.Read();
mirror::Class* klass = class_roots->Get(class_root);
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 8f7862a..82cf7af 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -30,6 +30,7 @@
#include "base/arena_allocator.h"
#include "base/casts.h"
#include "base/logging.h"
+#include "base/out.h"
#include "base/scoped_arena_containers.h"
#include "base/scoped_flock.h"
#include "base/stl_util.h"
@@ -91,7 +92,7 @@
static void ThrowNoClassDefFoundError(const char* fmt, ...)
__attribute__((__format__(__printf__, 1, 2)))
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static void ThrowNoClassDefFoundError(const char* fmt, ...) {
va_list args;
va_start(args, fmt);
@@ -100,14 +101,12 @@
va_end(args);
}
-bool ClassLinker::HasInitWithString(
- Thread* self, ClassLinker* class_linker, const char* descriptor) {
+bool ClassLinker::HasInitWithString(Thread* self, const char* descriptor) {
ArtMethod* method = self->GetCurrentMethod(nullptr);
StackHandleScope<1> hs(self);
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(method != nullptr ?
- method->GetDeclaringClass()->GetClassLoader()
- : nullptr));
- mirror::Class* exception_class = class_linker->FindClass(self, descriptor, class_loader);
+ method->GetDeclaringClass()->GetClassLoader() : nullptr));
+ mirror::Class* exception_class = FindClass(self, descriptor, class_loader);
if (exception_class == nullptr) {
// No exc class ~ no <init>-with-string.
@@ -144,7 +143,7 @@
std::string temp;
const char* descriptor = c->GetVerifyErrorClass()->GetDescriptor(&temp);
- if (HasInitWithString(self, this, descriptor)) {
+ if (HasInitWithString(self, descriptor)) {
self->ThrowNewException(descriptor, PrettyDescriptor(c).c_str());
} else {
self->ThrowNewException(descriptor, nullptr);
@@ -157,7 +156,7 @@
}
static void VlogClassInitializationFailure(Handle<mirror::Class> klass)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (VLOG_IS_ON(class_linker)) {
std::string temp;
LOG(INFO) << "Failed to initialize class " << klass->GetDescriptor(&temp) << " from "
@@ -166,7 +165,7 @@
}
static void WrapExceptionInInitializer(Handle<mirror::Class> klass)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
Thread* self = Thread::Current();
JNIEnv* env = self->GetJniEnv();
@@ -200,7 +199,7 @@
return lhs.size < rhs.size || (lhs.size == rhs.size && lhs.start_offset > rhs.start_offset);
}
};
-typedef std::priority_queue<FieldGap, std::vector<FieldGap>, FieldGapsComparator> FieldGaps;
+using FieldGaps = std::priority_queue<FieldGap, std::vector<FieldGap>, FieldGapsComparator>;
// Adds largest aligned gaps to queue of gaps.
static void AddFieldGap(uint32_t gap_start, uint32_t gap_end, FieldGaps* gaps) {
@@ -228,7 +227,7 @@
MemberOffset* field_offset,
std::deque<ArtField*>* grouped_and_sorted_fields,
FieldGaps* gaps)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(current_field_idx != nullptr);
DCHECK(grouped_and_sorted_fields != nullptr);
DCHECK(gaps != nullptr);
@@ -777,12 +776,13 @@
// be from multidex, which resolves correctly).
};
-static void AddDexFilesFromOat(const OatFile* oat_file, bool already_loaded,
+static void AddDexFilesFromOat(const OatFile* oat_file,
+ bool already_loaded,
std::priority_queue<DexFileAndClassPair>* heap) {
const std::vector<const OatDexFile*>& oat_dex_files = oat_file->GetOatDexFiles();
for (const OatDexFile* oat_dex_file : oat_dex_files) {
std::string error;
- std::unique_ptr<const DexFile> dex_file = oat_dex_file->OpenDexFile(&error);
+ std::unique_ptr<const DexFile> dex_file = oat_dex_file->OpenDexFile(outof(error));
if (dex_file.get() == nullptr) {
LOG(WARNING) << "Could not create dex file from oat file: " << error;
} else {
@@ -838,7 +838,7 @@
// against the following top element. If the descriptor is the same, it is now checked whether
// the two elements agree on whether their dex file was from an already-loaded oat-file or the
// new oat file. Any disagreement indicates a collision.
-bool ClassLinker::HasCollisions(const OatFile* oat_file, std::string* error_msg) {
+bool ClassLinker::HasCollisions(const OatFile* oat_file, out<std::string> error_msg) {
if (!kDuplicateClassesCheck) {
return false;
}
@@ -903,10 +903,9 @@
}
std::vector<std::unique_ptr<const DexFile>> ClassLinker::OpenDexFilesFromOat(
- const char* dex_location, const char* oat_location,
- std::vector<std::string>* error_msgs) {
- CHECK(error_msgs != nullptr);
-
+ const char* dex_location,
+ const char* oat_location,
+ out<std::vector<std::string>> error_msgs) {
// Verify we aren't holding the mutator lock, which could starve GC if we
// have to generate or relocate an oat file.
Locks::mutator_lock_->AssertNotHeld(Thread::Current());
@@ -948,7 +947,7 @@
std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
if (oat_file.get() != nullptr) {
// Take the file only if it has no collisions, or we must take it because of preopting.
- bool accept_oat_file = !HasCollisions(oat_file.get(), &error_msg);
+ bool accept_oat_file = !HasCollisions(oat_file.get(), outof(error_msg));
if (!accept_oat_file) {
// Failed the collision check. Print warning.
if (Runtime::Current()->IsDexFileFallbackEnabled()) {
@@ -982,8 +981,7 @@
if (source_oat_file != nullptr) {
dex_files = oat_file_assistant.LoadDexFiles(*source_oat_file, dex_location);
if (dex_files.empty()) {
- error_msgs->push_back("Failed to open dex files from "
- + source_oat_file->GetLocation());
+ error_msgs->push_back("Failed to open dex files from " + source_oat_file->GetLocation());
}
}
@@ -1019,9 +1017,10 @@
return nullptr;
}
-static void SanityCheckArtMethod(ArtMethod* m, mirror::Class* expected_class,
+static void SanityCheckArtMethod(ArtMethod* m,
+ mirror::Class* expected_class,
gc::space::ImageSpace* space)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (m->IsRuntimeMethod()) {
CHECK(m->GetDeclaringClass() == nullptr) << PrettyMethod(m);
} else if (m->IsMiranda()) {
@@ -1037,9 +1036,11 @@
}
}
-static void SanityCheckArtMethodPointerArray(
- mirror::PointerArray* arr, mirror::Class* expected_class, size_t pointer_size,
- gc::space::ImageSpace* space) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+static void SanityCheckArtMethodPointerArray(mirror::PointerArray* arr,
+ mirror::Class* expected_class,
+ size_t pointer_size,
+ gc::space::ImageSpace* space)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
CHECK(arr != nullptr);
for (int32_t j = 0; j < arr->GetLength(); ++j) {
auto* method = arr->GetElementPtrSize<ArtMethod*>(j, pointer_size);
@@ -1054,7 +1055,7 @@
}
static void SanityCheckObjectsCallback(mirror::Object* obj, void* arg ATTRIBUTE_UNUSED)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(obj != nullptr);
CHECK(obj->GetClass() != nullptr) << "Null class in object " << obj;
CHECK(obj->GetClass()->GetClass() != nullptr) << "Null class class " << obj;
@@ -1145,7 +1146,7 @@
nullptr);
CHECK(oat_dex_file != nullptr) << oat_file.GetLocation() << " " << dex_file_location;
std::string error_msg;
- std::unique_ptr<const DexFile> dex_file = oat_dex_file->OpenDexFile(&error_msg);
+ std::unique_ptr<const DexFile> dex_file = oat_dex_file->OpenDexFile(outof(error_msg));
if (dex_file.get() == nullptr) {
LOG(FATAL) << "Failed to open dex file " << dex_file_location
<< " from within oat file " << oat_file.GetLocation()
@@ -1238,11 +1239,8 @@
bool ClassLinker::ClassInClassTable(mirror::Class* klass) {
ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- auto it = class_table_.Find(GcRoot<mirror::Class>(klass));
- if (it == class_table_.end()) {
- return false;
- }
- return it->Read() == klass;
+ ClassTable* const class_table = ClassTableForClassLoader(klass->GetClassLoader());
+ return class_table != nullptr && class_table->Contains(klass);
}
void ClassLinker::VisitClassRoots(RootVisitor* visitor, VisitRootFlags flags) {
@@ -1254,8 +1252,7 @@
// There is 3 GC cases to handle:
// Non moving concurrent:
// This case is easy to handle since the reference members of ArtMethod and ArtFields are held
- // live by the class and class roots. In this case we probably don't even need to call
- // VisitNativeRoots.
+ // live by the class and class roots.
//
// Moving non-concurrent:
// This case needs to call visit VisitNativeRoots in case the classes or dex cache arrays move.
@@ -1266,35 +1263,30 @@
// Moving concurrent:
// Need to make sure to not copy ArtMethods without doing read barriers since the roots are
// marked concurrently and we don't hold the classlinker_classes_lock_ when we do the copy.
- for (GcRoot<mirror::Class>& root : class_table_) {
- buffered_visitor.VisitRoot(root);
- if ((flags & kVisitRootFlagNonMoving) == 0) {
- // Don't bother visiting ArtField and ArtMethod if kVisitRootFlagNonMoving is set since
- // these roots are all reachable from the class or dex cache.
- root.Read()->VisitNativeRoots(buffered_visitor, image_pointer_size_);
+ std::vector<std::pair<GcRoot<mirror::ClassLoader>, ClassTable*>> reinsert;
+ for (auto it = classes_.begin(); it != classes_.end(); ) {
+ it->second->VisitRoots(visitor, flags);
+ const GcRoot<mirror::ClassLoader>& root = it->first;
+ mirror::ClassLoader* old_ref = root.Read<kWithoutReadBarrier>();
+ root.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
+ mirror::ClassLoader* new_ref = root.Read<kWithoutReadBarrier>();
+ if (new_ref != old_ref) {
+ reinsert.push_back(*it);
+ it = classes_.erase(it);
+ } else {
+ ++it;
}
}
- // PreZygote classes can't move so we won't need to update fields' declaring classes.
- for (GcRoot<mirror::Class>& root : pre_zygote_class_table_) {
- buffered_visitor.VisitRoot(root);
- if ((flags & kVisitRootFlagNonMoving) == 0) {
- root.Read()->VisitNativeRoots(buffered_visitor, image_pointer_size_);
- }
+ for (auto& pair : reinsert) {
+ classes_.Put(pair.first, pair.second);
}
} else if ((flags & kVisitRootFlagNewRoots) != 0) {
for (auto& root : new_class_roots_) {
mirror::Class* old_ref = root.Read<kWithoutReadBarrier>();
- old_ref->VisitNativeRoots(buffered_visitor, image_pointer_size_);
root.VisitRoot(visitor, RootInfo(kRootStickyClass));
mirror::Class* new_ref = root.Read<kWithoutReadBarrier>();
- if (UNLIKELY(new_ref != old_ref)) {
- // Uh ohes, GC moved a root in the log. Need to search the class_table and update the
- // corresponding object. This is slow, but luckily for us, this may only happen with a
- // concurrent moving GC.
- auto it = class_table_.Find(GcRoot<mirror::Class>(old_ref));
- DCHECK(it != class_table_.end());
- *it = GcRoot<mirror::Class>(new_ref);
- }
+ // Concurrent moving GC marked new roots through the to-space invariant.
+ CHECK_EQ(new_ref, old_ref);
}
}
buffered_visitor.Flush(); // Flush before clearing new_class_roots_.
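
The erase-and-reinsert loop above exists because a moving collector can relocate the ClassLoader a map key refers to, which invalidates the key's hash bucket. A generic, self-contained sketch of the same pattern, with std::unordered_map standing in for the class-loader table:

    #include <unordered_map>
    #include <utility>
    #include <vector>

    // Visits every key; entries whose key object moved are erased and
    // re-inserted so they land in the correct bucket afterwards.
    template <typename K, typename V, typename MoveFn>
    void VisitRootsAndFixKeys(std::unordered_map<K, V>& table, MoveFn&& moved_to) {
      std::vector<std::pair<K, V>> reinsert;
      for (auto it = table.begin(); it != table.end();) {
        K new_key = moved_to(it->first);  // the visitor may relocate the key object
        if (new_key != it->first) {
          reinsert.emplace_back(new_key, std::move(it->second));
          it = table.erase(it);
        } else {
          ++it;
        }
      }
      for (auto& pair : reinsert) {
        table.insert(std::move(pair));
      }
    }
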
@@ -1343,91 +1335,103 @@
}
}
-void ClassLinker::VisitClasses(ClassVisitor* visitor, void* arg) {
+void ClassLinker::VisitClassesInternal(ClassVisitor* visitor) {
+ for (auto& pair : classes_) {
+ ClassTable* const class_table = pair.second;
+ if (!class_table->Visit(visitor)) {
+ return;
+ }
+ }
+}
+
+void ClassLinker::VisitClasses(ClassVisitor* visitor) {
if (dex_cache_image_class_lookup_required_) {
MoveImageClassesToClassTable();
}
- // TODO: why isn't this a ReaderMutexLock?
- WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- for (GcRoot<mirror::Class>& root : class_table_) {
- if (!visitor(root.Read(), arg)) {
- return;
- }
- }
- for (GcRoot<mirror::Class>& root : pre_zygote_class_table_) {
- if (!visitor(root.Read(), arg)) {
- return;
- }
+ Thread* const self = Thread::Current();
+ ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
+ // Not safe to have thread suspension when we are holding a lock.
+ if (self != nullptr) {
+ ScopedAssertNoThreadSuspension nts(self, __FUNCTION__);
+ VisitClassesInternal(visitor);
+ } else {
+ VisitClassesInternal(visitor);
}
}
-static bool GetClassesVisitorSet(mirror::Class* c, void* arg) {
- std::set<mirror::Class*>* classes = reinterpret_cast<std::set<mirror::Class*>*>(arg);
- classes->insert(c);
- return true;
-}
-
-struct GetClassesVisitorArrayArg {
- Handle<mirror::ObjectArray<mirror::Class>>* classes;
- int32_t index;
- bool success;
+class GetClassesInToVector : public ClassVisitor {
+ public:
+ bool Visit(mirror::Class* klass) OVERRIDE {
+ classes_.push_back(klass);
+ return true;
+ }
+ std::vector<mirror::Class*> classes_;
};
-static bool GetClassesVisitorArray(mirror::Class* c, void* varg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- GetClassesVisitorArrayArg* arg = reinterpret_cast<GetClassesVisitorArrayArg*>(varg);
- if (arg->index < (*arg->classes)->GetLength()) {
- (*arg->classes)->Set(arg->index, c);
- arg->index++;
- return true;
- } else {
- arg->success = false;
+class GetClassInToObjectArray : public ClassVisitor {
+ public:
+ explicit GetClassInToObjectArray(mirror::ObjectArray<mirror::Class>* arr)
+ : arr_(arr), index_(0) {}
+
+ bool Visit(mirror::Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ ++index_;
+ if (index_ <= arr_->GetLength()) {
+ arr_->Set(index_ - 1, klass);
+ return true;
+ }
return false;
}
-}
-void ClassLinker::VisitClassesWithoutClassesLock(ClassVisitor* visitor, void* arg) {
+ bool Succeeded() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ return index_ <= arr_->GetLength();
+ }
+
+ private:
+ mirror::ObjectArray<mirror::Class>* const arr_;
+ int32_t index_;
+};
+
+void ClassLinker::VisitClassesWithoutClassesLock(ClassVisitor* visitor) {
// TODO: it may be possible to avoid secondary storage if we iterate over dex caches. The problem
// is avoiding duplicates.
if (!kMovingClasses) {
- std::set<mirror::Class*> classes;
- VisitClasses(GetClassesVisitorSet, &classes);
- for (mirror::Class* klass : classes) {
- if (!visitor(klass, arg)) {
+ GetClassesInToVector accumulator;
+ VisitClasses(&accumulator);
+ for (mirror::Class* klass : accumulator.classes_) {
+ if (!visitor->Visit(klass)) {
return;
}
}
} else {
- Thread* self = Thread::Current();
+ Thread* const self = Thread::Current();
StackHandleScope<1> hs(self);
- MutableHandle<mirror::ObjectArray<mirror::Class>> classes =
- hs.NewHandle<mirror::ObjectArray<mirror::Class>>(nullptr);
- GetClassesVisitorArrayArg local_arg;
- local_arg.classes = &classes;
- local_arg.success = false;
+ auto classes = hs.NewHandle<mirror::ObjectArray<mirror::Class>>(nullptr);
// We size the array assuming classes won't be added to the class table during the visit.
// If this assumption fails we iterate again.
- while (!local_arg.success) {
+ while (true) {
size_t class_table_size;
{
ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
- class_table_size = class_table_.Size() + pre_zygote_class_table_.Size();
+ // Add 100 in case new classes get loaded when we are filling in the object array.
+ class_table_size = NumZygoteClasses() + NumNonZygoteClasses() + 100;
}
mirror::Class* class_type = mirror::Class::GetJavaLangClass();
mirror::Class* array_of_class = FindArrayClass(self, &class_type);
classes.Assign(
mirror::ObjectArray<mirror::Class>::Alloc(self, array_of_class, class_table_size));
CHECK(classes.Get() != nullptr); // OOME.
- local_arg.index = 0;
- local_arg.success = true;
- VisitClasses(GetClassesVisitorArray, &local_arg);
+ GetClassInToObjectArray accumulator(classes.Get());
+ VisitClasses(&accumulator);
+ if (accumulator.Succeeded()) {
+ break;
+ }
}
for (int32_t i = 0; i < classes->GetLength(); ++i) {
      // If the class table shrank during creation of the classes array we expect null elements. If
// the class table grew then the loop repeats. If classes are created after the loop has
// finished then we don't visit.
mirror::Class* klass = classes->Get(i);
- if (klass != nullptr && !visitor(klass, arg)) {
+ if (klass != nullptr && !visitor->Visit(klass)) {
return;
}
}
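
The surrounding refactor replaces bool(*)(mirror::Class*, void*) callbacks with a small virtual ClassVisitor interface, so accumulators like GetClassesInToVector carry typed state instead of a reinterpret_cast'ed void*. A minimal sketch of the shape, with int standing in for mirror::Class*:

    #include <iostream>
    #include <vector>

    class ClassVisitor {
     public:
      virtual ~ClassVisitor() {}
      // Return false to stop the iteration early.
      virtual bool Visit(int klass) = 0;
    };

    class Collector : public ClassVisitor {
     public:
      bool Visit(int klass) override {
        classes_.push_back(klass);
        return true;
      }
      std::vector<int> classes_;
    };

    void VisitClasses(const std::vector<int>& table, ClassVisitor* visitor) {
      for (int klass : table) {
        if (!visitor->Visit(klass)) {
          return;
        }
      }
    }

    int main() {
      Collector collector;
      VisitClasses({1, 2, 3}, &collector);
      std::cout << collector.classes_.size() << "\n";  // prints 3
    }
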
@@ -1455,6 +1459,7 @@
mirror::LongArray::ResetArrayClass();
mirror::ShortArray::ResetArrayClass();
STLDeleteElements(&oat_files_);
+ STLDeleteValues(&classes_);
}
mirror::PointerArray* ClassLinker::AllocPointerArray(Thread* self, size_t length) {
@@ -1501,7 +1506,8 @@
return dex_cache.Get();
}
-mirror::Class* ClassLinker::AllocClass(Thread* self, mirror::Class* java_lang_Class,
+mirror::Class* ClassLinker::AllocClass(Thread* self,
+ mirror::Class* java_lang_Class,
uint32_t class_size) {
DCHECK_GE(class_size, sizeof(mirror::Class));
gc::Heap* heap = Runtime::Current()->GetHeap();
@@ -1520,13 +1526,14 @@
return AllocClass(self, GetClassRoot(kJavaLangClass), class_size);
}
-mirror::ObjectArray<mirror::StackTraceElement>* ClassLinker::AllocStackTraceElementArray(
- Thread* self, size_t length) {
+mirror::ObjectArray<mirror::StackTraceElement>*
+ClassLinker::AllocStackTraceElementArray(Thread* self, size_t length) {
return mirror::ObjectArray<mirror::StackTraceElement>::Alloc(
self, GetClassRoot(kJavaLangStackTraceElementArrayClass), length);
}
-mirror::Class* ClassLinker::EnsureResolved(Thread* self, const char* descriptor,
+mirror::Class* ClassLinker::EnsureResolved(Thread* self,
+ const char* descriptor,
mirror::Class* klass) {
DCHECK(klass != nullptr);
@@ -1585,7 +1592,8 @@
// Search a collection of DexFiles for a descriptor
ClassPathEntry FindInClassPath(const char* descriptor,
- size_t hash, const std::vector<const DexFile*>& class_path) {
+ size_t hash,
+ const std::vector<const DexFile*>& class_path) {
for (const DexFile* dex_file : class_path) {
const DexFile::ClassDef* dex_class_def = dex_file->FindClassDef(descriptor, hash);
if (dex_class_def != nullptr) {
@@ -1597,23 +1605,24 @@
static bool IsBootClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
mirror::ClassLoader* class_loader)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return class_loader == nullptr ||
class_loader->GetClass() ==
soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_BootClassLoader);
}
bool ClassLinker::FindClassInPathClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
- Thread* self, const char* descriptor,
+ Thread* self,
+ const char* descriptor,
size_t hash,
Handle<mirror::ClassLoader> class_loader,
- mirror::Class** result) {
+ out<mirror::Class*> result) {
// Termination case: boot class-loader.
if (IsBootClassLoader(soa, class_loader.Get())) {
// The boot class loader, search the boot class path.
ClassPathEntry pair = FindInClassPath(descriptor, hash, boot_class_path_);
if (pair.second != nullptr) {
- mirror::Class* klass = LookupClass(self, descriptor, hash, nullptr);
+ mirror::Class* klass = LookupClass(self, descriptor, hash, nullptr /* no classloader */);
if (klass != nullptr) {
*result = EnsureResolved(self, descriptor, klass);
} else {
@@ -1715,7 +1724,8 @@
return true;
}
-mirror::Class* ClassLinker::FindClass(Thread* self, const char* descriptor,
+mirror::Class* ClassLinker::FindClass(Thread* self,
+ const char* descriptor,
Handle<mirror::ClassLoader> class_loader) {
DCHECK_NE(*descriptor, '\0') << "descriptor is empty string";
DCHECK(self != nullptr);
@@ -1751,7 +1761,7 @@
} else {
ScopedObjectAccessUnchecked soa(self);
mirror::Class* cp_klass;
- if (FindClassInPathClassLoader(soa, self, descriptor, hash, class_loader, &cp_klass)) {
+ if (FindClassInPathClassLoader(soa, self, descriptor, hash, class_loader, outof(cp_klass))) {
// The chain was understood. So the value in cp_klass is either the class we were looking
// for, or not found.
if (cp_klass != nullptr) {
@@ -1804,7 +1814,9 @@
UNREACHABLE();
}
-mirror::Class* ClassLinker::DefineClass(Thread* self, const char* descriptor, size_t hash,
+mirror::Class* ClassLinker::DefineClass(Thread* self,
+ const char* descriptor,
+ size_t hash,
Handle<mirror::ClassLoader> class_loader,
const DexFile& dex_file,
const DexFile::ClassDef& dex_class_def) {
@@ -1890,7 +1902,7 @@
auto interfaces = hs.NewHandle<mirror::ObjectArray<mirror::Class>>(nullptr);
MutableHandle<mirror::Class> h_new_class = hs.NewHandle<mirror::Class>(nullptr);
- if (!LinkClass(self, descriptor, klass, interfaces, &h_new_class)) {
+ if (!LinkClass(self, descriptor, klass, interfaces, outof(h_new_class))) {
// Linking failed.
if (!klass->IsErroneous()) {
mirror::Class::SetStatus(klass, mirror::Class::kStatusError, self);
@@ -1973,8 +1985,9 @@
image_pointer_size_);
}
-OatFile::OatClass ClassLinker::FindOatClass(const DexFile& dex_file, uint16_t class_def_idx,
- bool* found) {
+OatFile::OatClass ClassLinker::FindOatClass(const DexFile& dex_file,
+ uint16_t class_def_idx,
+ out<bool> found) {
DCHECK_NE(class_def_idx, DexFile::kDexNoIndex16);
const OatFile::OatDexFile* oat_dex_file = dex_file.GetOatDexFile();
if (oat_dex_file == nullptr) {
@@ -1985,7 +1998,8 @@
return oat_dex_file->GetOatClass(class_def_idx);
}
-static uint32_t GetOatMethodIndexFromMethodIndex(const DexFile& dex_file, uint16_t class_def_idx,
+static uint32_t GetOatMethodIndexFromMethodIndex(const DexFile& dex_file,
+ uint16_t class_def_idx,
uint32_t method_idx) {
const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_idx);
const uint8_t* class_data = dex_file.GetClassData(class_def);
@@ -2019,7 +2033,7 @@
UNREACHABLE();
}
-const OatFile::OatMethod ClassLinker::FindOatMethodFor(ArtMethod* method, bool* found) {
+const OatFile::OatMethod ClassLinker::FindOatMethodFor(ArtMethod* method, out<bool> found) {
// Although we overwrite the trampoline of non-static methods, we may get here via the resolution
// method for direct methods (or virtual methods made direct).
mirror::Class* declaring_class = method->GetDeclaringClass();
@@ -2051,7 +2065,7 @@
method->GetDexMethodIndex()));
OatFile::OatClass oat_class = FindOatClass(*declaring_class->GetDexCache()->GetDexFile(),
declaring_class->GetDexClassDefIndex(),
- found);
+ outof_forward(found));
if (!(*found)) {
return OatFile::OatMethod::Invalid();
}
@@ -2065,7 +2079,7 @@
return GetQuickProxyInvokeHandler();
}
bool found;
- OatFile::OatMethod oat_method = FindOatMethodFor(method, &found);
+ OatFile::OatMethod oat_method = FindOatMethodFor(method, outof(found));
if (found) {
auto* code = oat_method.GetQuickCode();
if (code != nullptr) {
@@ -2091,7 +2105,7 @@
return nullptr;
}
bool found;
- OatFile::OatMethod oat_method = FindOatMethodFor(method, &found);
+ OatFile::OatMethod oat_method = FindOatMethodFor(method, outof(found));
if (found) {
return oat_method.GetQuickCode();
}
@@ -2105,10 +2119,11 @@
return nullptr;
}
-const void* ClassLinker::GetQuickOatCodeFor(const DexFile& dex_file, uint16_t class_def_idx,
+const void* ClassLinker::GetQuickOatCodeFor(const DexFile& dex_file,
+ uint16_t class_def_idx,
uint32_t method_idx) {
bool found;
- OatFile::OatClass oat_class = FindOatClass(dex_file, class_def_idx, &found);
+ OatFile::OatClass oat_class = FindOatClass(dex_file, class_def_idx, outof(found));
if (!found) {
return nullptr;
}
@@ -2118,7 +2133,7 @@
// Returns true if the method must run with interpreter, false otherwise.
static bool NeedsInterpreter(ArtMethod* method, const void* quick_code)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (quick_code == nullptr) {
// No code: need interpreter.
// May return true for native code, in the case of generic JNI
@@ -2159,7 +2174,7 @@
}
bool has_oat_class;
OatFile::OatClass oat_class = FindOatClass(dex_file, klass->GetDexClassDefIndex(),
- &has_oat_class);
+ outof(has_oat_class));
// Link the code of methods skipped by LinkCode.
for (size_t method_index = 0; it.HasNextDirectMethod(); ++method_index, it.Next()) {
ArtMethod* method = klass->GetDirectMethod(method_index, image_pointer_size_);
@@ -2187,7 +2202,8 @@
// Ignore virtual methods on the iterator.
}
-void ClassLinker::LinkCode(ArtMethod* method, const OatFile::OatClass* oat_class,
+void ClassLinker::LinkCode(ArtMethod* method,
+ const OatFile::OatClass* oat_class,
uint32_t class_def_method_index) {
Runtime* const runtime = Runtime::Current();
if (runtime->IsAotCompiler()) {
@@ -2239,8 +2255,10 @@
}
}
-void ClassLinker::SetupClass(const DexFile& dex_file, const DexFile::ClassDef& dex_class_def,
- Handle<mirror::Class> klass, mirror::ClassLoader* class_loader) {
+void ClassLinker::SetupClass(const DexFile& dex_file,
+ const DexFile::ClassDef& dex_class_def,
+ Handle<mirror::Class> klass,
+ mirror::ClassLoader* class_loader) {
CHECK(klass.Get() != nullptr);
CHECK(klass->GetDexCache() != nullptr);
CHECK_EQ(mirror::Class::kStatusNotReady, klass->GetStatus());
@@ -2260,7 +2278,8 @@
CHECK(klass->GetDexCacheStrings() != nullptr);
}
-void ClassLinker::LoadClass(Thread* self, const DexFile& dex_file,
+void ClassLinker::LoadClass(Thread* self,
+ const DexFile& dex_file,
const DexFile::ClassDef& dex_class_def,
Handle<mirror::Class> klass) {
const uint8_t* class_data = dex_file.GetClassData(dex_class_def);
@@ -2270,7 +2289,7 @@
bool has_oat_class = false;
if (Runtime::Current()->IsStarted() && !Runtime::Current()->IsAotCompiler()) {
OatFile::OatClass oat_class = FindOatClass(dex_file, klass->GetDexClassDefIndex(),
- &has_oat_class);
+ outof(has_oat_class));
if (has_oat_class) {
LoadClassMembers(self, dex_file, class_data, klass, &oat_class);
}
@@ -2299,7 +2318,8 @@
return reinterpret_cast<ArtMethod*>(ptr);
}
-void ClassLinker::LoadClassMembers(Thread* self, const DexFile& dex_file,
+void ClassLinker::LoadClassMembers(Thread* self,
+ const DexFile& dex_file,
const uint8_t* class_data,
Handle<mirror::Class> klass,
const OatFile::OatClass* oat_class) {
@@ -2389,10 +2409,13 @@
}
DCHECK(!it.HasNext());
}
+ // Ensure that the card is marked so that remembered sets pick up native roots.
+ Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(klass.Get());
self->AllowThreadSuspension();
}
-void ClassLinker::LoadField(const ClassDataItemIterator& it, Handle<mirror::Class> klass,
+void ClassLinker::LoadField(const ClassDataItemIterator& it,
+ Handle<mirror::Class> klass,
ArtField* dst) {
const uint32_t field_idx = it.GetMemberIndex();
dst->SetDexFieldIndex(field_idx);
@@ -2400,8 +2423,11 @@
dst->SetAccessFlags(it.GetFieldAccessFlags());
}
-void ClassLinker::LoadMethod(Thread* self, const DexFile& dex_file, const ClassDataItemIterator& it,
- Handle<mirror::Class> klass, ArtMethod* dst) {
+void ClassLinker::LoadMethod(Thread* self,
+ const DexFile& dex_file,
+ const ClassDataItemIterator& it,
+ Handle<mirror::Class> klass,
+ ArtMethod* dst) {
uint32_t dex_method_idx = it.GetMemberIndex();
const DexFile::MethodId& method_id = dex_file.GetMethodId(dex_method_idx);
const char* method_name = dex_file.StringDataByIdx(method_id.name_idx_);
@@ -2470,8 +2496,8 @@
bool ClassLinker::IsDexFileRegisteredLocked(const DexFile& dex_file) {
dex_lock_.AssertSharedHeld(Thread::Current());
- for (size_t i = 0; i != dex_caches_.size(); ++i) {
- mirror::DexCache* dex_cache = GetDexCache(i);
+ for (GcRoot<mirror::DexCache>& root : dex_caches_) {
+ mirror::DexCache* dex_cache = root.Read();
if (dex_cache->GetDexFile() == &dex_file) {
return true;
}
@@ -2601,7 +2627,9 @@
// array class; that always comes from the base element class.
//
// Returns null with an exception raised on failure.
-mirror::Class* ClassLinker::CreateArrayClass(Thread* self, const char* descriptor, size_t hash,
+mirror::Class* ClassLinker::CreateArrayClass(Thread* self,
+ const char* descriptor,
+ size_t hash,
Handle<mirror::ClassLoader> class_loader) {
// Identify the underlying component type
CHECK_EQ('[', descriptor[0]);
@@ -2769,8 +2797,7 @@
return nullptr;
}
-mirror::Class* ClassLinker::InsertClass(const char* descriptor, mirror::Class* klass,
- size_t hash) {
+mirror::Class* ClassLinker::InsertClass(const char* descriptor, mirror::Class* klass, size_t hash) {
if (VLOG_IS_ON(class_linker)) {
mirror::DexCache* dex_cache = klass->GetDexCache();
std::string source;
@@ -2781,11 +2808,13 @@
LOG(INFO) << "Loaded class " << descriptor << source;
}
WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- mirror::Class* existing = LookupClassFromTableLocked(descriptor, klass->GetClassLoader(), hash);
+ mirror::ClassLoader* const class_loader = klass->GetClassLoader();
+ ClassTable* const class_table = InsertClassTableForClassLoader(class_loader);
+ mirror::Class* existing = class_table->Lookup(descriptor, hash);
if (existing != nullptr) {
return existing;
}
- if (kIsDebugBuild && !klass->IsTemp() && klass->GetClassLoader() == nullptr &&
+ if (kIsDebugBuild && !klass->IsTemp() && class_loader == nullptr &&
dex_cache_image_class_lookup_required_) {
// Check a class loaded with the system class loader matches one in the image if the class
// is in the image.
@@ -2795,118 +2824,64 @@
}
}
VerifyObject(klass);
- class_table_.InsertWithHash(GcRoot<mirror::Class>(klass), hash);
+ class_table->InsertWithHash(klass, hash);
if (log_new_class_table_roots_) {
new_class_roots_.push_back(GcRoot<mirror::Class>(klass));
}
return nullptr;
}
-void ClassLinker::UpdateClassVirtualMethods(mirror::Class* klass, ArtMethod* new_methods,
+void ClassLinker::UpdateClassVirtualMethods(mirror::Class* klass,
+ ArtMethod* new_methods,
size_t new_num_methods) {
- // classlinker_classes_lock_ is used to guard against races between root marking and changing the
- // direct and virtual method pointers.
- WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
+ // TODO: Fix the race condition here. b/22832610
klass->SetNumVirtualMethods(new_num_methods);
klass->SetVirtualMethodsPtr(new_methods);
- if (log_new_class_table_roots_) {
- new_class_roots_.push_back(GcRoot<mirror::Class>(klass));
- }
-}
-
-mirror::Class* ClassLinker::UpdateClass(const char* descriptor, mirror::Class* klass,
- size_t hash) {
- WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- auto existing_it = class_table_.FindWithHash(std::make_pair(descriptor, klass->GetClassLoader()),
- hash);
- CHECK(existing_it != class_table_.end());
- mirror::Class* existing = existing_it->Read();
- CHECK_NE(existing, klass) << descriptor;
- CHECK(!existing->IsResolved()) << descriptor;
- CHECK_EQ(klass->GetStatus(), mirror::Class::kStatusResolving) << descriptor;
-
- CHECK(!klass->IsTemp()) << descriptor;
- if (kIsDebugBuild && klass->GetClassLoader() == nullptr &&
- dex_cache_image_class_lookup_required_) {
- // Check a class loaded with the system class loader matches one in the image if the class
- // is in the image.
- existing = LookupClassFromImage(descriptor);
- if (existing != nullptr) {
- CHECK_EQ(klass, existing) << descriptor;
- }
- }
- VerifyObject(klass);
-
- // Update the element in the hash set.
- *existing_it = GcRoot<mirror::Class>(klass);
- if (log_new_class_table_roots_) {
- new_class_roots_.push_back(GcRoot<mirror::Class>(klass));
- }
-
- return existing;
+ // Need to mark the card so that the remembered sets and mod-union tables get updated.
+ Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(klass);
}
bool ClassLinker::RemoveClass(const char* descriptor, mirror::ClassLoader* class_loader) {
WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- auto pair = std::make_pair(descriptor, class_loader);
- auto it = class_table_.Find(pair);
- if (it != class_table_.end()) {
- class_table_.Erase(it);
- return true;
- }
- it = pre_zygote_class_table_.Find(pair);
- if (it != pre_zygote_class_table_.end()) {
- pre_zygote_class_table_.Erase(it);
- return true;
- }
- return false;
+ ClassTable* const class_table = ClassTableForClassLoader(class_loader);
+ return class_table != nullptr && class_table->Remove(descriptor);
}
-mirror::Class* ClassLinker::LookupClass(Thread* self, const char* descriptor, size_t hash,
+mirror::Class* ClassLinker::LookupClass(Thread* self,
+ const char* descriptor,
+ size_t hash,
mirror::ClassLoader* class_loader) {
{
ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
- mirror::Class* result = LookupClassFromTableLocked(descriptor, class_loader, hash);
- if (result != nullptr) {
- return result;
+ ClassTable* const class_table = ClassTableForClassLoader(class_loader);
+ if (class_table != nullptr) {
+ mirror::Class* result = class_table->Lookup(descriptor, hash);
+ if (result != nullptr) {
+ return result;
+ }
}
}
if (class_loader != nullptr || !dex_cache_image_class_lookup_required_) {
return nullptr;
+ }
+ // Lookup failed but need to search dex_caches_.
+ mirror::Class* result = LookupClassFromImage(descriptor);
+ if (result != nullptr) {
+ result = InsertClass(descriptor, result, hash);
} else {
- // Lookup failed but need to search dex_caches_.
- mirror::Class* result = LookupClassFromImage(descriptor);
- if (result != nullptr) {
- InsertClass(descriptor, result, hash);
- } else {
- // Searching the image dex files/caches failed, we don't want to get into this situation
- // often as map searches are faster, so after kMaxFailedDexCacheLookups move all image
- // classes into the class table.
- constexpr uint32_t kMaxFailedDexCacheLookups = 1000;
- if (++failed_dex_cache_class_lookups_ > kMaxFailedDexCacheLookups) {
- MoveImageClassesToClassTable();
- }
- }
- return result;
- }
-}
-
-mirror::Class* ClassLinker::LookupClassFromTableLocked(const char* descriptor,
- mirror::ClassLoader* class_loader,
- size_t hash) {
- auto descriptor_pair = std::make_pair(descriptor, class_loader);
- auto it = pre_zygote_class_table_.FindWithHash(descriptor_pair, hash);
- if (it == pre_zygote_class_table_.end()) {
- it = class_table_.FindWithHash(descriptor_pair, hash);
- if (it == class_table_.end()) {
- return nullptr;
+ // Searching the image dex files/caches failed; we don't want to get into this situation
+ // often, as map searches are faster, so after kMaxFailedDexCacheLookups we move all image
+ // classes into the class table.
+ constexpr uint32_t kMaxFailedDexCacheLookups = 1000;
+ if (++failed_dex_cache_class_lookups_ > kMaxFailedDexCacheLookups) {
+ MoveImageClassesToClassTable();
}
}
- return it->Read();
+ return result;
}
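
The rewritten LookupClass keeps the old fallback strategy: a miss in the
per-loader table falls back to the image dex caches, a hit there is promoted
into the table via InsertClass, and after 1000 misses everything is bulk-moved
by MoveImageClassesToClassTable. A standalone sketch of this
fallback-with-promotion pattern (illustrative types only, not ART code):

    #include <cstdint>
    #include <string>
    #include <unordered_map>

    class TwoTierCache {
     public:
      // Fast table first, then the slow source; promote on hit, and
      // bulk-promote once misses pass a threshold (the analogue of
      // MoveImageClassesToClassTable after kMaxFailedDexCacheLookups).
      const std::string* Lookup(const std::string& key) {
        auto it = fast_.find(key);
        if (it != fast_.end()) {
          return &it->second;
        }
        auto slow_it = slow_.find(key);
        if (slow_it != slow_.end()) {
          // Promote the hit so the next lookup stays on the fast path.
          return &fast_.emplace(key, slow_it->second).first->second;
        }
        if (++failed_lookups_ > kMaxFailedLookups) {
          fast_.insert(slow_.begin(), slow_.end());
          slow_.clear();
        }
        return nullptr;
      }

      void AddSlow(const std::string& key, const std::string& value) {
        slow_.emplace(key, value);
      }

     private:
      static constexpr uint32_t kMaxFailedLookups = 1000;
      uint32_t failed_lookups_ = 0;
      std::unordered_map<std::string, std::string> fast_;
      std::unordered_map<std::string, std::string> slow_;
    };
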
static mirror::ObjectArray<mirror::DexCache>* GetImageDexCaches()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
gc::space::ImageSpace* image = Runtime::Current()->GetHeap()->GetImageSpace();
CHECK(image != nullptr);
mirror::Object* root = image->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches);
@@ -2922,6 +2897,7 @@
ScopedAssertNoThreadSuspension ants(self, "Moving image classes to class table");
mirror::ObjectArray<mirror::DexCache>* dex_caches = GetImageDexCaches();
std::string temp;
+ ClassTable* const class_table = InsertClassTableForClassLoader(nullptr);
for (int32_t i = 0; i < dex_caches->GetLength(); i++) {
mirror::DexCache* dex_cache = dex_caches->Get(i);
mirror::ObjectArray<mirror::Class>* types = dex_cache->GetResolvedTypes();
@@ -2931,12 +2907,12 @@
DCHECK(klass->GetClassLoader() == nullptr);
const char* descriptor = klass->GetDescriptor(&temp);
size_t hash = ComputeModifiedUtf8Hash(descriptor);
- mirror::Class* existing = LookupClassFromTableLocked(descriptor, nullptr, hash);
+ mirror::Class* existing = class_table->Lookup(descriptor, hash);
if (existing != nullptr) {
CHECK_EQ(existing, klass) << PrettyClassAndClassLoader(existing) << " != "
<< PrettyClassAndClassLoader(klass);
} else {
- class_table_.Insert(GcRoot<mirror::Class>(klass));
+ class_table->Insert(klass);
if (log_new_class_table_roots_) {
new_class_roots_.push_back(GcRoot<mirror::Class>(klass));
}
@@ -2949,9 +2925,9 @@
void ClassLinker::MoveClassTableToPreZygote() {
WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- DCHECK(pre_zygote_class_table_.Empty());
- pre_zygote_class_table_ = std::move(class_table_);
- class_table_.Clear();
+ for (auto& class_table : classes_) {
+ class_table.second->FreezeSnapshot();
+ }
}
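
MoveClassTableToPreZygote no longer swaps a dedicated pre_zygote_class_table_;
each per-loader ClassTable now freezes its current contents in place. The
FreezeSnapshot implementation is not part of this hunk; a plausible standalone
analogue, assuming snapshot-then-fresh-set semantics:

    #include <string>
    #include <unordered_set>
    #include <vector>

    class SnapshotTable {
     public:
      // Retire the current mutable set. Its pages are never written again,
      // so after the zygote forks they can stay shared dirty instead of
      // going private dirty in every child process.
      void FreezeSnapshot() {
        frozen_.push_back(std::move(live_));
        live_.clear();  // all future inserts go into a fresh set
      }

      void Insert(const std::string& descriptor) { live_.insert(descriptor); }

      bool Contains(const std::string& descriptor) const {
        if (live_.count(descriptor) != 0) {
          return true;
        }
        for (auto it = frozen_.rbegin(); it != frozen_.rend(); ++it) {
          if (it->count(descriptor) != 0) {
            return true;
          }
        }
        return false;
      }

     private:
      std::unordered_set<std::string> live_;                 // post-snapshot inserts
      std::vector<std::unordered_set<std::string>> frozen_;  // read-only generations
    };
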
mirror::Class* ClassLinker::LookupClassFromImage(const char* descriptor) {
@@ -2977,37 +2953,21 @@
return nullptr;
}
-void ClassLinker::LookupClasses(const char* descriptor, std::vector<mirror::Class*>& result) {
+void ClassLinker::LookupClasses(const char* descriptor,
+ out<std::vector<mirror::Class*>> out_result) {
+ std::vector<mirror::Class*>& result = *out_result;
result.clear();
if (dex_cache_image_class_lookup_required_) {
MoveImageClassesToClassTable();
}
WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- while (true) {
- auto it = class_table_.Find(descriptor);
- if (it == class_table_.end()) {
- break;
+ for (auto& pair : classes_) {
+ // There can only be one class with the same descriptor per class loader.
+ ClassTable* const class_table = pair.second;
+ mirror::Class* klass = class_table->Lookup(descriptor, ComputeModifiedUtf8Hash(descriptor));
+ if (klass != nullptr) {
+ result.push_back(klass);
}
- result.push_back(it->Read());
- class_table_.Erase(it);
- }
- for (mirror::Class* k : result) {
- class_table_.Insert(GcRoot<mirror::Class>(k));
- }
- size_t pre_zygote_start = result.size();
- // Now handle the pre zygote table.
- // Note: This dirties the pre-zygote table but shouldn't be an issue since LookupClasses is only
- // called from the debugger.
- while (true) {
- auto it = pre_zygote_class_table_.Find(descriptor);
- if (it == pre_zygote_class_table_.end()) {
- break;
- }
- result.push_back(it->Read());
- pre_zygote_class_table_.Erase(it);
- }
- for (size_t i = pre_zygote_start; i < result.size(); ++i) {
- pre_zygote_class_table_.Insert(GcRoot<mirror::Class>(result[i]));
}
}
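
LookupClasses is one of many signatures in this patch converted from a raw
pointer or reference out-parameter to the new out<T> wrapper (declared in
base/out_fwd.h, exercised by runtime/base/out_test.cc). The wrapper itself is
not defined in this hunk; the following minimal sketch shows the idea with
assumed names and semantics, where outof() makes the mutation explicit at the
call site and outof_forward() would re-wrap an out<T> being passed along:

    #include <vector>

    template <typename T>
    class out {
     public:
      explicit out(T& param) : param_(&param) {}
      T& operator*() { return *param_; }
      T* operator->() { return param_; }
     private:
      T* param_;
    };

    // Unlike a bare '&x', outof(x) flags the out-parameter at the call site.
    template <typename T>
    out<T> outof(T& param) { return out<T>(param); }

    void FillResult(out<std::vector<int>> result) {
      result->clear();
      result->push_back(42);
    }

    int main() {
      std::vector<int> numbers;
      FillResult(outof(numbers));  // caller explicitly opts in to mutation
      return numbers.size() == 1 ? 0 : 1;
    }
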
@@ -3172,7 +3132,8 @@
}
}
-bool ClassLinker::VerifyClassUsingOatFile(const DexFile& dex_file, mirror::Class* klass,
+bool ClassLinker::VerifyClassUsingOatFile(const DexFile& dex_file,
+ mirror::Class* klass,
mirror::Class::Status& oat_file_class_status) {
// If we're compiling, we can only verify the class using the oat file if
// we are not compiling the image or if the class we're verifying is not part of
@@ -3277,14 +3238,13 @@
}
const uint8_t* handlers_ptr = DexFile::GetCatchHandlerData(*code_item, 0);
uint32_t handlers_size = DecodeUnsignedLeb128(&handlers_ptr);
- ClassLinker* linker = Runtime::Current()->GetClassLinker();
for (uint32_t idx = 0; idx < handlers_size; idx++) {
CatchHandlerIterator iterator(handlers_ptr);
for (; iterator.HasNext(); iterator.Next()) {
// Ensure exception types are resolved so that they don't need resolution to be delivered,
// unresolved exception types will be ignored by exception delivery
if (iterator.GetHandlerTypeIndex() != DexFile::kDexNoIndex16) {
- mirror::Class* exception_type = linker->ResolveType(iterator.GetHandlerTypeIndex(), method);
+ mirror::Class* exception_type = ResolveType(iterator.GetHandlerTypeIndex(), method);
if (exception_type == nullptr) {
DCHECK(Thread::Current()->IsExceptionPending());
Thread::Current()->ClearException();
@@ -3295,9 +3255,12 @@
}
}
-mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable& soa, jstring name,
- jobjectArray interfaces, jobject loader,
- jobjectArray methods, jobjectArray throws) {
+mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable& soa,
+ jstring name,
+ jobjectArray interfaces,
+ jobject loader,
+ jobjectArray methods,
+ jobjectArray throws) {
Thread* self = soa.Self();
StackHandleScope<10> hs(self);
MutableHandle<mirror::Class> klass(hs.NewHandle(
@@ -3316,7 +3279,7 @@
klass->SetDexCache(GetClassRoot(kJavaLangReflectProxy)->GetDexCache());
mirror::Class::SetStatus(klass, mirror::Class::kStatusIdx, self);
std::string descriptor(GetDescriptorForProxy(klass.Get()));
- size_t hash = ComputeModifiedUtf8Hash(descriptor.c_str());
+ const size_t hash = ComputeModifiedUtf8Hash(descriptor.c_str());
// Insert the class before loading the fields as the field roots
// (ArtField::declaring_class_) are only visited from the class
@@ -3392,7 +3355,7 @@
// The new class will replace the old one in the class table.
Handle<mirror::ObjectArray<mirror::Class>> h_interfaces(
hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::Class>*>(interfaces)));
- if (!LinkClass(self, descriptor.c_str(), klass, h_interfaces, &new_class)) {
+ if (!LinkClass(self, descriptor.c_str(), klass, h_interfaces, outof(new_class))) {
mirror::Class::SetStatus(klass, mirror::Class::kStatusError, self);
return nullptr;
}
@@ -3450,8 +3413,7 @@
return DotToDescriptor(name->ToModifiedUtf8().c_str());
}
-ArtMethod* ClassLinker::FindMethodForProxy(mirror::Class* proxy_class,
- ArtMethod* proxy_method) {
+ArtMethod* ClassLinker::FindMethodForProxy(mirror::Class* proxy_class, ArtMethod* proxy_method) {
DCHECK(proxy_class->IsProxyClass());
DCHECK(proxy_method->IsProxyMethod());
{
@@ -3498,7 +3460,8 @@
DCHECK(constructor->IsPublic());
}
-void ClassLinker::CreateProxyMethod(Handle<mirror::Class> klass, ArtMethod* prototype,
+void ClassLinker::CreateProxyMethod(Handle<mirror::Class> klass,
+ ArtMethod* prototype,
ArtMethod* out) {
// Ensure prototype is in dex cache so that we can use the dex cache to look up the overridden
// prototype method
@@ -3544,7 +3507,8 @@
CHECK_EQ(np->GetReturnType(), prototype->GetReturnType());
}
-bool ClassLinker::CanWeInitializeClass(mirror::Class* klass, bool can_init_statics,
+bool ClassLinker::CanWeInitializeClass(mirror::Class* klass,
+ bool can_init_statics,
bool can_init_parents) {
if (can_init_statics && can_init_parents) {
return true;
@@ -3574,8 +3538,10 @@
return CanWeInitializeClass(super_class, can_init_statics, can_init_parents);
}
-bool ClassLinker::InitializeClass(Thread* self, Handle<mirror::Class> klass,
- bool can_init_statics, bool can_init_parents) {
+bool ClassLinker::InitializeClass(Thread* self,
+ Handle<mirror::Class> klass,
+ bool can_init_statics,
+ bool can_init_parents) {
// see JLS 3rd edition, 12.4.2 "Detailed Initialization Procedure" for the locking protocol
// Are we already initialized and therefore done?
@@ -3644,7 +3610,7 @@
return true;
}
// No. That's fine. Wait for another thread to finish initializing.
- return WaitForInitializeClass(klass, self, lock);
+ return WaitForInitializeClass(klass, self, &lock);
}
if (!ValidateSuperClassDescriptors(klass)) {
@@ -3778,13 +3744,16 @@
return success;
}
-bool ClassLinker::WaitForInitializeClass(Handle<mirror::Class> klass, Thread* self,
- ObjectLock<mirror::Class>& lock)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+bool ClassLinker::WaitForInitializeClass(Handle<mirror::Class> klass,
+ Thread* self,
+ ObjectLock<mirror::Class>* lock)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(lock != nullptr);
+
while (true) {
self->AssertNoPendingException();
CHECK(!klass->IsInitialized());
- lock.WaitIgnoringInterrupts();
+ lock->WaitIgnoringInterrupts();
// When we wake up, repeat the test for init-in-progress. If
// there's an exception pending (only possible if
@@ -3824,7 +3793,7 @@
Handle<mirror::Class> super_klass,
ArtMethod* method,
ArtMethod* m)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(Thread::Current()->IsExceptionPending());
DCHECK(!m->IsProxyMethod());
const DexFile* dex_file = m->GetDexFile();
@@ -3847,8 +3816,9 @@
Handle<mirror::Class> super_klass,
ArtMethod* method,
ArtMethod* m,
- uint32_t index, uint32_t arg_type_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uint32_t index,
+ uint32_t arg_type_idx)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(Thread::Current()->IsExceptionPending());
DCHECK(!m->IsProxyMethod());
const DexFile* dex_file = m->GetDexFile();
@@ -3868,7 +3838,7 @@
Handle<mirror::Class> super_klass,
ArtMethod* method,
const std::string& error_msg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ThrowLinkageError(klass.Get(),
"Class %s method %s resolves differently in %s %s: %s",
PrettyDescriptor(klass.Get()).c_str(),
@@ -3883,7 +3853,7 @@
Handle<mirror::Class> super_klass,
ArtMethod* method1,
ArtMethod* method2)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
{
StackHandleScope<1> hs(self);
Handle<mirror::Class> return_type(hs.NewHandle(method1->GetReturnType()));
@@ -4009,7 +3979,8 @@
return true;
}
-bool ClassLinker::EnsureInitialized(Thread* self, Handle<mirror::Class> c, bool can_init_fields,
+bool ClassLinker::EnsureInitialized(Thread* self, Handle<mirror::Class> c,
+ bool can_init_fields,
bool can_init_parents) {
DCHECK(c.Get() != nullptr);
if (c->IsInitialized()) {
@@ -4030,7 +4001,7 @@
void ClassLinker::FixupTemporaryDeclaringClass(mirror::Class* temp_class,
mirror::Class* new_class) {
ArtField* fields = new_class->GetIFields();
- DCHECK_EQ(temp_class->NumInstanceFields(), new_class->NumInstanceFields());
+ DCHECK_EQ(temp_class->NumInstanceFields(), 0u);
for (size_t i = 0, count = new_class->NumInstanceFields(); i < count; i++) {
if (fields[i].GetDeclaringClass() == temp_class) {
fields[i].SetDeclaringClass(new_class);
@@ -4038,31 +4009,56 @@
}
fields = new_class->GetSFields();
- DCHECK_EQ(temp_class->NumStaticFields(), new_class->NumStaticFields());
+ DCHECK_EQ(temp_class->NumStaticFields(), 0u);
for (size_t i = 0, count = new_class->NumStaticFields(); i < count; i++) {
if (fields[i].GetDeclaringClass() == temp_class) {
fields[i].SetDeclaringClass(new_class);
}
}
- DCHECK_EQ(temp_class->NumDirectMethods(), new_class->NumDirectMethods());
+ DCHECK_EQ(temp_class->NumDirectMethods(), 0u);
for (auto& method : new_class->GetDirectMethods(image_pointer_size_)) {
if (method.GetDeclaringClass() == temp_class) {
method.SetDeclaringClass(new_class);
}
}
- DCHECK_EQ(temp_class->NumVirtualMethods(), new_class->NumVirtualMethods());
+ DCHECK_EQ(temp_class->NumVirtualMethods(), 0u);
for (auto& method : new_class->GetVirtualMethods(image_pointer_size_)) {
if (method.GetDeclaringClass() == temp_class) {
method.SetDeclaringClass(new_class);
}
}
+
+ // Make sure the remembered set and mod-union tables know that we updated some of the native
+ // roots.
+ Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(new_class);
}
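
Here, as in UpdateClassVirtualMethods above, WriteBarrierEveryFieldOf dirties
the card covering the whole object so the next GC re-scans every field and the
remembered sets / mod-union tables pick up the rewritten native roots. A
simplified standalone sketch of a card-marking barrier (card granularity and
the dirty value are assumptions for illustration):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // One card byte per 128 bytes of heap (assumed granularity).
    constexpr size_t kCardShift = 7;
    constexpr uint8_t kCardDirty = 0x70;

    class CardTable {
     public:
      CardTable(uintptr_t heap_begin, size_t heap_size)
          : heap_begin_(heap_begin), cards_((heap_size >> kCardShift) + 1, 0) {}

      // "Write barrier every field of": dirty the object's card so the GC
      // re-scans all of its fields on the next pass.
      void MarkCard(const void* obj) {
        cards_[Index(obj)] = kCardDirty;
      }

      bool IsDirty(const void* obj) const {
        return cards_[Index(obj)] == kCardDirty;
      }

     private:
      size_t Index(const void* obj) const {
        return (reinterpret_cast<uintptr_t>(obj) - heap_begin_) >> kCardShift;
      }

      const uintptr_t heap_begin_;
      std::vector<uint8_t> cards_;
    };
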
-bool ClassLinker::LinkClass(Thread* self, const char* descriptor, Handle<mirror::Class> klass,
+ClassTable* ClassLinker::InsertClassTableForClassLoader(mirror::ClassLoader* class_loader) {
+ auto it = classes_.find(GcRoot<mirror::ClassLoader>(class_loader));
+ if (it != classes_.end()) {
+ return it->second;
+ }
+ // Class table for loader not found, add it to the table.
+ auto* const class_table = new ClassTable;
+ classes_.Put(GcRoot<mirror::ClassLoader>(class_loader), class_table);
+ return class_table;
+}
+
+ClassTable* ClassLinker::ClassTableForClassLoader(mirror::ClassLoader* class_loader) {
+ auto it = classes_.find(GcRoot<mirror::ClassLoader>(class_loader));
+ if (it != classes_.end()) {
+ return it->second;
+ }
+ return nullptr;
+}
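
These two helpers are the core of the refactor: the single
class_table_/pre_zygote_class_table_ pair becomes a classes_ map from
GcRoot<mirror::ClassLoader> to ClassTable*, with one get-or-create accessor
and one lookup-only accessor. A standalone analogue of that shape (key type
and ownership simplified; the real classes_ declaration is not in this hunk):

    #include <memory>
    #include <unordered_map>

    class ClassTable {};  // stand-in for the per-loader hash table of classes

    class PerLoaderTables {
     public:
      // Get-or-create, like InsertClassTableForClassLoader(class_loader).
      ClassTable* InsertTableFor(const void* loader) {
        auto it = tables_.find(loader);
        if (it != tables_.end()) {
          return it->second.get();
        }
        std::unique_ptr<ClassTable> table(new ClassTable);
        ClassTable* raw = table.get();
        tables_.emplace(loader, std::move(table));
        return raw;
      }

      // Lookup-only, like ClassTableForClassLoader: returns null for a
      // loader that has never defined a class.
      ClassTable* TableFor(const void* loader) const {
        auto it = tables_.find(loader);
        return it != tables_.end() ? it->second.get() : nullptr;
      }

     private:
      // The boot class loader is represented by a null key, matching the
      // nullptr class_loader convention in the code above.
      std::unordered_map<const void*, std::unique_ptr<ClassTable>> tables_;
    };
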
+
+bool ClassLinker::LinkClass(Thread* self,
+ const char* descriptor,
+ Handle<mirror::Class> klass,
Handle<mirror::ObjectArray<mirror::Class>> interfaces,
- MutableHandle<mirror::Class>* h_new_class_out) {
+ out<MutableHandle<mirror::Class>> h_new_class_out) {
CHECK_EQ(mirror::Class::kStatusLoaded, klass->GetStatus());
if (!LinkSuperClass(klass)) {
@@ -4070,14 +4066,14 @@
}
ArtMethod* imt[mirror::Class::kImtSize];
std::fill_n(imt, arraysize(imt), Runtime::Current()->GetImtUnimplementedMethod());
- if (!LinkMethods(self, klass, interfaces, imt)) {
+ if (!LinkMethods(self, klass, interfaces, outof(imt))) {
return false;
}
if (!LinkInstanceFields(self, klass)) {
return false;
}
size_t class_size;
- if (!LinkStaticFields(self, klass, &class_size)) {
+ if (!LinkStaticFields(self, klass, outof(class_size))) {
return false;
}
CreateReferenceInstanceOffsets(klass);
@@ -4101,6 +4097,14 @@
// Retire the temporary class and create the correctly sized resolved class.
StackHandleScope<1> hs(self);
auto h_new_class = hs.NewHandle(klass->CopyOf(self, class_size, imt, image_pointer_size_));
+ // Set array lengths to 0 since we don't want the GC to visit two different classes with the
+ // same ArtField and ArtMethod arrays. If this occurs, it causes bugs in remembered sets since
+ // the GC may not see any references to the from space and clean the card, even though there
+ // were references to the from space that got marked by the first class.
+ klass->SetNumDirectMethods(0);
+ klass->SetNumVirtualMethods(0);
+ klass->SetNumStaticFields(0);
+ klass->SetNumInstanceFields(0);
if (UNLIKELY(h_new_class.Get() == nullptr)) {
self->AssertPendingOOMException();
mirror::Class::SetStatus(klass, mirror::Class::kStatusError, self);
@@ -4110,9 +4114,26 @@
CHECK_EQ(h_new_class->GetClassSize(), class_size);
ObjectLock<mirror::Class> lock(self, h_new_class);
FixupTemporaryDeclaringClass(klass.Get(), h_new_class.Get());
- mirror::Class* existing = UpdateClass(descriptor, h_new_class.Get(),
- ComputeModifiedUtf8Hash(descriptor));
- CHECK(existing == nullptr || existing == klass.Get());
+
+ {
+ WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
+ mirror::ClassLoader* const class_loader = h_new_class.Get()->GetClassLoader();
+ ClassTable* const table = InsertClassTableForClassLoader(class_loader);
+ mirror::Class* existing = table->UpdateClass(descriptor, h_new_class.Get(),
+ ComputeModifiedUtf8Hash(descriptor));
+ CHECK_EQ(existing, klass.Get());
+ if (kIsDebugBuild && class_loader == nullptr && dex_cache_image_class_lookup_required_) {
+ // Check a class loaded with the system class loader matches one in the image if the class
+ // is in the image.
+ mirror::Class* const image_class = LookupClassFromImage(descriptor);
+ if (image_class != nullptr) {
+ CHECK_EQ(klass.Get(), existing) << descriptor;
+ }
+ }
+ if (log_new_class_table_roots_) {
+ new_class_roots_.push_back(GcRoot<mirror::Class>(h_new_class.Get()));
+ }
+ }
// This will notify waiters on temp class that saw the not yet resolved class in the
// class_table_ during EnsureResolved.
@@ -4128,30 +4149,31 @@
return true;
}
-static void CountMethodsAndFields(ClassDataItemIterator& dex_data,
- size_t* virtual_methods,
- size_t* direct_methods,
- size_t* static_fields,
- size_t* instance_fields) {
+static void CountMethodsAndFields(ClassDataItemIterator* dex_data,
+ out<size_t> virtual_methods,
+ out<size_t> direct_methods,
+ out<size_t> static_fields,
+ out<size_t> instance_fields) {
+ DCHECK(dex_data != nullptr);
*virtual_methods = *direct_methods = *static_fields = *instance_fields = 0;
- while (dex_data.HasNextStaticField()) {
- dex_data.Next();
+ while (dex_data->HasNextStaticField()) {
+ dex_data->Next();
(*static_fields)++;
}
- while (dex_data.HasNextInstanceField()) {
- dex_data.Next();
+ while (dex_data->HasNextInstanceField()) {
+ dex_data->Next();
(*instance_fields)++;
}
- while (dex_data.HasNextDirectMethod()) {
+ while (dex_data->HasNextDirectMethod()) {
(*direct_methods)++;
- dex_data.Next();
+ dex_data->Next();
}
- while (dex_data.HasNextVirtualMethod()) {
+ while (dex_data->HasNextVirtualMethod()) {
(*virtual_methods)++;
- dex_data.Next();
+ dex_data->Next();
}
- DCHECK(!dex_data.HasNext());
+ DCHECK(!dex_data->HasNext());
}
static void DumpClass(std::ostream& os,
@@ -4185,8 +4207,10 @@
}
}
-static std::string DumpClasses(const DexFile& dex_file1, const DexFile::ClassDef& dex_class_def1,
- const DexFile& dex_file2, const DexFile::ClassDef& dex_class_def2) {
+static std::string DumpClasses(const DexFile& dex_file1,
+ const DexFile::ClassDef& dex_class_def1,
+ const DexFile& dex_file2,
+ const DexFile::ClassDef& dex_class_def2) {
std::ostringstream os;
DumpClass(os, dex_file1, dex_class_def1, " (Compile time)");
DumpClass(os, dex_file2, dex_class_def2, " (Runtime)");
@@ -4196,20 +4220,28 @@
// Very simple structural check on whether the classes match. Only compares the number of
// methods and fields.
-static bool SimpleStructuralCheck(const DexFile& dex_file1, const DexFile::ClassDef& dex_class_def1,
- const DexFile& dex_file2, const DexFile::ClassDef& dex_class_def2,
+static bool SimpleStructuralCheck(const DexFile& dex_file1,
+ const DexFile::ClassDef& dex_class_def1,
+ const DexFile& dex_file2,
+ const DexFile::ClassDef& dex_class_def2,
std::string* error_msg) {
ClassDataItemIterator dex_data1(dex_file1, dex_file1.GetClassData(dex_class_def1));
ClassDataItemIterator dex_data2(dex_file2, dex_file2.GetClassData(dex_class_def2));
// Counters for current dex file.
size_t dex_virtual_methods1, dex_direct_methods1, dex_static_fields1, dex_instance_fields1;
- CountMethodsAndFields(dex_data1, &dex_virtual_methods1, &dex_direct_methods1, &dex_static_fields1,
- &dex_instance_fields1);
+ CountMethodsAndFields(&dex_data1,
+ outof(dex_virtual_methods1),
+ outof(dex_direct_methods1),
+ outof(dex_static_fields1),
+ outof(dex_instance_fields1));
// Counters for compile-time dex file.
size_t dex_virtual_methods2, dex_direct_methods2, dex_static_fields2, dex_instance_fields2;
- CountMethodsAndFields(dex_data2, &dex_virtual_methods2, &dex_direct_methods2, &dex_static_fields2,
- &dex_instance_fields2);
+ CountMethodsAndFields(&dex_data2,
+ outof(dex_virtual_methods2),
+ outof(dex_direct_methods2),
+ outof(dex_static_fields2),
+ outof(dex_instance_fields2));
if (dex_virtual_methods1 != dex_virtual_methods2) {
std::string class_dump = DumpClasses(dex_file1, dex_class_def1, dex_file2, dex_class_def2);
@@ -4245,7 +4277,7 @@
const DexFile& dex_file,
const DexFile::ClassDef& class_def,
mirror::Class* super_class)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// Check for unexpected changes in the superclass.
// Quick check 1) is the super_class class-loader the boot class loader? This always has
// precedence.
@@ -4412,9 +4444,10 @@
}
// Populate the class vtable and itable. Compute return type indices.
-bool ClassLinker::LinkMethods(Thread* self, Handle<mirror::Class> klass,
+bool ClassLinker::LinkMethods(Thread* self,
+ Handle<mirror::Class> klass,
Handle<mirror::ObjectArray<mirror::Class>> interfaces,
- ArtMethod** out_imt) {
+ out<ArtMethod* [mirror::Class::kImtSize]> out_imt) {
self->AllowThreadSuspension();
if (klass->IsInterface()) {
// No vtable.
@@ -4429,7 +4462,10 @@
} else if (!LinkVirtualMethods(self, klass)) { // Link virtual methods first.
return false;
}
- return LinkInterfaceMethods(self, klass, interfaces, out_imt); // Link interface method last.
+ return LinkInterfaceMethods(self,
+ klass,
+ interfaces,
+ outof_forward(out_imt)); // Link interface methods last.
}
// Comparator for name and signature of a method, used in finding overriding methods. Implementation
@@ -4438,7 +4474,7 @@
class MethodNameAndSignatureComparator FINAL : public ValueObject {
public:
explicit MethodNameAndSignatureComparator(ArtMethod* method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
+ SHARED_REQUIRES(Locks::mutator_lock_) :
dex_file_(method->GetDexFile()), mid_(&dex_file_->GetMethodId(method->GetDexMethodIndex())),
name_(nullptr), name_len_(0) {
DCHECK(!method->IsProxyMethod()) << PrettyMethod(method);
@@ -4452,7 +4488,7 @@
}
bool HasSameNameAndSignature(ArtMethod* other)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(!other->IsProxyMethod()) << PrettyMethod(other);
const DexFile* other_dex_file = other->GetDexFile();
const DexFile::MethodId& other_mid = other_dex_file->GetMethodId(other->GetDexMethodIndex());
@@ -4482,13 +4518,15 @@
class LinkVirtualHashTable {
public:
- LinkVirtualHashTable(Handle<mirror::Class> klass, size_t hash_size, uint32_t* hash_table,
+ LinkVirtualHashTable(Handle<mirror::Class> klass,
+ size_t hash_size,
+ uint32_t* hash_table,
size_t image_pointer_size)
: klass_(klass), hash_size_(hash_size), hash_table_(hash_table),
image_pointer_size_(image_pointer_size) {
std::fill(hash_table_, hash_table_ + hash_size_, invalid_index_);
}
- void Add(uint32_t virtual_method_index) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void Add(uint32_t virtual_method_index) SHARED_REQUIRES(Locks::mutator_lock_) {
ArtMethod* local_method = klass_->GetVirtualMethodDuringLinking(
virtual_method_index, image_pointer_size_);
const char* name = local_method->GetInterfaceMethodIfProxy(image_pointer_size_)->GetName();
@@ -4503,7 +4541,7 @@
hash_table_[index] = virtual_method_index;
}
uint32_t FindAndRemove(MethodNameAndSignatureComparator* comparator)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
const char* name = comparator->GetName();
uint32_t hash = ComputeModifiedUtf8Hash(name);
size_t index = hash % hash_size_;
@@ -4686,9 +4724,12 @@
return true;
}
-bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass,
+bool ClassLinker::LinkInterfaceMethods(Thread* self,
+ Handle<mirror::Class> klass,
Handle<mirror::ObjectArray<mirror::Class>> interfaces,
- ArtMethod** out_imt) {
+ out<ArtMethod* [mirror::Class::kImtSize]> out_imt_array) {
+ auto& out_imt = *out_imt_array;
+
StackHandleScope<3> hs(self);
Runtime* const runtime = Runtime::Current();
const bool has_superclass = klass->HasSuperClass();
@@ -4876,7 +4917,7 @@
}
}
- auto* old_cause = self->StartAssertNoThreadSuspension(
+ const char* old_cause = self->StartAssertNoThreadSuspension(
"Copying ArtMethods for LinkInterfaceMethods");
for (size_t i = 0; i < ifcount; ++i) {
size_t num_methods = iftable->GetInterface(i)->NumVirtualMethods();
@@ -4987,6 +5028,7 @@
self, old_virtuals, old_method_count * method_size, new_method_count * method_size));
if (UNLIKELY(virtuals == nullptr)) {
self->AssertPendingOOMException();
+ self->EndAssertNoThreadSuspension(old_cause);
return false;
}
ScopedArenaUnorderedMap<ArtMethod*, ArtMethod*> move_table(allocator.Adapter());
@@ -5096,16 +5138,20 @@
bool ClassLinker::LinkInstanceFields(Thread* self, Handle<mirror::Class> klass) {
CHECK(klass.Get() != nullptr);
- return LinkFields(self, klass, false, nullptr);
+ size_t class_size_dont_care;
+ UNUSED(class_size_dont_care); // This doesn't get set for instance fields.
+ return LinkFields(self, klass, false, outof(class_size_dont_care));
}
-bool ClassLinker::LinkStaticFields(Thread* self, Handle<mirror::Class> klass, size_t* class_size) {
+bool ClassLinker::LinkStaticFields(Thread* self,
+ Handle<mirror::Class> klass,
+ out<size_t> class_size) {
CHECK(klass.Get() != nullptr);
- return LinkFields(self, klass, true, class_size);
+ return LinkFields(self, klass, true, outof_forward(class_size));
}
struct LinkFieldsComparator {
- explicit LinkFieldsComparator() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ explicit LinkFieldsComparator() SHARED_REQUIRES(Locks::mutator_lock_) {
}
// No thread safety analysis as will be called from STL. Checked lock held in constructor.
bool operator()(ArtField* field1, ArtField* field2)
@@ -5138,8 +5184,10 @@
}
};
-bool ClassLinker::LinkFields(Thread* self, Handle<mirror::Class> klass, bool is_static,
- size_t* class_size) {
+bool ClassLinker::LinkFields(Thread* self,
+ Handle<mirror::Class> klass,
+ bool is_static,
+ out<size_t> class_size) {
self->AllowThreadSuspension();
const size_t num_fields = is_static ? klass->NumStaticFields() : klass->NumInstanceFields();
ArtField* const fields = is_static ? klass->GetSFields() : klass->GetIFields();
@@ -5310,7 +5358,8 @@
klass->SetReferenceInstanceOffsets(reference_offsets);
}
-mirror::String* ClassLinker::ResolveString(const DexFile& dex_file, uint32_t string_idx,
+mirror::String* ClassLinker::ResolveString(const DexFile& dex_file,
+ uint32_t string_idx,
Handle<mirror::DexCache> dex_cache) {
DCHECK(dex_cache.Get() != nullptr);
mirror::String* resolved = dex_cache->GetResolvedString(string_idx);
@@ -5324,7 +5373,8 @@
return string;
}
-mirror::Class* ClassLinker::ResolveType(const DexFile& dex_file, uint16_t type_idx,
+mirror::Class* ClassLinker::ResolveType(const DexFile& dex_file,
+ uint16_t type_idx,
mirror::Class* referrer) {
StackHandleScope<2> hs(Thread::Current());
Handle<mirror::DexCache> dex_cache(hs.NewHandle(referrer->GetDexCache()));
@@ -5332,7 +5382,8 @@
return ResolveType(dex_file, type_idx, dex_cache, class_loader);
}
-mirror::Class* ClassLinker::ResolveType(const DexFile& dex_file, uint16_t type_idx,
+mirror::Class* ClassLinker::ResolveType(const DexFile& dex_file,
+ uint16_t type_idx,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader) {
DCHECK(dex_cache.Get() != nullptr);
@@ -5365,7 +5416,8 @@
return resolved;
}
-ArtMethod* ClassLinker::ResolveMethod(const DexFile& dex_file, uint32_t method_idx,
+ArtMethod* ClassLinker::ResolveMethod(const DexFile& dex_file,
+ uint32_t method_idx,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader,
ArtMethod* referrer, InvokeType type) {
@@ -5522,9 +5574,11 @@
}
}
-ArtField* ClassLinker::ResolveField(const DexFile& dex_file, uint32_t field_idx,
+ArtField* ClassLinker::ResolveField(const DexFile& dex_file,
+ uint32_t field_idx,
Handle<mirror::DexCache> dex_cache,
- Handle<mirror::ClassLoader> class_loader, bool is_static) {
+ Handle<mirror::ClassLoader> class_loader,
+ bool is_static) {
DCHECK(dex_cache.Get() != nullptr);
ArtField* resolved = dex_cache->GetResolvedField(field_idx, image_pointer_size_);
if (resolved != nullptr) {
@@ -5563,7 +5617,8 @@
return resolved;
}
-ArtField* ClassLinker::ResolveFieldJLS(const DexFile& dex_file, uint32_t field_idx,
+ArtField* ClassLinker::ResolveFieldJLS(const DexFile& dex_file,
+ uint32_t field_idx,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader) {
DCHECK(dex_cache.Get() != nullptr);
@@ -5593,7 +5648,8 @@
return resolved;
}
-const char* ClassLinker::MethodShorty(uint32_t method_idx, ArtMethod* referrer,
+const char* ClassLinker::MethodShorty(uint32_t method_idx,
+ ArtMethod* referrer,
uint32_t* length) {
mirror::Class* declaring_class = referrer->GetDeclaringClass();
mirror::DexCache* dex_cache = declaring_class->GetDexCache();
@@ -5602,23 +5658,22 @@
return dex_file.GetMethodShorty(method_id, length);
}
-void ClassLinker::DumpAllClasses(int flags) {
- if (dex_cache_image_class_lookup_required_) {
- MoveImageClassesToClassTable();
- }
- // TODO: at the time this was written, it wasn't safe to call PrettyField with the ClassLinker
- // lock held, because it might need to resolve a field's type, which would try to take the lock.
- std::vector<mirror::Class*> all_classes;
- {
- ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- for (GcRoot<mirror::Class>& it : class_table_) {
- all_classes.push_back(it.Read());
- }
+class DumpClassVisitor : public ClassVisitor {
+ public:
+ explicit DumpClassVisitor(int flags) : flags_(flags) {}
+
+ bool Visit(mirror::Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ klass->DumpClass(LOG(ERROR), flags_);
+ return true;
}
- for (size_t i = 0; i < all_classes.size(); ++i) {
- all_classes[i]->DumpClass(std::cerr, flags);
- }
+ private:
+ const int flags_;
+};
+
+void ClassLinker::DumpAllClasses(int flags) {
+ DumpClassVisitor visitor(flags);
+ VisitClasses(&visitor);
}
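
The function-pointer typedef bool (ClassVisitor)(mirror::Class*, void*) is
removed from the header below in favor of a virtual interface, of which only
the DumpClassVisitor subclass appears in this hunk. A standalone
reconstruction of the interface and a VisitClasses-style driver, inferred from
the override above (returning false is assumed to stop the iteration):

    #include <iostream>
    #include <vector>

    struct Class { const char* descriptor; };  // stand-in for mirror::Class

    class ClassVisitor {
     public:
      virtual ~ClassVisitor() {}
      // Return true to keep visiting, false to stop early.
      virtual bool Visit(Class* klass) = 0;
    };

    class PrintVisitor : public ClassVisitor {
     public:
      bool Visit(Class* klass) override {
        std::cout << klass->descriptor << '\n';
        return true;  // visit everything, like DumpClassVisitor
      }
    };

    void VisitClasses(std::vector<Class>& classes, ClassVisitor* visitor) {
      for (Class& c : classes) {
        if (!visitor->Visit(&c)) {
          break;
        }
      }
    }
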
static OatFile::OatMethod CreateOatMethod(const void* code) {
@@ -5671,8 +5726,24 @@
MoveImageClassesToClassTable();
}
ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
- os << "Zygote loaded classes=" << pre_zygote_class_table_.Size() << " post zygote classes="
- << class_table_.Size() << "\n";
+ os << "Zygote loaded classes=" << NumZygoteClasses() << " post zygote classes="
+ << NumNonZygoteClasses() << "\n";
+}
+
+size_t ClassLinker::NumZygoteClasses() const {
+ size_t sum = 0;
+ for (auto& pair : classes_) {
+ sum += pair.second->NumZygoteClasses();
+ }
+ return sum;
+}
+
+size_t ClassLinker::NumNonZygoteClasses() const {
+ size_t sum = 0;
+ for (auto& pair : classes_) {
+ sum += pair.second->NumNonZygoteClasses();
+ }
+ return sum;
}
size_t ClassLinker::NumLoadedClasses() {
@@ -5681,7 +5752,7 @@
}
ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
// Only return non-zygote classes since these are the ones which apps care about.
- return class_table_.Size();
+ return NumNonZygoteClasses();
}
pid_t ClassLinker::GetClassesLockOwner() {
@@ -5752,43 +5823,6 @@
return descriptor;
}
-std::size_t ClassLinker::ClassDescriptorHashEquals::operator()(const GcRoot<mirror::Class>& root)
- const {
- std::string temp;
- return ComputeModifiedUtf8Hash(root.Read()->GetDescriptor(&temp));
-}
-
-bool ClassLinker::ClassDescriptorHashEquals::operator()(const GcRoot<mirror::Class>& a,
- const GcRoot<mirror::Class>& b) const {
- if (a.Read()->GetClassLoader() != b.Read()->GetClassLoader()) {
- return false;
- }
- std::string temp;
- return a.Read()->DescriptorEquals(b.Read()->GetDescriptor(&temp));
-}
-
-std::size_t ClassLinker::ClassDescriptorHashEquals::operator()(
- const std::pair<const char*, mirror::ClassLoader*>& element) const {
- return ComputeModifiedUtf8Hash(element.first);
-}
-
-bool ClassLinker::ClassDescriptorHashEquals::operator()(
- const GcRoot<mirror::Class>& a, const std::pair<const char*, mirror::ClassLoader*>& b) const {
- if (a.Read()->GetClassLoader() != b.second) {
- return false;
- }
- return a.Read()->DescriptorEquals(b.first);
-}
-
-bool ClassLinker::ClassDescriptorHashEquals::operator()(const GcRoot<mirror::Class>& a,
- const char* descriptor) const {
- return a.Read()->DescriptorEquals(descriptor);
-}
-
-std::size_t ClassLinker::ClassDescriptorHashEquals::operator()(const char* descriptor) const {
- return ComputeModifiedUtf8Hash(descriptor);
-}
-
bool ClassLinker::MayBeCalledWithDirectCodePointer(ArtMethod* m) {
if (Runtime::Current()->UseJit()) {
// JIT can have direct code pointers from any method to any other method.
@@ -5822,7 +5856,8 @@
}
}
-jobject ClassLinker::CreatePathClassLoader(Thread* self, std::vector<const DexFile*>& dex_files) {
+jobject ClassLinker::CreatePathClassLoader(Thread* self,
+ const std::vector<const DexFile*>& dex_files) {
// SOAAlreadyRunnable is protected, and we need something to add a global reference.
// We could move the jobject to the callers, but all call-sites do this...
ScopedObjectAccessUnchecked soa(self);
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index b60cba4..54f1f3d 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -25,6 +25,8 @@
#include "base/hash_set.h"
#include "base/macros.h"
#include "base/mutex.h"
+#include "base/out_fwd.h"
+#include "class_table.h"
#include "dex_file.h"
#include "gc_root.h"
#include "jni.h"
@@ -56,8 +58,6 @@
class ScopedObjectAccessAlreadyRunnable;
template<size_t kNumReferences> class PACKED(4) StackHandleScope;
-typedef bool (ClassVisitor)(mirror::Class* c, void* arg);
-
enum VisitRootFlags : uint8_t;
class ClassLinker {
@@ -109,16 +109,19 @@
// Initialize class linker by bootstraping from dex files.
void InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> boot_class_path)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!dex_lock_);
// Initialize class linker from one or more images.
- void InitFromImage() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void InitFromImage() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_);
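
From here on the header systematically replaces SHARED_LOCKS_REQUIRED with
SHARED_REQUIRES and LOCKS_EXCLUDED(lock) with REQUIRES(!lock). The negative
form is a Clang negative capability, which the analysis can actually verify
once -Wthread-safety-negative is enabled. A minimal Clang-oriented sketch of
what such macros might expand to (assumed expansions, not ART's actual
base/mutex.h):

    #define CAPABILITY(x)        __attribute__((capability(x)))
    #define REQUIRES(...)        __attribute__((requires_capability(__VA_ARGS__)))
    #define SHARED_REQUIRES(...) __attribute__((requires_shared_capability(__VA_ARGS__)))
    #define ACQUIRE(...)         __attribute__((acquire_capability(__VA_ARGS__)))
    #define RELEASE(...)         __attribute__((release_capability(__VA_ARGS__)))

    class CAPABILITY("mutex") Mutex {
     public:
      void Lock() ACQUIRE() {}
      void Unlock() RELEASE() {}
    };

    Mutex dex_lock_;

    // REQUIRES(!dex_lock_) is a negative capability: the caller must provably
    // *not* hold dex_lock_, so this function is free to acquire it. Clang
    // enforces this under -Wthread-safety-negative.
    void RegisterDexFile() REQUIRES(!dex_lock_) {
      dex_lock_.Lock();
      // ... register the dex file ...
      dex_lock_.Unlock();
    }
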
// Finds a class by its descriptor, loading it if necessary.
// If class_loader is null, searches boot_class_path_.
- mirror::Class* FindClass(Thread* self, const char* descriptor,
+ mirror::Class* FindClass(Thread* self,
+ const char* descriptor,
Handle<mirror::ClassLoader> class_loader)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!dex_lock_);
// Finds a class in the path class loader, loading it if necessary without using JNI. Hash
// function is supposed to be ComputeModifiedUtf8Hash(descriptor). Returns true if the
@@ -126,19 +129,24 @@
// was encountered while walking the parent chain (currently only BootClassLoader and
// PathClassLoader are supported).
bool FindClassInPathClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
- Thread* self, const char* descriptor, size_t hash,
+ Thread* self,
+ const char* descriptor,
+ size_t hash,
Handle<mirror::ClassLoader> class_loader,
- mirror::Class** result)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ out<mirror::Class*> result)
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!dex_lock_);
// Finds a class by its descriptor using the "system" class loader, ie by searching the
// boot_class_path_.
mirror::Class* FindSystemClass(Thread* self, const char* descriptor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!dex_lock_);
// Finds the array class given for the element class.
- mirror::Class* FindArrayClass(Thread* self, mirror::Class** element_class)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Class* FindArrayClass(Thread* self, /* in parameter */ mirror::Class** element_class)
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!dex_lock_);
// Returns true if the class linker is initialized.
bool IsInitialized() const {
@@ -146,100 +154,123 @@
}
// Define a new a class based on a ClassDef from a DexFile
- mirror::Class* DefineClass(Thread* self, const char* descriptor, size_t hash,
+ mirror::Class* DefineClass(Thread* self,
+ const char* descriptor,
+ size_t hash,
Handle<mirror::ClassLoader> class_loader,
- const DexFile& dex_file, const DexFile::ClassDef& dex_class_def)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const DexFile& dex_file,
+ const DexFile::ClassDef& dex_class_def)
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!dex_lock_);
// Finds a class by its descriptor, returning null if it wasn't loaded
// by the given 'class_loader'.
- mirror::Class* LookupClass(Thread* self, const char* descriptor, size_t hash,
- mirror::ClassLoader* class_loader)
- LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Class* LookupClass(Thread* self,
+ const char* descriptor,
+ size_t hash,
+ mirror::ClassLoader* class_loader)
+ REQUIRES(!Locks::classlinker_classes_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Finds all the classes with the given descriptor, regardless of ClassLoader.
- void LookupClasses(const char* descriptor, std::vector<mirror::Class*>& classes)
- LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void LookupClasses(const char* descriptor, out<std::vector<mirror::Class*>> classes)
+ REQUIRES(!Locks::classlinker_classes_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
- mirror::Class* FindPrimitiveClass(char type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Class* FindPrimitiveClass(char type) SHARED_REQUIRES(Locks::mutator_lock_);
// General class unloading is not supported, this is used to prune
// unwanted classes during image writing.
- bool RemoveClass(const char* descriptor, mirror::ClassLoader* class_loader)
- LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool RemoveClass(const char* descriptor,
+ mirror::ClassLoader* class_loader)
+ REQUIRES(!Locks::classlinker_classes_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
void DumpAllClasses(int flags)
- LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!Locks::classlinker_classes_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
void DumpForSigQuit(std::ostream& os)
- LOCKS_EXCLUDED(Locks::classlinker_classes_lock_);
+ REQUIRES(!Locks::classlinker_classes_lock_);
size_t NumLoadedClasses()
- LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!Locks::classlinker_classes_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Resolve a String with the given index from the DexFile, storing the
// result in the DexCache. The referrer is used to identify the
// target DexCache and ClassLoader to use for resolution.
mirror::String* ResolveString(uint32_t string_idx, ArtMethod* referrer)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Resolve a String with the given index from the DexFile, storing the
// result in the DexCache.
- mirror::String* ResolveString(const DexFile& dex_file, uint32_t string_idx,
+ mirror::String* ResolveString(const DexFile& dex_file,
+ uint32_t string_idx,
Handle<mirror::DexCache> dex_cache)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Resolve a Type with the given index from the DexFile, storing the
// result in the DexCache. The referrer is used to identify the
// target DexCache and ClassLoader to use for resolution.
- mirror::Class* ResolveType(const DexFile& dex_file, uint16_t type_idx, mirror::Class* referrer)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Class* ResolveType(const DexFile& dex_file,
+ uint16_t type_idx,
+ mirror::Class* referrer)
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!dex_lock_, !Roles::uninterruptible_);
// Resolve a Type with the given index from the DexFile, storing the
// result in the DexCache. The referrer is used to identify the
// target DexCache and ClassLoader to use for resolution.
- mirror::Class* ResolveType(uint16_t type_idx, ArtMethod* referrer)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Class* ResolveType(uint16_t type_idx,
+ ArtMethod* referrer)
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!dex_lock_, !Roles::uninterruptible_);
- mirror::Class* ResolveType(uint16_t type_idx, ArtField* referrer)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Class* ResolveType(uint16_t type_idx,
+ ArtField* referrer)
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!dex_lock_, !Roles::uninterruptible_);
// Resolve a type with the given ID from the DexFile, storing the
// result in DexCache. The ClassLoader is used to search for the
// type, since it may be referenced from but not contained within
// the given DexFile.
- mirror::Class* ResolveType(const DexFile& dex_file, uint16_t type_idx,
+ mirror::Class* ResolveType(const DexFile& dex_file,
+ uint16_t type_idx,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!dex_lock_, !Roles::uninterruptible_);
// Resolve a method with a given ID from the DexFile, storing the
// result in DexCache. The ClassLinker and ClassLoader are used as
// in ResolveType. What is unique is the method type argument which
// is used to determine if this method is a direct, static, or
// virtual method.
- ArtMethod* ResolveMethod(const DexFile& dex_file, uint32_t method_idx,
+ ArtMethod* ResolveMethod(const DexFile& dex_file,
+ uint32_t method_idx,
Handle<mirror::DexCache> dex_cache,
- Handle<mirror::ClassLoader> class_loader, ArtMethod* referrer,
+ Handle<mirror::ClassLoader> class_loader,
+ ArtMethod* referrer,
InvokeType type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!dex_lock_, !Roles::uninterruptible_);
ArtMethod* GetResolvedMethod(uint32_t method_idx, ArtMethod* referrer)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* ResolveMethod(Thread* self, uint32_t method_idx, ArtMethod* referrer, InvokeType type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!dex_lock_, !Roles::uninterruptible_);
ArtField* GetResolvedField(uint32_t field_idx, mirror::Class* field_declaring_class)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ArtField* GetResolvedField(uint32_t field_idx, mirror::DexCache* dex_cache)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ArtField* ResolveField(uint32_t field_idx, ArtMethod* referrer, bool is_static)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!dex_lock_, !Roles::uninterruptible_);
// Resolve a field with a given ID from the DexFile, storing the
// result in DexCache. The ClassLinker and ClassLoader are used as
@@ -247,45 +278,50 @@
// used to determine if we are resolving a static or non-static
// field.
ArtField* ResolveField(const DexFile& dex_file,
- uint32_t field_idx,
- Handle<mirror::DexCache> dex_cache,
- Handle<mirror::ClassLoader> class_loader,
- bool is_static)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ uint32_t field_idx,
+ Handle<mirror::DexCache> dex_cache,
+ Handle<mirror::ClassLoader> class_loader,
+ bool is_static)
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!dex_lock_, !Roles::uninterruptible_);
// Resolve a field with a given ID from the DexFile, storing the
// result in DexCache. The ClassLinker and ClassLoader are used as
// in ResolveType. No is_static argument is provided so that Java
// field resolution semantics are followed.
- ArtField* ResolveFieldJLS(const DexFile& dex_file, uint32_t field_idx,
+ ArtField* ResolveFieldJLS(const DexFile& dex_file,
+ uint32_t field_idx,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!dex_lock_, !Roles::uninterruptible_);
// Get shorty from method index without resolution. Used to do handlerization.
const char* MethodShorty(uint32_t method_idx, ArtMethod* referrer, uint32_t* length)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Returns true on success, false if there's an exception pending.
// can_run_clinit=false allows the compiler to attempt to init a class,
// given the restriction that no <clinit> execution is possible.
- bool EnsureInitialized(Thread* self, Handle<mirror::Class> c, bool can_init_fields,
+ bool EnsureInitialized(Thread* self,
+ Handle<mirror::Class> c,
+ bool can_init_fields,
bool can_init_parents)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!dex_lock_, !Roles::uninterruptible_);
// Initializes classes that have instances in the image but that have
// <clinit> methods so they could not be initialized by the compiler.
- void RunRootClinits() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void RunRootClinits() SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!dex_lock_, !Roles::uninterruptible_);
void RegisterDexFile(const DexFile& dex_file)
- LOCKS_EXCLUDED(dex_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!dex_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
void RegisterDexFile(const DexFile& dex_file, Handle<mirror::DexCache> dex_cache)
- LOCKS_EXCLUDED(dex_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!dex_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
const OatFile* RegisterOatFile(const OatFile* oat_file)
- LOCKS_EXCLUDED(dex_lock_);
+ REQUIRES(!dex_lock_);
const std::vector<const DexFile*>& GetBootClassPath() {
return boot_class_path_;
@@ -293,34 +329,36 @@
// Returns the first non-image oat file in the class path.
const OatFile* GetPrimaryOatFile()
- LOCKS_EXCLUDED(dex_lock_);
+ REQUIRES(!dex_lock_);
- void VisitClasses(ClassVisitor* visitor, void* arg)
- LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void VisitClasses(ClassVisitor* visitor)
+ REQUIRES(!Locks::classlinker_classes_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Less efficient variant of VisitClasses that copies the class_table_ into secondary storage
// so that it can visit individual classes without holding
// Locks::classlinker_classes_lock_. As the Locks::classlinker_classes_lock_ isn't held this code
// can race with insertion and deletion of classes while the visitor is being called.
- void VisitClassesWithoutClassesLock(ClassVisitor* visitor, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void VisitClassesWithoutClassesLock(ClassVisitor* visitor)
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!dex_lock_);
void VisitClassRoots(RootVisitor* visitor, VisitRootFlags flags)
- LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!Locks::classlinker_classes_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
void VisitRoots(RootVisitor* visitor, VisitRootFlags flags)
- LOCKS_EXCLUDED(dex_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!dex_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
mirror::DexCache* FindDexCache(const DexFile& dex_file)
- LOCKS_EXCLUDED(dex_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!dex_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool IsDexFileRegistered(const DexFile& dex_file)
- LOCKS_EXCLUDED(dex_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!dex_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
void FixupDexCaches(ArtMethod* resolution_method)
- LOCKS_EXCLUDED(dex_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!dex_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Finds or creates the oat file holding dex_location. Then loads and returns
// all corresponding dex files (there may be more than one dex file loaded
@@ -336,83 +374,101 @@
// This method should not be called with the mutator_lock_ held, because it
// could end up starving GC if we need to generate or relocate any oat
// files.
- std::vector<std::unique_ptr<const DexFile>> OpenDexFilesFromOat(
- const char* dex_location, const char* oat_location,
- std::vector<std::string>* error_msgs)
- LOCKS_EXCLUDED(dex_lock_, Locks::mutator_lock_);
+ std::vector<std::unique_ptr<const DexFile>> OpenDexFilesFromOat(const char* dex_location,
+ const char* oat_location,
+ out<std::vector<std::string>>
+ error_msgs)
+ REQUIRES(!dex_lock_, !Locks::mutator_lock_);
// Allocate an instance of a java.lang.Object.
- mirror::Object* AllocObject(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Object* AllocObject(Thread* self)
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Roles::uninterruptible_);
// TODO: replace this with multiple methods that allocate the correct managed type.
template <class T>
mirror::ObjectArray<T>* AllocObjectArray(Thread* self, size_t length)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Roles::uninterruptible_);
mirror::ObjectArray<mirror::Class>* AllocClassArray(Thread* self, size_t length)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Roles::uninterruptible_);
mirror::ObjectArray<mirror::String>* AllocStringArray(Thread* self, size_t length)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Roles::uninterruptible_);
+
+ ArtField* AllocArtFieldArray(Thread* self, size_t length);
ArtMethod* AllocArtMethodArray(Thread* self, size_t length);
mirror::PointerArray* AllocPointerArray(Thread* self, size_t length)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Roles::uninterruptible_);
mirror::IfTable* AllocIfTable(Thread* self, size_t ifcount)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- ArtField* AllocArtFieldArray(Thread* self, size_t length)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Roles::uninterruptible_);
mirror::ObjectArray<mirror::StackTraceElement>* AllocStackTraceElementArray(Thread* self,
size_t length)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Roles::uninterruptible_);
void VerifyClass(Thread* self, Handle<mirror::Class> klass)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool VerifyClassUsingOatFile(const DexFile& dex_file, mirror::Class* klass,
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!dex_lock_);
+ bool VerifyClassUsingOatFile(const DexFile& dex_file,
+ mirror::Class* klass,
mirror::Class::Status& oat_file_class_status)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!dex_lock_);
void ResolveClassExceptionHandlerTypes(const DexFile& dex_file,
Handle<mirror::Class> klass)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!dex_lock_);
void ResolveMethodExceptionHandlerTypes(const DexFile& dex_file, ArtMethod* klass)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!dex_lock_);
- mirror::Class* CreateProxyClass(ScopedObjectAccessAlreadyRunnable& soa, jstring name,
- jobjectArray interfaces, jobject loader, jobjectArray methods,
+ mirror::Class* CreateProxyClass(ScopedObjectAccessAlreadyRunnable& soa,
+ jstring name,
+ jobjectArray interfaces,
+ jobject loader,
+ jobjectArray methods,
jobjectArray throws)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
std::string GetDescriptorForProxy(mirror::Class* proxy_class)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ArtMethod* FindMethodForProxy(mirror::Class* proxy_class, ArtMethod* proxy_method)
- LOCKS_EXCLUDED(dex_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ ArtMethod* FindMethodForProxy(mirror::Class* proxy_class,
+ ArtMethod* proxy_method)
+ REQUIRES(!dex_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Get the oat code for a method when its class isn't yet initialized
const void* GetQuickOatCodeFor(ArtMethod* method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Get the oat code for a method from a method index.
- const void* GetQuickOatCodeFor(const DexFile& dex_file, uint16_t class_def_idx,
+ const void* GetQuickOatCodeFor(const DexFile& dex_file,
+ uint16_t class_def_idx,
uint32_t method_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Get compiled code for a method, return null if no code
// exists. This is unlike Get..OatCodeFor which will return a bridge
// or interpreter entrypoint.
const void* GetOatMethodQuickCodeFor(ArtMethod* method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- const OatFile::OatMethod FindOatMethodFor(ArtMethod* method, bool* found)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const OatFile::OatMethod FindOatMethodFor(ArtMethod* method, out<bool> found)
+ SHARED_REQUIRES(Locks::mutator_lock_);
pid_t GetClassesLockOwner(); // For SignalCatcher.
pid_t GetDexLockOwner(); // For SignalCatcher.
- mirror::Class* GetClassRoot(ClassRoot class_root) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Class* GetClassRoot(ClassRoot class_root) SHARED_REQUIRES(Locks::mutator_lock_);
static const char* GetClassRootDescriptor(ClassRoot class_root);
@@ -431,20 +487,20 @@
// Set the entrypoints up for method to the given code.
void SetEntryPointsToCompiledCode(ArtMethod* method, const void* method_code) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Set the entrypoints up for method to the enter the interpreter.
void SetEntryPointsToInterpreter(ArtMethod* method) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Attempts to insert a class into a class table. Returns null if
// the class was inserted, otherwise returns an existing class with
// the same descriptor and ClassLoader.
mirror::Class* InsertClass(const char* descriptor, mirror::Class* klass, size_t hash)
- LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!Locks::classlinker_classes_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
- mirror::ObjectArray<mirror::Class>* GetClassRoots() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::ObjectArray<mirror::Class>* GetClassRoots() SHARED_REQUIRES(Locks::mutator_lock_) {
mirror::ObjectArray<mirror::Class>* class_roots = class_roots_.Read();
DCHECK(class_roots != nullptr);
return class_roots;
@@ -452,23 +508,25 @@
// Move all of the image classes into the class table for faster lookups.
void MoveImageClassesToClassTable()
- LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!Locks::classlinker_classes_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Move the class table to the pre-zygote table to reduce memory usage. This works by ensuring
// that no more classes are ever added to the pre zygote table which makes it that the pages
// always remain shared dirty instead of private dirty.
void MoveClassTableToPreZygote()
- LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!Locks::classlinker_classes_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Returns true if the method can be called with its direct code pointer, false otherwise.
bool MayBeCalledWithDirectCodePointer(ArtMethod* m)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!dex_lock_);
// Creates a GlobalRef PathClassLoader that can be used to load classes from the given dex files.
// Note: the objects are not completely set up. Do not use this outside of tests and the compiler.
- jobject CreatePathClassLoader(Thread* self, std::vector<const DexFile*>& dex_files)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ jobject CreatePathClassLoader(Thread* self, const std::vector<const DexFile*>& dex_files)
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!dex_lock_);
size_t GetImagePointerSize() const {
DCHECK(ValidPointerSize(image_pointer_size_)) << image_pointer_size_;
@@ -477,48 +535,77 @@
// Used by image writer for checking.
bool ClassInClassTable(mirror::Class* klass)
- LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!Locks::classlinker_classes_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* CreateRuntimeMethod();
// Clear the ArrayClass cache. This is necessary when cleaning up for the image, as the cache
// entries are roots, but potentially not image classes.
- void DropFindArrayClassCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void DropFindArrayClassCache() SHARED_REQUIRES(Locks::mutator_lock_);
private:
- OatFile& GetImageOatFile(gc::space::ImageSpace* space)
- LOCKS_EXCLUDED(dex_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ class CompareClassLoaderGcRoot {
+ public:
+ bool operator()(const GcRoot<mirror::ClassLoader>& a, const GcRoot<mirror::ClassLoader>& b)
+ const SHARED_REQUIRES(Locks::mutator_lock_) {
+ return a.Read() < b.Read();
+ }
+ };
- void FinishInit(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ typedef SafeMap<GcRoot<mirror::ClassLoader>, ClassTable*, CompareClassLoaderGcRoot>
+ ClassLoaderClassTable;
+
+ void VisitClassesInternal(ClassVisitor* visitor)
+ REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // Returns the number of zygote and image classes.
+ size_t NumZygoteClasses() const REQUIRES(Locks::classlinker_classes_lock_);
+
+ // Returns the number of non zygote nor image classes.
+ size_t NumNonZygoteClasses() const REQUIRES(Locks::classlinker_classes_lock_);
+
+ OatFile& GetImageOatFile(gc::space::ImageSpace* space)
+ REQUIRES(!dex_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+
+ void FinishInit(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!dex_lock_, !Roles::uninterruptible_);
// For early bootstrapping by Init
mirror::Class* AllocClass(Thread* self, mirror::Class* java_lang_Class, uint32_t class_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Roles::uninterruptible_);
// Alloc* convenience functions to avoid needing to pass in mirror::Class*
// values that are known to the ClassLinker such as
// kObjectArrayClass and kJavaLangString etc.
mirror::Class* AllocClass(Thread* self, uint32_t class_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Roles::uninterruptible_);
mirror::DexCache* AllocDexCache(Thread* self, const DexFile& dex_file)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Roles::uninterruptible_);
mirror::Class* CreatePrimitiveClass(Thread* self, Primitive::Type type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Roles::uninterruptible_);
mirror::Class* InitializePrimitiveClass(mirror::Class* primitive_class, Primitive::Type type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Roles::uninterruptible_);
-
- mirror::Class* CreateArrayClass(Thread* self, const char* descriptor, size_t hash,
+ mirror::Class* CreateArrayClass(Thread* self,
+ const char* descriptor,
+ size_t hash,
Handle<mirror::ClassLoader> class_loader)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!dex_lock_, !Roles::uninterruptible_);
void AppendToBootClassPath(Thread* self, const DexFile& dex_file)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!dex_lock_);
void AppendToBootClassPath(const DexFile& dex_file, Handle<mirror::DexCache> dex_cache)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!dex_lock_);
// Precomputes size needed for Class, in the case of a non-temporary class this size must be
// sufficient to hold all static fields.
@@ -527,133 +614,157 @@
// Setup the classloader, class def index, type idx so that we can insert this class in the class
// table.
- void SetupClass(const DexFile& dex_file, const DexFile::ClassDef& dex_class_def,
- Handle<mirror::Class> klass, mirror::ClassLoader* class_loader)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetupClass(const DexFile& dex_file,
+ const DexFile::ClassDef& dex_class_def,
+ Handle<mirror::Class> klass,
+ mirror::ClassLoader* class_loader)
+ SHARED_REQUIRES(Locks::mutator_lock_);
- void LoadClass(Thread* self, const DexFile& dex_file, const DexFile::ClassDef& dex_class_def,
+ void LoadClass(Thread* self,
+ const DexFile& dex_file,
+ const DexFile::ClassDef& dex_class_def,
Handle<mirror::Class> klass)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void LoadClassMembers(Thread* self, const DexFile& dex_file, const uint8_t* class_data,
- Handle<mirror::Class> klass, const OatFile::OatClass* oat_class)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ void LoadClassMembers(Thread* self,
+ const DexFile& dex_file,
+ const uint8_t* class_data,
+ Handle<mirror::Class> klass,
+ const OatFile::OatClass* oat_class)
+ SHARED_REQUIRES(Locks::mutator_lock_);
- void LoadField(const ClassDataItemIterator& it, Handle<mirror::Class> klass,
+ void LoadField(const ClassDataItemIterator& it,
+ Handle<mirror::Class> klass,
ArtField* dst)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- void LoadMethod(Thread* self, const DexFile& dex_file, const ClassDataItemIterator& it,
- Handle<mirror::Class> klass, ArtMethod* dst)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void LoadMethod(Thread* self,
+ const DexFile& dex_file,
+ const ClassDataItemIterator& it,
+ Handle<mirror::Class> klass,
+ ArtMethod* dst)
+ SHARED_REQUIRES(Locks::mutator_lock_);
- void FixupStaticTrampolines(mirror::Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void FixupStaticTrampolines(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
// Finds the associated oat class for a dex_file and descriptor. Returns an invalid OatClass on
// error and sets found to false.
- OatFile::OatClass FindOatClass(const DexFile& dex_file, uint16_t class_def_idx, bool* found)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ OatFile::OatClass FindOatClass(const DexFile& dex_file, uint16_t class_def_idx, out<bool> found)
+ SHARED_REQUIRES(Locks::mutator_lock_);
void RegisterDexFileLocked(const DexFile& dex_file, Handle<mirror::DexCache> dex_cache)
- EXCLUSIVE_LOCKS_REQUIRED(dex_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(dex_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
bool IsDexFileRegisteredLocked(const DexFile& dex_file)
- SHARED_LOCKS_REQUIRED(dex_lock_, Locks::mutator_lock_);
+ SHARED_REQUIRES(dex_lock_, Locks::mutator_lock_);
- bool InitializeClass(Thread* self, Handle<mirror::Class> klass, bool can_run_clinit,
+ bool InitializeClass(Thread* self,
+ Handle<mirror::Class> klass,
+ bool can_run_clinit,
bool can_init_parents)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool WaitForInitializeClass(Handle<mirror::Class> klass, Thread* self,
- ObjectLock<mirror::Class>& lock);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!dex_lock_);
+ bool WaitForInitializeClass(Handle<mirror::Class> klass,
+ Thread* self,
+ ObjectLock<mirror::Class>* lock);
bool ValidateSuperClassDescriptors(Handle<mirror::Class> klass)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- bool IsSameDescriptorInDifferentClassContexts(Thread* self, const char* descriptor,
+ bool IsSameDescriptorInDifferentClassContexts(Thread* self,
+ const char* descriptor,
Handle<mirror::ClassLoader> class_loader1,
Handle<mirror::ClassLoader> class_loader2)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- bool IsSameMethodSignatureInDifferentClassContexts(Thread* self, ArtMethod* method,
- mirror::Class* klass1, mirror::Class* klass2)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsSameMethodSignatureInDifferentClassContexts(Thread* self,
+ ArtMethod* method,
+ mirror::Class* klass1,
+ mirror::Class* klass2)
+ SHARED_REQUIRES(Locks::mutator_lock_);
- bool LinkClass(Thread* self, const char* descriptor, Handle<mirror::Class> klass,
+ bool LinkClass(Thread* self,
+ const char* descriptor,
+ Handle<mirror::Class> klass,
Handle<mirror::ObjectArray<mirror::Class>> interfaces,
- MutableHandle<mirror::Class>* h_new_class_out)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ out<MutableHandle<mirror::Class>> h_new_class_out)
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Locks::classlinker_classes_lock_);
bool LinkSuperClass(Handle<mirror::Class> klass)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool LoadSuperAndInterfaces(Handle<mirror::Class> klass, const DexFile& dex_file)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!dex_lock_);
- bool LinkMethods(Thread* self, Handle<mirror::Class> klass,
+ bool LinkMethods(Thread* self,
+ Handle<mirror::Class> klass,
Handle<mirror::ObjectArray<mirror::Class>> interfaces,
- ArtMethod** out_imt)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ out<ArtMethod* [mirror::Class::kImtSize]> out_imt)
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- bool LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass,
+ bool LinkInterfaceMethods(Thread* self,
+ Handle<mirror::Class> klass,
Handle<mirror::ObjectArray<mirror::Class>> interfaces,
- ArtMethod** out_imt)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ out<ArtMethod* [mirror::Class::kImtSize]> out_imt)
+ SHARED_REQUIRES(Locks::mutator_lock_);
- bool LinkStaticFields(Thread* self, Handle<mirror::Class> klass, size_t* class_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool LinkStaticFields(Thread* self,
+ Handle<mirror::Class> klass,
+ out<size_t> class_size)
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool LinkInstanceFields(Thread* self, Handle<mirror::Class> klass)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool LinkFields(Thread* self, Handle<mirror::Class> klass, bool is_static, size_t* class_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void LinkCode(ArtMethod* method, const OatFile::OatClass* oat_class,
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ bool LinkFields(Thread* self, Handle<mirror::Class> klass, bool is_static, out<size_t> class_size)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ void LinkCode(ArtMethod* method,
+ const OatFile::OatClass* oat_class,
uint32_t class_def_method_index)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void CreateReferenceInstanceOffsets(Handle<mirror::Class> klass)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void CheckProxyConstructor(ArtMethod* constructor) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void CheckProxyMethod(ArtMethod* method, ArtMethod* prototype) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// For use by ImageWriter to find DexCaches for its roots
ReaderWriterMutex* DexLock()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCK_RETURNED(dex_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) LOCK_RETURNED(dex_lock_) {
return &dex_lock_;
}
- size_t GetDexCacheCount() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, dex_lock_) {
+ size_t GetDexCacheCount() SHARED_REQUIRES(Locks::mutator_lock_, dex_lock_) {
return dex_caches_.size();
}
- mirror::DexCache* GetDexCache(size_t idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, dex_lock_);
+ mirror::DexCache* GetDexCache(size_t idx) SHARED_REQUIRES(Locks::mutator_lock_, dex_lock_);
const OatFile* FindOpenedOatFileFromOatLocation(const std::string& oat_location)
- LOCKS_EXCLUDED(dex_lock_);
+ REQUIRES(!dex_lock_);
// Returns the boot image oat file.
- const OatFile* GetBootOatFile() SHARED_LOCKS_REQUIRED(dex_lock_);
+ const OatFile* GetBootOatFile() SHARED_REQUIRES(dex_lock_);
void CreateProxyConstructor(Handle<mirror::Class> klass, ArtMethod* out)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void CreateProxyMethod(Handle<mirror::Class> klass, ArtMethod* prototype, ArtMethod* out)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
  // Ensures that methods have the kAccPreverified bit set. We use the kAccPreverified bit on the
// class access flags to determine whether this has been done before.
void EnsurePreverifiedMethods(Handle<mirror::Class> c)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- mirror::Class* LookupClassFromTableLocked(const char* descriptor,
- mirror::ClassLoader* class_loader,
- size_t hash)
- SHARED_LOCKS_REQUIRED(Locks::classlinker_classes_lock_, Locks::mutator_lock_);
-
- mirror::Class* UpdateClass(const char* descriptor, mirror::Class* klass, size_t hash)
- LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
mirror::Class* LookupClassFromImage(const char* descriptor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // Returns null if not found.
+ ClassTable* ClassTableForClassLoader(mirror::ClassLoader* class_loader)
+ SHARED_REQUIRES(Locks::mutator_lock_, Locks::classlinker_classes_lock_);
+ // Insert a new class table if not found.
+ ClassTable* InsertClassTableForClassLoader(mirror::ClassLoader* class_loader)
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::classlinker_classes_lock_);
// EnsureResolved is called to make sure that a class in the class_table_ has been resolved
  // before returning it to the caller. It's the responsibility of the thread that placed the class
@@ -662,13 +773,13 @@
// retire a class, the version of the class in the table is returned and this may differ from
// the class passed in.
mirror::Class* EnsureResolved(Thread* self, const char* descriptor, mirror::Class* klass)
- WARN_UNUSED SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ WARN_UNUSED SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_);
void FixupTemporaryDeclaringClass(mirror::Class* temp_class, mirror::Class* new_class)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void SetClassRoot(ClassRoot class_root, mirror::Class* klass)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Return the quick generic JNI stub for testing.
const void* GetRuntimeQuickGenericJniStub() const;
@@ -677,20 +788,23 @@
// class.
// Note: Currently we only store the descriptor, so we cannot throw the exact throwable, only
// a recreation with a custom string.
- void ThrowEarlierClassFailure(mirror::Class* c) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void ThrowEarlierClassFailure(mirror::Class* c) SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!dex_lock_);
// Check for duplicate class definitions of the given oat file against all open oat files.
- bool HasCollisions(const OatFile* oat_file, std::string* error_msg) LOCKS_EXCLUDED(dex_lock_);
+ bool HasCollisions(const OatFile* oat_file, out<std::string> error_msg) REQUIRES(!dex_lock_);
- bool HasInitWithString(Thread* self, ClassLinker* class_linker, const char* descriptor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool HasInitWithString(Thread* self, const char* descriptor)
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_);
bool CanWeInitializeClass(mirror::Class* klass, bool can_init_statics, bool can_init_parents)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- void UpdateClassVirtualMethods(mirror::Class* klass, ArtMethod* new_methods,
+ void UpdateClassVirtualMethods(mirror::Class* klass,
+ ArtMethod* new_methods,
size_t new_num_methods)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::classlinker_classes_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Locks::classlinker_classes_lock_);
std::vector<const DexFile*> boot_class_path_;
std::vector<std::unique_ptr<const DexFile>> opened_dex_files_;
@@ -700,43 +814,11 @@
std::vector<GcRoot<mirror::DexCache>> dex_caches_ GUARDED_BY(dex_lock_);
std::vector<const OatFile*> oat_files_ GUARDED_BY(dex_lock_);
- class ClassDescriptorHashEquals {
- public:
- // Same class loader and descriptor.
- std::size_t operator()(const GcRoot<mirror::Class>& root) const NO_THREAD_SAFETY_ANALYSIS;
- bool operator()(const GcRoot<mirror::Class>& a, const GcRoot<mirror::Class>& b) const
- NO_THREAD_SAFETY_ANALYSIS;
- // Same class loader and descriptor.
- std::size_t operator()(const std::pair<const char*, mirror::ClassLoader*>& element) const
- NO_THREAD_SAFETY_ANALYSIS;
- bool operator()(const GcRoot<mirror::Class>& a,
- const std::pair<const char*, mirror::ClassLoader*>& b) const
- NO_THREAD_SAFETY_ANALYSIS;
- // Same descriptor.
- bool operator()(const GcRoot<mirror::Class>& a, const char* descriptor) const
- NO_THREAD_SAFETY_ANALYSIS;
- std::size_t operator()(const char* descriptor) const NO_THREAD_SAFETY_ANALYSIS;
- };
- class GcRootEmptyFn {
- public:
- void MakeEmpty(GcRoot<mirror::Class>& item) const {
- item = GcRoot<mirror::Class>();
- }
- bool IsEmpty(const GcRoot<mirror::Class>& item) const {
- return item.IsNull();
- }
- };
+ // This contains strong roots; it enables concurrent root scanning of the class table.
+ ClassLoaderClassTable classes_ GUARDED_BY(Locks::classlinker_classes_lock_);
- // hash set which hashes class descriptor, and compares descriptors nad class loaders. Results
- // should be compared for a matching Class descriptor and class loader.
- typedef HashSet<GcRoot<mirror::Class>, GcRootEmptyFn, ClassDescriptorHashEquals,
- ClassDescriptorHashEquals, TrackingAllocator<GcRoot<mirror::Class>, kAllocatorTagClassTable>>
- Table;
- // This contains strong roots. To enable concurrent root scanning of
- // the class table, be careful to use a read barrier when accessing this.
- Table class_table_ GUARDED_BY(Locks::classlinker_classes_lock_);
- Table pre_zygote_class_table_ GUARDED_BY(Locks::classlinker_classes_lock_);
- std::vector<GcRoot<mirror::Class>> new_class_roots_;
+ // New class roots, only used by CMS since the GC needs to mark these in the pause.
+ std::vector<GcRoot<mirror::Class>> new_class_roots_ GUARDED_BY(Locks::classlinker_classes_lock_);
// Do we need to search dex caches to find image classes?
bool dex_cache_image_class_lookup_required_;
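The header changes above are largely a mechanical migration from the old lock-annotation macros to Clang capability-style attributes: SHARED_LOCKS_REQUIRED becomes SHARED_REQUIRES, EXCLUSIVE_LOCKS_REQUIRED becomes REQUIRES, and LOCKS_EXCLUDED(x) becomes the negative requirement REQUIRES(!x), which the analysis can verify at call sites. A minimal sketch of such macros, assuming Clang's documented thread-safety attributes (names here are illustrative, not ART's actual base/mutex.h definitions):

    #define CAPABILITY(x)        __attribute__((capability(x)))
    #define REQUIRES(...)        __attribute__((requires_capability(__VA_ARGS__)))
    #define SHARED_REQUIRES(...) __attribute__((requires_shared_capability(__VA_ARGS__)))
    #define ACQUIRE(...)         __attribute__((acquire_capability(__VA_ARGS__)))
    #define RELEASE(...)         __attribute__((release_capability(__VA_ARGS__)))

    struct CAPABILITY("mutex") Mutex {
      void Lock() ACQUIRE(this);
      void Unlock() RELEASE(this);
    };

    Mutex dex_lock;

    // Caller must hold dex_lock exclusively (old EXCLUSIVE_LOCKS_REQUIRED).
    void RegisterDexFileLocked() REQUIRES(dex_lock);
    // Caller must NOT hold dex_lock (old LOCKS_EXCLUDED), so the function is
    // free to acquire it itself.
    void RegisterDexFile() REQUIRES(!dex_lock);

Unlike LOCKS_EXCLUDED, the negative form propagates: callers of RegisterDexFile must themselves be annotated so the analysis can prove the lock is not held.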
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index a4e0227..3f8259a 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -46,7 +46,7 @@
class ClassLinkerTest : public CommonRuntimeTest {
protected:
void AssertNonExistentClass(const std::string& descriptor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
Thread* self = Thread::Current();
EXPECT_TRUE(class_linker_->FindSystemClass(self, descriptor.c_str()) == nullptr);
EXPECT_TRUE(self->IsExceptionPending());
@@ -58,13 +58,13 @@
}
void AssertPrimitiveClass(const std::string& descriptor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
Thread* self = Thread::Current();
AssertPrimitiveClass(descriptor, class_linker_->FindSystemClass(self, descriptor.c_str()));
}
void AssertPrimitiveClass(const std::string& descriptor, mirror::Class* primitive)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ASSERT_TRUE(primitive != nullptr);
ASSERT_TRUE(primitive->GetClass() != nullptr);
ASSERT_EQ(primitive->GetClass(), primitive->GetClass()->GetClass());
@@ -102,7 +102,7 @@
void AssertArrayClass(const std::string& array_descriptor,
const std::string& component_type,
mirror::ClassLoader* class_loader)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
Thread* self = Thread::Current();
StackHandleScope<2> hs(self);
Handle<mirror::ClassLoader> loader(hs.NewHandle(class_loader));
@@ -116,7 +116,7 @@
}
void AssertArrayClass(const std::string& array_descriptor, Handle<mirror::Class> array)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ASSERT_TRUE(array.Get() != nullptr);
ASSERT_TRUE(array->GetClass() != nullptr);
ASSERT_EQ(array->GetClass(), array->GetClass()->GetClass());
@@ -159,7 +159,7 @@
EXPECT_EQ(class_linker_->FindArrayClass(self, &array_ptr), array.Get());
}
- void AssertMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void AssertMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_) {
EXPECT_TRUE(method != nullptr);
EXPECT_TRUE(method->GetDeclaringClass() != nullptr);
EXPECT_TRUE(method->GetName() != nullptr);
@@ -174,7 +174,7 @@
}
void AssertField(mirror::Class* klass, ArtField* field)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
EXPECT_TRUE(field != nullptr);
EXPECT_EQ(klass, field->GetDeclaringClass());
EXPECT_TRUE(field->GetName() != nullptr);
@@ -182,7 +182,7 @@
}
void AssertClass(const std::string& descriptor, Handle<mirror::Class> klass)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
std::string temp;
EXPECT_STREQ(descriptor.c_str(), klass->GetDescriptor(&temp));
if (descriptor == "Ljava/lang/Object;") {
@@ -319,7 +319,7 @@
}
void AssertDexFileClass(mirror::ClassLoader* class_loader, const std::string& descriptor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ASSERT_TRUE(descriptor != nullptr);
Thread* self = Thread::Current();
StackHandleScope<1> hs(self);
@@ -339,7 +339,7 @@
}
void AssertDexFile(const DexFile& dex, mirror::ClassLoader* class_loader)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// Verify all the classes defined in this file
for (size_t i = 0; i < dex.NumClassDefs(); i++) {
const DexFile::ClassDef& class_def = dex.GetClassDef(i);
@@ -385,7 +385,7 @@
std::string class_descriptor;
std::vector<CheckOffset> offsets;
- bool Check() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool Check() SHARED_REQUIRES(Locks::mutator_lock_) {
Thread* self = Thread::Current();
mirror::Class* klass =
Runtime::Current()->GetClassLinker()->FindSystemClass(self, class_descriptor.c_str());
@@ -1107,7 +1107,7 @@
}
static void CheckMethod(ArtMethod* method, bool verified)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (!method->IsNative() && !method->IsAbstract()) {
EXPECT_EQ((method->GetAccessFlags() & kAccPreverified) != 0U, verified)
<< PrettyMethod(method, true);
@@ -1115,7 +1115,7 @@
}
static void CheckPreverified(mirror::Class* c, bool preverified)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
EXPECT_EQ((c->GetAccessFlags() & kAccPreverified) != 0U, preverified)
<< "Class " << PrettyClass(c) << " not as expected";
for (auto& m : c->GetDirectMethods(sizeof(void*))) {
diff --git a/runtime/class_table.cc b/runtime/class_table.cc
new file mode 100644
index 0000000..c245d4e
--- /dev/null
+++ b/runtime/class_table.cc
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "class_table.h"
+
+#include "mirror/class-inl.h"
+
+namespace art {
+
+ClassTable::ClassTable() {
+ classes_.push_back(ClassSet());
+}
+
+void ClassTable::FreezeSnapshot() {
+ classes_.push_back(ClassSet());
+}
+
+bool ClassTable::Contains(mirror::Class* klass) {
+ for (ClassSet& class_set : classes_) {
+ auto it = class_set.Find(GcRoot<mirror::Class>(klass));
+ if (it != class_set.end()) {
+ return it->Read() == klass;
+ }
+ }
+ return false;
+}
+
+mirror::Class* ClassTable::UpdateClass(const char* descriptor, mirror::Class* klass, size_t hash) {
+ // Should only be updating latest table.
+ auto existing_it = classes_.back().FindWithHash(descriptor, hash);
+ if (kIsDebugBuild && existing_it == classes_.back().end()) {
+ for (const ClassSet& class_set : classes_) {
+ if (class_set.FindWithHash(descriptor, hash) != class_set.end()) {
+ LOG(FATAL) << "Updating class found in frozen table " << descriptor;
+ }
+ }
+ LOG(FATAL) << "Updating class not found " << descriptor;
+ }
+ mirror::Class* const existing = existing_it->Read();
+ CHECK_NE(existing, klass) << descriptor;
+ CHECK(!existing->IsResolved()) << descriptor;
+ CHECK_EQ(klass->GetStatus(), mirror::Class::kStatusResolving) << descriptor;
+ CHECK(!klass->IsTemp()) << descriptor;
+ VerifyObject(klass);
+ // Update the element in the hash set with the new class. This is safe to do since the descriptor
+ // doesn't change.
+ *existing_it = GcRoot<mirror::Class>(klass);
+ return existing;
+}
+
+void ClassTable::VisitRoots(RootVisitor* visitor, VisitRootFlags flags ATTRIBUTE_UNUSED) {
+ BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(
+ visitor, RootInfo(kRootStickyClass));
+ for (ClassSet& class_set : classes_) {
+ for (GcRoot<mirror::Class>& root : class_set) {
+ buffered_visitor.VisitRoot(root);
+ }
+ }
+}
+
+bool ClassTable::Visit(ClassVisitor* visitor) {
+ for (ClassSet& class_set : classes_) {
+ for (GcRoot<mirror::Class>& root : class_set) {
+ if (!visitor->Visit(root.Read())) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+size_t ClassTable::NumZygoteClasses() const {
+ size_t sum = 0;
+ for (size_t i = 0; i < classes_.size() - 1; ++i) {
+ sum += classes_[i].Size();
+ }
+ return sum;
+}
+
+size_t ClassTable::NumNonZygoteClasses() const {
+ return classes_.back().Size();
+}
+
+mirror::Class* ClassTable::Lookup(const char* descriptor, size_t hash) {
+ for (ClassSet& class_set : classes_) {
+ auto it = class_set.FindWithHash(descriptor, hash);
+ if (it != class_set.end()) {
+ return it->Read();
+ }
+ }
+ return nullptr;
+}
+
+void ClassTable::Insert(mirror::Class* klass) {
+ classes_.back().Insert(GcRoot<mirror::Class>(klass));
+}
+
+void ClassTable::InsertWithHash(mirror::Class* klass, size_t hash) {
+ classes_.back().InsertWithHash(GcRoot<mirror::Class>(klass), hash);
+}
+
+bool ClassTable::Remove(const char* descriptor) {
+ for (ClassSet& class_set : classes_) {
+ auto it = class_set.Find(descriptor);
+ if (it != class_set.end()) {
+ class_set.Erase(it);
+ return true;
+ }
+ }
+ return false;
+}
+
+std::size_t ClassTable::ClassDescriptorHashEquals::operator()(const GcRoot<mirror::Class>& root)
+ const {
+ std::string temp;
+ return ComputeModifiedUtf8Hash(root.Read()->GetDescriptor(&temp));
+}
+
+bool ClassTable::ClassDescriptorHashEquals::operator()(const GcRoot<mirror::Class>& a,
+ const GcRoot<mirror::Class>& b) const {
+ DCHECK_EQ(a.Read()->GetClassLoader(), b.Read()->GetClassLoader());
+ std::string temp;
+ return a.Read()->DescriptorEquals(b.Read()->GetDescriptor(&temp));
+}
+
+bool ClassTable::ClassDescriptorHashEquals::operator()(const GcRoot<mirror::Class>& a,
+ const char* descriptor) const {
+ return a.Read()->DescriptorEquals(descriptor);
+}
+
+std::size_t ClassTable::ClassDescriptorHashEquals::operator()(const char* descriptor) const {
+ return ComputeModifiedUtf8Hash(descriptor);
+}
+
+} // namespace art
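The lookup/insert pattern above — a vector of hash sets in which only the back set is ever mutated, while lookups walk every set — is what keeps the pre-fork snapshots read-only, so their pages can stay shared dirty. A generic, self-contained sketch of the same idea (illustrative only, not ART code):

    #include <string>
    #include <unordered_set>
    #include <vector>

    class SnapshotSet {
     public:
      SnapshotSet() : sets_(1) {}
      // Start a fresh mutable set; older sets are never written again.
      void FreezeSnapshot() { sets_.emplace_back(); }
      void Insert(const std::string& s) { sets_.back().insert(s); }
      bool Contains(const std::string& s) const {
        for (const auto& set : sets_) {  // search every snapshot, oldest first
          if (set.count(s) != 0) {
            return true;
          }
        }
        return false;
      }
     private:
      std::vector<std::unordered_set<std::string>> sets_;
    };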
diff --git a/runtime/class_table.h b/runtime/class_table.h
new file mode 100644
index 0000000..252a47d
--- /dev/null
+++ b/runtime/class_table.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_CLASS_TABLE_H_
+#define ART_RUNTIME_CLASS_TABLE_H_
+
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "base/allocator.h"
+#include "base/hash_set.h"
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "dex_file.h"
+#include "gc_root.h"
+#include "object_callbacks.h"
+#include "runtime.h"
+
+namespace art {
+
+namespace mirror {
+ class ClassLoader;
+} // namespace mirror
+
+class ClassVisitor {
+ public:
+ virtual ~ClassVisitor() {}
+ // Return true to continue visiting.
+ virtual bool Visit(mirror::Class* klass) = 0;
+};
+
+// Each class loader has a ClassTable.
+class ClassTable {
+ public:
+ ClassTable();
+
+ // Used by image writer for checking.
+ bool Contains(mirror::Class* klass)
+ REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // Freeze the current class tables by allocating a new table and never updating or modifying the
+ // existing table. This helps prevent dirty pages caused by inserting classes after the zygote fork.
+ void FreezeSnapshot()
+ REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // Returns the number of classes in previous snapshots.
+ size_t NumZygoteClasses() const REQUIRES(Locks::classlinker_classes_lock_);
+
+ // Returns the number of classes in the latest snapshot.
+ size_t NumNonZygoteClasses() const REQUIRES(Locks::classlinker_classes_lock_);
+
+ // Update a class in the table with the new class. Returns the existing class which was replaced.
+ mirror::Class* UpdateClass(const char* descriptor, mirror::Class* new_klass, size_t hash)
+ REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+
+ void VisitRoots(RootVisitor* visitor, VisitRootFlags flags)
+ REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // Return false if the callback told us to exit.
+ bool Visit(ClassVisitor* visitor)
+ REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+
+ mirror::Class* Lookup(const char* descriptor, size_t hash)
+ SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_);
+
+ void Insert(mirror::Class* klass)
+ REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ void InsertWithHash(mirror::Class* klass, size_t hash)
+ REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // Returns true if the class was found and removed, false otherwise.
+ bool Remove(const char* descriptor)
+ REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+
+ private:
+ class ClassDescriptorHashEquals {
+ public:
+ // Same class loader and descriptor.
+ std::size_t operator()(const GcRoot<mirror::Class>& root) const NO_THREAD_SAFETY_ANALYSIS;
+ bool operator()(const GcRoot<mirror::Class>& a, const GcRoot<mirror::Class>& b) const
+ NO_THREAD_SAFETY_ANALYSIS;
+ // Same descriptor.
+ bool operator()(const GcRoot<mirror::Class>& a, const char* descriptor) const
+ NO_THREAD_SAFETY_ANALYSIS;
+ std::size_t operator()(const char* descriptor) const NO_THREAD_SAFETY_ANALYSIS;
+ };
+ class GcRootEmptyFn {
+ public:
+ void MakeEmpty(GcRoot<mirror::Class>& item) const {
+ item = GcRoot<mirror::Class>();
+ }
+ bool IsEmpty(const GcRoot<mirror::Class>& item) const {
+ return item.IsNull();
+ }
+ };
+ // Hash set which hashes class descriptors, and compares descriptors and class loaders. Results
+ // should be compared for a matching class descriptor and class loader.
+ typedef HashSet<GcRoot<mirror::Class>, GcRootEmptyFn, ClassDescriptorHashEquals,
+ ClassDescriptorHashEquals, TrackingAllocator<GcRoot<mirror::Class>, kAllocatorTagClassTable>>
+ ClassSet;
+
+ // TODO: shard lock to have one per class loader.
+ std::vector<ClassSet> classes_ GUARDED_BY(Locks::classlinker_classes_lock_);
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_CLASS_TABLE_H_
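As a usage sketch against the ClassVisitor interface above: an implementation only needs to override Visit, returning true to continue the walk. This counter is hypothetical, not part of the change; it assumes ART's OVERRIDE and SHARED_REQUIRES macros:

    class ClassCounter : public ClassVisitor {
     public:
      bool Visit(mirror::Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
        if (klass != nullptr) {
          ++count_;
        }
        return true;  // keep visiting; returning false stops the walk
      }
      size_t Count() const { return count_; }
     private:
      size_t count_ = 0u;
    };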
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index 2332f97..a474ae6 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -122,12 +122,12 @@
std::string GetTestDexFileName(const char* name);
std::vector<std::unique_ptr<const DexFile>> OpenTestDexFiles(const char* name)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
std::unique_ptr<const DexFile> OpenTestDexFile(const char* name)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- jobject LoadDex(const char* dex_name) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ jobject LoadDex(const char* dex_name) SHARED_REQUIRES(Locks::mutator_lock_);
std::string android_data_;
std::string dalvik_cache_;
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index 3acd366..de692d1 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -34,7 +34,7 @@
namespace art {
static void AddReferrerLocation(std::ostream& os, mirror::Class* referrer)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (referrer != nullptr) {
std::string location(referrer->GetLocation());
if (!location.empty()) {
@@ -46,7 +46,7 @@
static void ThrowException(const char* exception_descriptor,
mirror::Class* referrer, const char* fmt, va_list* args = nullptr)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
std::ostringstream msg;
if (args != nullptr) {
std::string vmsg;
@@ -62,7 +62,7 @@
static void ThrowWrappedException(const char* exception_descriptor,
mirror::Class* referrer, const char* fmt, va_list* args = nullptr)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
std::ostringstream msg;
if (args != nullptr) {
std::string vmsg;
@@ -336,7 +336,7 @@
static void ThrowNullPointerExceptionForMethodAccessImpl(uint32_t method_idx,
const DexFile& dex_file,
InvokeType type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
std::ostringstream msg;
msg << "Attempt to invoke " << type << " method '"
<< PrettyMethod(method_idx, dex_file, true) << "' on a null object reference";
diff --git a/runtime/common_throws.h b/runtime/common_throws.h
index b391c5b..2402e6f 100644
--- a/runtime/common_throws.h
+++ b/runtime/common_throws.h
@@ -33,169 +33,169 @@
// AbstractMethodError
void ThrowAbstractMethodError(ArtMethod* method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+ SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
// ArithmeticException
-void ThrowArithmeticExceptionDivideByZero() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+void ThrowArithmeticExceptionDivideByZero() SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
// ArrayIndexOutOfBoundsException
void ThrowArrayIndexOutOfBoundsException(int index, int length)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+ SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
// ArrayStoreException
void ThrowArrayStoreException(mirror::Class* element_class, mirror::Class* array_class)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+ SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
// ClassCircularityError
void ThrowClassCircularityError(mirror::Class* c)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+ SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
// ClassCastException
void ThrowClassCastException(mirror::Class* dest_type, mirror::Class* src_type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+ SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
void ThrowClassCastException(const char* msg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+ SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
// ClassFormatError
void ThrowClassFormatError(mirror::Class* referrer, const char* fmt, ...)
__attribute__((__format__(__printf__, 2, 3)))
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+ SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
// IllegalAccessError
void ThrowIllegalAccessErrorClass(mirror::Class* referrer, mirror::Class* accessed)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+ SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
void ThrowIllegalAccessErrorClassForMethodDispatch(mirror::Class* referrer, mirror::Class* accessed,
ArtMethod* called,
InvokeType type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+ SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
void ThrowIllegalAccessErrorMethod(mirror::Class* referrer, ArtMethod* accessed)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+ SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
void ThrowIllegalAccessErrorField(mirror::Class* referrer, ArtField* accessed)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+ SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
void ThrowIllegalAccessErrorFinalField(ArtMethod* referrer, ArtField* accessed)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+ SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
void ThrowIllegalAccessError(mirror::Class* referrer, const char* fmt, ...)
__attribute__((__format__(__printf__, 2, 3)))
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+ SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
// IllegalAccessException
void ThrowIllegalAccessException(const char* msg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+ SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
// IllegalArgumentException
void ThrowIllegalArgumentException(const char* msg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+ SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
// IncompatibleClassChangeError
void ThrowIncompatibleClassChangeError(InvokeType expected_type, InvokeType found_type,
ArtMethod* method, ArtMethod* referrer)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+ SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
void ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(ArtMethod* interface_method,
mirror::Object* this_object,
ArtMethod* referrer)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+ SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
void ThrowIncompatibleClassChangeErrorField(ArtField* resolved_field, bool is_static,
ArtMethod* referrer)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+ SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
void ThrowIncompatibleClassChangeError(mirror::Class* referrer, const char* fmt, ...)
__attribute__((__format__(__printf__, 2, 3)))
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+ SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
// IOException
void ThrowIOException(const char* fmt, ...) __attribute__((__format__(__printf__, 1, 2)))
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+ SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
void ThrowWrappedIOException(const char* fmt, ...) __attribute__((__format__(__printf__, 1, 2)))
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+ SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
// LinkageError
void ThrowLinkageError(mirror::Class* referrer, const char* fmt, ...)
__attribute__((__format__(__printf__, 2, 3)))
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+ SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
void ThrowWrappedLinkageError(mirror::Class* referrer, const char* fmt, ...)
__attribute__((__format__(__printf__, 2, 3)))
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+ SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
// NegativeArraySizeException
void ThrowNegativeArraySizeException(int size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+ SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
void ThrowNegativeArraySizeException(const char* msg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+ SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
// NoSuchFieldError
void ThrowNoSuchFieldError(const StringPiece& scope, mirror::Class* c,
const StringPiece& type, const StringPiece& name)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void ThrowNoSuchFieldException(mirror::Class* c, const StringPiece& name)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// NoSuchMethodError
void ThrowNoSuchMethodError(InvokeType type, mirror::Class* c, const StringPiece& name,
const Signature& signature)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+ SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
void ThrowNoSuchMethodError(uint32_t method_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+ SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
// NullPointerException
void ThrowNullPointerExceptionForFieldAccess(ArtField* field,
bool is_read)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+ SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
void ThrowNullPointerExceptionForMethodAccess(uint32_t method_idx,
InvokeType type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+ SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
void ThrowNullPointerExceptionForMethodAccess(ArtMethod* method,
InvokeType type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+ SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
void ThrowNullPointerExceptionFromDexPC()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+ SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
void ThrowNullPointerException(const char* msg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+ SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
// RuntimeException
void ThrowRuntimeException(const char* fmt, ...)
__attribute__((__format__(__printf__, 1, 2)))
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+ SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
// VerifyError
void ThrowVerifyError(mirror::Class* referrer, const char* fmt, ...)
__attribute__((__format__(__printf__, 2, 3)))
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+ SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
} // namespace art
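Many declarations above carry __attribute__((__format__(__printf__, N, M))): N is the 1-based index of the printf-style format parameter and M the index of the first variadic argument, letting the compiler check format strings against their arguments at each call site (hence (2, 3) on functions whose first parameter is the referrer and whose second is fmt). A free-standing sketch, illustrative rather than ART code:

    #include <cstdarg>
    #include <cstdio>

    void LogError(const char* fmt, ...) __attribute__((__format__(__printf__, 1, 2)));

    void LogError(const char* fmt, ...) {
      va_list args;
      va_start(args, fmt);
      vfprintf(stderr, fmt, args);
      va_end(args);
    }

    // With the attribute, the compiler warns here: '%s' given an int.
    // LogError("bad descriptor: %s", 42);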
diff --git a/runtime/compiler_callbacks.h b/runtime/compiler_callbacks.h
index b296e39..af7b04f 100644
--- a/runtime/compiler_callbacks.h
+++ b/runtime/compiler_callbacks.h
@@ -38,7 +38,7 @@
virtual ~CompilerCallbacks() { }
virtual bool MethodVerified(verifier::MethodVerifier* verifier)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+ SHARED_REQUIRES(Locks::mutator_lock_) = 0;
virtual void ClassRejected(ClassReference ref) = 0;
// Return true if we should attempt to relocate to a random base address if we have not already
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index eccebf1..1865516 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -24,6 +24,7 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/time_utils.h"
+#include "base/out.h"
#include "class_linker.h"
#include "class_linker-inl.h"
#include "dex_file-inl.h"
@@ -72,7 +73,7 @@
public:
Breakpoint(ArtMethod* method, uint32_t dex_pc,
DeoptimizationRequest::Kind deoptimization_kind)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: method_(nullptr), dex_pc_(dex_pc), deoptimization_kind_(deoptimization_kind) {
CHECK(deoptimization_kind_ == DeoptimizationRequest::kNothing ||
deoptimization_kind_ == DeoptimizationRequest::kSelectiveDeoptimization ||
@@ -81,14 +82,14 @@
method_ = soa.EncodeMethod(method);
}
- Breakpoint(const Breakpoint& other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ Breakpoint(const Breakpoint& other) SHARED_REQUIRES(Locks::mutator_lock_)
: method_(nullptr), dex_pc_(other.dex_pc_),
deoptimization_kind_(other.deoptimization_kind_) {
ScopedObjectAccessUnchecked soa(Thread::Current());
method_ = soa.EncodeMethod(other.Method());
}
- ArtMethod* Method() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ArtMethod* Method() const SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedObjectAccessUnchecked soa(Thread::Current());
return soa.DecodeMethod(method_);
}
@@ -111,7 +112,7 @@
};
static std::ostream& operator<<(std::ostream& os, const Breakpoint& rhs)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
os << StringPrintf("Breakpoint[%s @%#x]", PrettyMethod(rhs.Method()).c_str(), rhs.DexPc());
return os;
}
@@ -123,7 +124,7 @@
void MethodEntered(Thread* thread, mirror::Object* this_object, ArtMethod* method,
uint32_t dex_pc)
- OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
if (method->IsNative()) {
// TODO: post location events is a suspension point and native method entry stubs aren't.
return;
@@ -149,7 +150,7 @@
void MethodExited(Thread* thread, mirror::Object* this_object, ArtMethod* method,
uint32_t dex_pc, const JValue& return_value)
- OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
if (method->IsNative()) {
// TODO: post location events is a suspension point and native method entry stubs aren't.
return;
@@ -166,7 +167,7 @@
void MethodUnwind(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object ATTRIBUTE_UNUSED,
ArtMethod* method, uint32_t dex_pc)
- OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
// We're not recorded to listen to this kind of event, so complain.
LOG(ERROR) << "Unexpected method unwind event in debugger " << PrettyMethod(method)
<< " " << dex_pc;
@@ -174,7 +175,7 @@
void DexPcMoved(Thread* thread, mirror::Object* this_object, ArtMethod* method,
uint32_t new_dex_pc)
- OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
if (IsListeningToMethodExit() && IsReturn(method, new_dex_pc)) {
// We also listen to kMethodExited instrumentation event and the current instruction is a
// RETURN so we know the MethodExited method is going to be called right after us. Like in
@@ -195,47 +196,47 @@
void FieldRead(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc, ArtField* field)
- OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
Dbg::PostFieldAccessEvent(method, dex_pc, this_object, field);
}
void FieldWritten(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc, ArtField* field,
const JValue& field_value)
- OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
Dbg::PostFieldModificationEvent(method, dex_pc, this_object, field, &field_value);
}
void ExceptionCaught(Thread* thread ATTRIBUTE_UNUSED, mirror::Throwable* exception_object)
- OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
Dbg::PostException(exception_object);
}
// We only care about how many backward branches were executed in the Jit.
void BackwardBranch(Thread* /*thread*/, ArtMethod* method, int32_t dex_pc_offset)
- OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
LOG(ERROR) << "Unexpected backward branch event in debugger " << PrettyMethod(method)
<< " " << dex_pc_offset;
}
private:
static bool IsReturn(ArtMethod* method, uint32_t dex_pc)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
const DexFile::CodeItem* code_item = method->GetCodeItem();
const Instruction* instruction = Instruction::At(&code_item->insns_[dex_pc]);
return instruction->IsReturn();
}
- static bool IsListeningToDexPcMoved() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ static bool IsListeningToDexPcMoved() SHARED_REQUIRES(Locks::mutator_lock_) {
return IsListeningTo(instrumentation::Instrumentation::kDexPcMoved);
}
- static bool IsListeningToMethodExit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ static bool IsListeningToMethodExit() SHARED_REQUIRES(Locks::mutator_lock_) {
return IsListeningTo(instrumentation::Instrumentation::kMethodExited);
}
static bool IsListeningTo(instrumentation::Instrumentation::InstrumentationEvent event)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return (Dbg::GetInstrumentationEvents() & event) != 0;
}
@@ -298,8 +299,8 @@
}
static bool IsBreakpoint(const ArtMethod* m, uint32_t dex_pc)
- LOCKS_EXCLUDED(Locks::breakpoint_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ REQUIRES(!Locks::breakpoint_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
if (gBreakpoints[i].DexPc() == dex_pc && gBreakpoints[i].Method() == m) {
@@ -311,7 +312,7 @@
}
static bool IsSuspendedForDebugger(ScopedObjectAccessUnchecked& soa, Thread* thread)
- LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) {
+ REQUIRES(!Locks::thread_suspend_count_lock_) {
MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
// A thread may be suspended for GC; in this code, we really want to know whether
// there's a debugger suspension active.
@@ -319,7 +320,7 @@
}
static mirror::Array* DecodeNonNullArray(JDWP::RefTypeId id, JDWP::JdwpError* error)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(id, error);
if (o == nullptr) {
*error = JDWP::ERR_INVALID_OBJECT;
@@ -334,7 +335,7 @@
}
static mirror::Class* DecodeClass(JDWP::RefTypeId id, JDWP::JdwpError* error)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(id, error);
if (o == nullptr) {
*error = JDWP::ERR_INVALID_OBJECT;
@@ -350,8 +351,8 @@
static Thread* DecodeThread(ScopedObjectAccessUnchecked& soa, JDWP::ObjectId thread_id,
JDWP::JdwpError* error)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::thread_suspend_count_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_) {
mirror::Object* thread_peer = Dbg::GetObjectRegistry()->Get<mirror::Object*>(thread_id, error);
if (thread_peer == nullptr) {
// This isn't even an object.
@@ -381,14 +382,14 @@
}
static JDWP::JdwpTag BasicTagFromClass(mirror::Class* klass)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
std::string temp;
const char* descriptor = klass->GetDescriptor(&temp);
return BasicTagFromDescriptor(descriptor);
}
static JDWP::JdwpTag TagFromClass(const ScopedObjectAccessUnchecked& soa, mirror::Class* c)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
CHECK(c != nullptr);
if (c->IsArrayClass()) {
return JDWP::JT_ARRAY;
@@ -764,7 +765,7 @@
OwnedMonitorVisitor(Thread* thread, Context* context,
std::vector<JDWP::ObjectId>* monitor_vector,
std::vector<uint32_t>* stack_depth_vector)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
current_stack_depth(0),
monitors(monitor_vector),
@@ -781,7 +782,7 @@
}
static void AppendOwnedMonitors(mirror::Object* owned_monitor, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
OwnedMonitorVisitor* visitor = reinterpret_cast<OwnedMonitorVisitor*>(arg);
visitor->monitors->push_back(gRegistry->Add(owned_monitor));
visitor->stack_depths->push_back(visitor->current_stack_depth);
@@ -948,33 +949,27 @@
return JDWP::ERR_NONE;
}
+// Visitor that collects the complete list of reference classes (i.e. all classes
+// except the primitive types) as RefTypeId values.
+class ClassListCreator : public ClassVisitor {
+ public:
+ explicit ClassListCreator(std::vector<JDWP::RefTypeId>* classes) : classes_(classes) {}
+
+ bool Visit(mirror::Class* c) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (!c->IsPrimitive()) {
+ classes_->push_back(Dbg::GetObjectRegistry()->AddRefType(c));
+ }
+ return true;
+ }
+
+ private:
+ std::vector<JDWP::RefTypeId>* const classes_;
+};
+
void Dbg::GetClassList(std::vector<JDWP::RefTypeId>* classes) {
- // Get the complete list of reference classes (i.e. all classes except
- // the primitive types).
- // Returns a newly-allocated buffer full of RefTypeId values.
- struct ClassListCreator {
- explicit ClassListCreator(std::vector<JDWP::RefTypeId>* classes_in) : classes(classes_in) {
- }
-
- static bool Visit(mirror::Class* c, void* arg) {
- return reinterpret_cast<ClassListCreator*>(arg)->Visit(c);
- }
-
- // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
- // annotalysis.
- bool Visit(mirror::Class* c) NO_THREAD_SAFETY_ANALYSIS {
- if (!c->IsPrimitive()) {
- classes->push_back(gRegistry->AddRefType(c));
- }
- return true;
- }
-
- std::vector<JDWP::RefTypeId>* const classes;
- };
-
ClassListCreator clc(classes);
- Runtime::Current()->GetClassLinker()->VisitClassesWithoutClassesLock(ClassListCreator::Visit,
- &clc);
+ Runtime::Current()->GetClassLinker()->VisitClassesWithoutClassesLock(&clc);
}
JDWP::JdwpError Dbg::GetClassInfo(JDWP::RefTypeId class_id, JDWP::JdwpTypeTag* pTypeTag,
@@ -1006,7 +1001,7 @@
void Dbg::FindLoadedClassBySignature(const char* descriptor, std::vector<JDWP::RefTypeId>* ids) {
std::vector<mirror::Class*> classes;
- Runtime::Current()->GetClassLinker()->LookupClasses(descriptor, classes);
+ Runtime::Current()->GetClassLinker()->LookupClasses(descriptor, outof(classes));
ids->clear();
for (size_t i = 0; i < classes.size(); ++i) {
ids->push_back(gRegistry->Add(classes[i]));
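The outof(classes) call above makes the output parameter explicit at the call site. A hedged sketch of what such a wrapper could look like — the name outof comes from the call site; everything else here is an assumption, and the real helper in runtime/base/ may differ:

// Hedged sketch of an explicit out-parameter wrapper; an assumption, not the actual ART helper.
template <typename T>
class out {
 public:
  explicit out(T* param) : param_(param) {}
  T& operator*() const { return *param_; }
  T* operator->() const { return param_; }
 private:
  T* const param_;  // Never null: outof() takes a reference.
};

// Marks 'param' as an output argument at the call site,
// e.g. LookupClasses(descriptor, outof(classes)).
template <typename T>
out<T> outof(T& param) {
  return out<T>(&param);
}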
@@ -1270,17 +1265,17 @@
}
static JDWP::MethodId ToMethodId(const ArtMethod* m)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return static_cast<JDWP::MethodId>(reinterpret_cast<uintptr_t>(m));
}
static ArtField* FromFieldId(JDWP::FieldId fid)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return reinterpret_cast<ArtField*>(static_cast<uintptr_t>(fid));
}
static ArtMethod* FromMethodId(JDWP::MethodId mid)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return reinterpret_cast<ArtMethod*>(static_cast<uintptr_t>(mid));
}
@@ -1326,10 +1321,7 @@
return modifier_instance == event_instance;
}
-void Dbg::SetJdwpLocation(JDWP::JdwpLocation* location, ArtMethod* m, uint32_t dex_pc)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(Locks::thread_list_lock_,
- Locks::thread_suspend_count_lock_) {
+void Dbg::SetJdwpLocation(JDWP::JdwpLocation* location, ArtMethod* m, uint32_t dex_pc) {
if (m == nullptr) {
memset(location, 0, sizeof(*location));
} else {
@@ -1376,7 +1368,7 @@
* the end.
*/
static uint16_t MangleSlot(uint16_t slot, ArtMethod* m)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
const DexFile::CodeItem* code_item = m->GetCodeItem();
if (code_item == nullptr) {
// We should not get here for a method without code (native, proxy or abstract). Log it and
@@ -1398,7 +1390,7 @@
* slots to dex style argument placement.
*/
static uint16_t DemangleSlot(uint16_t slot, ArtMethod* m, JDWP::JdwpError* error)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
const DexFile::CodeItem* code_item = m->GetCodeItem();
if (code_item == nullptr) {
// We should not get here for a method without code (native, proxy or abstract). Log it and
@@ -1424,7 +1416,8 @@
return DexFile::kDexNoIndex16;
}
-JDWP::JdwpError Dbg::OutputDeclaredFields(JDWP::RefTypeId class_id, bool with_generic, JDWP::ExpandBuf* pReply) {
+JDWP::JdwpError Dbg::OutputDeclaredFields(JDWP::RefTypeId class_id, bool with_generic,
+ JDWP::ExpandBuf* pReply) {
JDWP::JdwpError error;
mirror::Class* c = DecodeClass(class_id, &error);
if (c == nullptr) {
@@ -1437,7 +1430,8 @@
expandBufAdd4BE(pReply, instance_field_count + static_field_count);
for (size_t i = 0; i < instance_field_count + static_field_count; ++i) {
- ArtField* f = (i < instance_field_count) ? c->GetInstanceField(i) : c->GetStaticField(i - instance_field_count);
+ ArtField* f = (i < instance_field_count) ? c->GetInstanceField(i) :
+ c->GetStaticField(i - instance_field_count);
expandBufAddFieldId(pReply, ToFieldId(f));
expandBufAddUtf8String(pReply, f->GetName());
expandBufAddUtf8String(pReply, f->GetTypeDescriptor());
@@ -1553,7 +1547,7 @@
static void Callback(void* context, uint16_t slot, uint32_t startAddress, uint32_t endAddress,
const char* name, const char* descriptor, const char* signature)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context);
VLOG(jdwp) << StringPrintf(" %2zd: %d(%d) '%s' '%s' '%s' actual slot=%d mangled slot=%d",
@@ -1641,7 +1635,7 @@
}
static JValue GetArtFieldValue(ArtField* f, mirror::Object* o)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
Primitive::Type fieldType = f->GetTypeAsPrimitiveType();
JValue field_value;
switch (fieldType) {
@@ -1688,7 +1682,7 @@
static JDWP::JdwpError GetFieldValueImpl(JDWP::RefTypeId ref_type_id, JDWP::ObjectId object_id,
JDWP::FieldId field_id, JDWP::ExpandBuf* pReply,
bool is_static)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
JDWP::JdwpError error;
mirror::Class* c = DecodeClass(ref_type_id, &error);
if (ref_type_id != 0 && c == nullptr) {
@@ -1744,7 +1738,7 @@
}
static JDWP::JdwpError SetArtFieldValue(ArtField* f, mirror::Object* o, uint64_t value, int width)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
Primitive::Type fieldType = f->GetTypeAsPrimitiveType();
// Debugging only happens at runtime so we know we are not running in a transaction.
static constexpr bool kNoTransactionMode = false;
@@ -1815,7 +1809,7 @@
static JDWP::JdwpError SetFieldValueImpl(JDWP::ObjectId object_id, JDWP::FieldId field_id,
uint64_t value, int width, bool is_static)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
JDWP::JdwpError error;
mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(object_id, &error);
if ((!is_static && o == nullptr) || error != JDWP::ERR_NONE) {
@@ -1945,7 +1939,7 @@
static mirror::Object* DecodeThreadGroup(ScopedObjectAccessUnchecked& soa,
JDWP::ObjectId thread_group_id, JDWP::JdwpError* error)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
mirror::Object* thread_group = Dbg::GetObjectRegistry()->Get<mirror::Object*>(thread_group_id,
error);
if (*error != JDWP::ERR_NONE) {
@@ -2004,7 +1998,7 @@
static void GetChildThreadGroups(ScopedObjectAccessUnchecked& soa, mirror::Object* thread_group,
std::vector<JDWP::ObjectId>* child_thread_group_ids)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
CHECK(thread_group != nullptr);
// Get the ArrayList<ThreadGroup> "groups" out of this thread group...
@@ -2158,7 +2152,7 @@
static bool IsInDesiredThreadGroup(ScopedObjectAccessUnchecked& soa,
mirror::Object* desired_thread_group, mirror::Object* peer)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// Do we want threads from all thread groups?
if (desired_thread_group == nullptr) {
return true;
@@ -2202,7 +2196,7 @@
}
}
-static int GetStackDepth(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+static int GetStackDepth(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_) {
struct CountStackDepthVisitor : public StackVisitor {
explicit CountStackDepthVisitor(Thread* thread_in)
: StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
@@ -2245,7 +2239,7 @@
public:
GetFrameVisitor(Thread* thread, size_t start_frame_in, size_t frame_count_in,
JDWP::ExpandBuf* buf_in)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
depth_(0),
start_frame_(start_frame_in),
@@ -2254,7 +2248,7 @@
expandBufAdd4BE(buf_, frame_count_);
}
- bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
if (GetMethod()->IsRuntimeMethod()) {
return true; // The debugger can't do anything useful with a frame that has no Method*.
}
@@ -2366,7 +2360,7 @@
struct GetThisVisitor : public StackVisitor {
GetThisVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id_in)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
this_object(nullptr),
frame_id(frame_id_in) {}
@@ -2408,7 +2402,7 @@
class FindFrameVisitor FINAL : public StackVisitor {
public:
FindFrameVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
frame_id_(frame_id),
error_(JDWP::ERR_INVALID_FRAMEID) {}
@@ -2482,14 +2476,14 @@
constexpr JDWP::JdwpError kStackFrameLocalAccessError = JDWP::ERR_ABSENT_INFORMATION;
static std::string GetStackContextAsString(const StackVisitor& visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return StringPrintf(" at DEX pc 0x%08x in method %s", visitor.GetDexPc(false),
PrettyMethod(visitor.GetMethod()).c_str());
}
static JDWP::JdwpError FailGetLocalValue(const StackVisitor& visitor, uint16_t vreg,
JDWP::JdwpTag tag)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
LOG(ERROR) << "Failed to read " << tag << " local from register v" << vreg
<< GetStackContextAsString(visitor);
return kStackFrameLocalAccessError;
@@ -2651,7 +2645,7 @@
template<typename T>
static JDWP::JdwpError FailSetLocalValue(const StackVisitor& visitor, uint16_t vreg,
JDWP::JdwpTag tag, T value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
LOG(ERROR) << "Failed to write " << tag << " local " << value
<< " (0x" << std::hex << value << ") into register v" << vreg
<< GetStackContextAsString(visitor);
@@ -2736,7 +2730,7 @@
}
static void SetEventLocation(JDWP::EventLocation* location, ArtMethod* m, uint32_t dex_pc)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(location != nullptr);
if (m == nullptr) {
memset(location, 0, sizeof(*location));
@@ -2814,7 +2808,7 @@
class CatchLocationFinder : public StackVisitor {
public:
CatchLocationFinder(Thread* self, const Handle<mirror::Throwable>& exception, Context* context)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: StackVisitor(self, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
self_(self),
exception_(exception),
@@ -2826,7 +2820,7 @@
throw_dex_pc_(DexFile::kDexNoIndex) {
}
- bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
ArtMethod* method = GetMethod();
DCHECK(method != nullptr);
if (method->IsRuntimeMethod()) {
@@ -2860,15 +2854,15 @@
return true; // Continue stack walk.
}
- ArtMethod* GetCatchMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ArtMethod* GetCatchMethod() SHARED_REQUIRES(Locks::mutator_lock_) {
return catch_method_;
}
- ArtMethod* GetThrowMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ArtMethod* GetThrowMethod() SHARED_REQUIRES(Locks::mutator_lock_) {
return throw_method_;
}
- mirror::Object* GetThisAtThrow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Object* GetThisAtThrow() SHARED_REQUIRES(Locks::mutator_lock_) {
return this_at_throw_.Get();
}
@@ -3170,7 +3164,7 @@
}
static bool IsMethodPossiblyInlined(Thread* self, ArtMethod* m)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
const DexFile::CodeItem* code_item = m->GetCodeItem();
if (code_item == nullptr) {
// TODO: We should not be asked to watch a location in a native or abstract method, so the code item
@@ -3191,7 +3185,7 @@
}
static const Breakpoint* FindFirstBreakpointForMethod(ArtMethod* m)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::breakpoint_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_, Locks::breakpoint_lock_) {
for (Breakpoint& breakpoint : gBreakpoints) {
if (breakpoint.Method() == m) {
return &breakpoint;
@@ -3208,7 +3202,7 @@
// Sanity checks all existing breakpoints on the same method.
static void SanityCheckExistingBreakpoints(ArtMethod* m,
DeoptimizationRequest::Kind deoptimization_kind)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::breakpoint_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_, Locks::breakpoint_lock_) {
for (const Breakpoint& breakpoint : gBreakpoints) {
if (breakpoint.Method() == m) {
CHECK_EQ(deoptimization_kind, breakpoint.GetDeoptimizationKind());
@@ -3237,7 +3231,7 @@
static DeoptimizationRequest::Kind GetRequiredDeoptimizationKind(Thread* self,
ArtMethod* m,
const Breakpoint** existing_brkpt)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (!Dbg::RequiresDeoptimization()) {
// We already run in interpreter-only mode so we don't need to deoptimize anything.
VLOG(jdwp) << "No need for deoptimization when fully running with interpreter for method "
@@ -3498,8 +3492,8 @@
class ScopedThreadSuspension {
public:
ScopedThreadSuspension(Thread* self, JDWP::ObjectId thread_id)
- LOCKS_EXCLUDED(Locks::thread_list_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
+ REQUIRES(!Locks::thread_list_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_) :
thread_(nullptr),
error_(JDWP::ERR_NONE),
self_suspend_(false),
@@ -3560,7 +3554,7 @@
// Work out what ArtMethod* we're in, the current line number, and how deep the stack currently
// is for step-out.
struct SingleStepStackVisitor : public StackVisitor {
- explicit SingleStepStackVisitor(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ explicit SingleStepStackVisitor(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_)
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
stack_depth(0),
method(nullptr),
@@ -4419,7 +4413,7 @@
needHeader_ = false;
}
- void Flush() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void Flush() SHARED_REQUIRES(Locks::mutator_lock_) {
if (pieceLenField_ == nullptr) {
// Flush immediately post Reset (maybe back-to-back Flush). Ignore.
CHECK(needHeader_);
@@ -4435,13 +4429,13 @@
}
static void HeapChunkJavaCallback(void* start, void* end, size_t used_bytes, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
+ SHARED_REQUIRES(Locks::heap_bitmap_lock_,
Locks::mutator_lock_) {
reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkJavaCallback(start, end, used_bytes);
}
static void HeapChunkNativeCallback(void* start, void* end, size_t used_bytes, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkNativeCallback(start, end, used_bytes);
}
@@ -4461,7 +4455,7 @@
}
// Returns true if the object is not an empty chunk.
- bool ProcessRecord(void* start, size_t used_bytes) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool ProcessRecord(void* start, size_t used_bytes) SHARED_REQUIRES(Locks::mutator_lock_) {
// Note: heap callbacks cannot manipulate the heap upon which they are crawling; care is taken
// in the following code not to allocate memory by ensuring buf_ is of the correct size.
if (used_bytes == 0) {
@@ -4498,7 +4492,7 @@
}
void HeapChunkNativeCallback(void* start, void* /*end*/, size_t used_bytes)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (ProcessRecord(start, used_bytes)) {
uint8_t state = ExamineNativeObject(start);
AppendChunk(state, start, used_bytes + chunk_overhead_, true /*is_native*/);
@@ -4507,7 +4501,7 @@
}
void HeapChunkJavaCallback(void* start, void* /*end*/, size_t used_bytes)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
if (ProcessRecord(start, used_bytes)) {
// Determine the type of this chunk.
// OLD-TODO: if context.merge, see if this chunk is different from the last chunk.
@@ -4519,7 +4513,7 @@
}
void AppendChunk(uint8_t state, void* ptr, size_t length, bool is_native)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// Make sure there's enough room left in the buffer.
// We need two bytes for every started run of 256 allocation units used by the chunk, plus
// 17 bytes for any header.
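As a worked instance of that bound, assuming 8-byte allocation units (an assumption for illustration; the unit size is defined elsewhere in this file):

// Hedged arithmetic sketch of the buffer bound; kAllocUnit is an assumed value.
constexpr size_t kAllocUnit = 8;
constexpr size_t MaxHpsgBytes(size_t chunk_bytes) {
  // ceil(units / 256) runs, two bytes per run, plus 17 bytes of header.
  return 2 * ((chunk_bytes / kAllocUnit + 255) / 256) + 17;
}
static_assert(MaxHpsgBytes(5000) == 23, "5000 bytes -> 625 units -> 3 runs -> 23 bytes");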
@@ -4552,12 +4546,12 @@
*p_++ = length - 1;
}
- uint8_t ExamineNativeObject(const void* p) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uint8_t ExamineNativeObject(const void* p) SHARED_REQUIRES(Locks::mutator_lock_) {
return p == nullptr ? HPSG_STATE(SOLIDITY_FREE, 0) : HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
}
uint8_t ExamineJavaObject(mirror::Object* o)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
if (o == nullptr) {
return HPSG_STATE(SOLIDITY_FREE, 0);
}
@@ -4607,7 +4601,7 @@
};
static void BumpPointerSpaceCallback(mirror::Object* obj, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
const size_t size = RoundUp(obj->SizeOf(), kObjectAlignment);
HeapChunkContext::HeapChunkJavaCallback(
obj, reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(obj) + size), size, arg);
@@ -4633,7 +4627,7 @@
// Send a series of heap segment chunks.
HeapChunkContext context(what == HPSG_WHAT_MERGED_OBJECTS, native);
if (native) {
-#if defined(HAVE_ANDROID_OS) && defined(USE_DLMALLOC)
+#if defined(__ANDROID__) && defined(USE_DLMALLOC)
dlmalloc_inspect_all(HeapChunkContext::HeapChunkNativeCallback, &context);
HeapChunkContext::HeapChunkNativeCallback(nullptr, nullptr, 0, &context); // Indicate end of a space.
#else
@@ -4772,7 +4766,7 @@
};
static const char* GetMethodSourceFile(ArtMethod* method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(method != nullptr);
const char* source_file = method->GetDeclaringClassSourceFile();
return (source_file != nullptr) ? source_file : "";
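All of the annotation changes in this file follow one scheme: SHARED_LOCKS_REQUIRED becomes SHARED_REQUIRES, EXCLUSIVE_LOCKS_REQUIRED becomes REQUIRES, and LOCKS_EXCLUDED(mu) becomes the negative requirement REQUIRES(!mu). A hedged sketch of how such macros typically map onto Clang's capability attributes — the actual definitions live in ART's macro headers and may differ:

// Hedged sketch; an assumption, not ART's actual base/macros.h.
#define REQUIRES(...) __attribute__((requires_capability(__VA_ARGS__)))
#define SHARED_REQUIRES(...) __attribute__((requires_shared_capability(__VA_ARGS__)))
// A negated argument such as REQUIRES(!Locks::thread_list_lock_) asserts that the lock
// is NOT held on entry. Unlike the old LOCKS_EXCLUDED hint, this propagates to callers
// as part of the function's contract, which -Wthread-safety-negative can then enforce.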
diff --git a/runtime/debugger.h b/runtime/debugger.h
index fd7d46c..a9fa6ce 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -79,7 +79,7 @@
JDWP::ExpandBuf* const reply;
void VisitRoots(RootVisitor* visitor, const RootInfo& root_info)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
private:
DISALLOW_COPY_AND_ASSIGN(DebugInvokeReq);
@@ -155,15 +155,15 @@
DeoptimizationRequest() : kind_(kNothing), instrumentation_event_(0), method_(nullptr) {}
DeoptimizationRequest(const DeoptimizationRequest& other)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: kind_(other.kind_), instrumentation_event_(other.instrumentation_event_) {
// Create a new JNI global reference for the method.
SetMethod(other.Method());
}
- ArtMethod* Method() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtMethod* Method() const SHARED_REQUIRES(Locks::mutator_lock_);
- void SetMethod(ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetMethod(ArtMethod* m) SHARED_REQUIRES(Locks::mutator_lock_);
// Name 'Kind()' would collide with the above enum name.
Kind GetKind() const {
@@ -205,7 +205,7 @@
static void StopJdwp();
// Invoked by the GC in case we need to keep DDMS informed.
- static void GcDidFinish() LOCKS_EXCLUDED(Locks::mutator_lock_);
+ static void GcDidFinish() REQUIRES(!Locks::mutator_lock_);
// Return the DebugInvokeReq for the current thread.
static DebugInvokeReq* GetInvokeReq();
@@ -219,8 +219,8 @@
*/
static void Connected();
static void GoActive()
- LOCKS_EXCLUDED(Locks::breakpoint_lock_, Locks::deoptimization_lock_, Locks::mutator_lock_);
- static void Disconnected() LOCKS_EXCLUDED(Locks::deoptimization_lock_, Locks::mutator_lock_);
+ REQUIRES(!Locks::breakpoint_lock_, !Locks::deoptimization_lock_, !Locks::mutator_lock_);
+ static void Disconnected() REQUIRES(!Locks::deoptimization_lock_, !Locks::mutator_lock_);
static void Dispose() {
gDisposed = true;
}
@@ -239,8 +239,7 @@
// Returns true if a method has any breakpoints.
static bool MethodHasAnyBreakpoints(ArtMethod* method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(Locks::breakpoint_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::breakpoint_lock_);
static bool IsDisposed() {
return gDisposed;
@@ -254,248 +253,233 @@
static int64_t LastDebuggerActivity();
static void UndoDebuggerSuspensions()
- LOCKS_EXCLUDED(Locks::thread_list_lock_,
- Locks::thread_suspend_count_lock_);
+ REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
/*
* Class, Object, Array
*/
static std::string GetClassName(JDWP::RefTypeId id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static std::string GetClassName(mirror::Class* klass)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static JDWP::JdwpError GetClassObject(JDWP::RefTypeId id, JDWP::ObjectId* class_object_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static JDWP::JdwpError GetSuperclass(JDWP::RefTypeId id, JDWP::RefTypeId* superclass_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static JDWP::JdwpError GetClassLoader(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static JDWP::JdwpError GetModifiers(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static JDWP::JdwpError GetReflectedType(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static void GetClassList(std::vector<JDWP::RefTypeId>* classes)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static JDWP::JdwpError GetClassInfo(JDWP::RefTypeId class_id, JDWP::JdwpTypeTag* pTypeTag,
uint32_t* pStatus, std::string* pDescriptor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static void FindLoadedClassBySignature(const char* descriptor, std::vector<JDWP::RefTypeId>* ids)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static JDWP::JdwpError GetReferenceType(JDWP::ObjectId object_id, JDWP::ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static JDWP::JdwpError GetSignature(JDWP::RefTypeId ref_type_id, std::string* signature)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static JDWP::JdwpError GetSourceFile(JDWP::RefTypeId ref_type_id, std::string* source_file)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static JDWP::JdwpError GetObjectTag(JDWP::ObjectId object_id, uint8_t* tag)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static size_t GetTagWidth(JDWP::JdwpTag tag);
static JDWP::JdwpError GetArrayLength(JDWP::ObjectId array_id, int32_t* length)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static JDWP::JdwpError OutputArray(JDWP::ObjectId array_id, int offset, int count,
JDWP::ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static JDWP::JdwpError SetArrayElements(JDWP::ObjectId array_id, int offset, int count,
JDWP::Request* request)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static JDWP::JdwpError CreateString(const std::string& str, JDWP::ObjectId* new_string_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static JDWP::JdwpError CreateObject(JDWP::RefTypeId class_id, JDWP::ObjectId* new_object_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static JDWP::JdwpError CreateArrayObject(JDWP::RefTypeId array_class_id, uint32_t length,
JDWP::ObjectId* new_array_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
//
// Event filtering.
//
static bool MatchThread(JDWP::ObjectId expected_thread_id, Thread* event_thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static bool MatchLocation(const JDWP::JdwpLocation& expected_location,
const JDWP::EventLocation& event_location)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static bool MatchType(mirror::Class* event_class, JDWP::RefTypeId class_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static bool MatchField(JDWP::RefTypeId expected_type_id, JDWP::FieldId expected_field_id,
ArtField* event_field)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static bool MatchInstance(JDWP::ObjectId expected_instance_id, mirror::Object* event_instance)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
//
// Monitors.
//
static JDWP::JdwpError GetMonitorInfo(JDWP::ObjectId object_id, JDWP::ExpandBuf* reply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static JDWP::JdwpError GetOwnedMonitors(JDWP::ObjectId thread_id,
std::vector<JDWP::ObjectId>* monitors,
std::vector<uint32_t>* stack_depths)
- LOCKS_EXCLUDED(Locks::thread_list_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
static JDWP::JdwpError GetContendedMonitor(JDWP::ObjectId thread_id,
JDWP::ObjectId* contended_monitor)
- LOCKS_EXCLUDED(Locks::thread_list_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
//
// Heap.
//
static JDWP::JdwpError GetInstanceCounts(const std::vector<JDWP::RefTypeId>& class_ids,
std::vector<uint64_t>* counts)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static JDWP::JdwpError GetInstances(JDWP::RefTypeId class_id, int32_t max_count,
std::vector<JDWP::ObjectId>* instances)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static JDWP::JdwpError GetReferringObjects(JDWP::ObjectId object_id, int32_t max_count,
std::vector<JDWP::ObjectId>* referring_objects)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static JDWP::JdwpError DisableCollection(JDWP::ObjectId object_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static JDWP::JdwpError EnableCollection(JDWP::ObjectId object_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static JDWP::JdwpError IsCollected(JDWP::ObjectId object_id, bool* is_collected)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static void DisposeObject(JDWP::ObjectId object_id, uint32_t reference_count)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
//
// Methods and fields.
//
static std::string GetMethodName(JDWP::MethodId method_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static JDWP::JdwpError OutputDeclaredFields(JDWP::RefTypeId ref_type_id, bool with_generic,
JDWP::ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static JDWP::JdwpError OutputDeclaredMethods(JDWP::RefTypeId ref_type_id, bool with_generic,
JDWP::ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static JDWP::JdwpError OutputDeclaredInterfaces(JDWP::RefTypeId ref_type_id,
JDWP::ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static void OutputLineTable(JDWP::RefTypeId ref_type_id, JDWP::MethodId method_id,
JDWP::ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static void OutputVariableTable(JDWP::RefTypeId ref_type_id, JDWP::MethodId id, bool with_generic,
JDWP::ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static void OutputMethodReturnValue(JDWP::MethodId method_id, const JValue* return_value,
JDWP::ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static void OutputFieldValue(JDWP::FieldId field_id, const JValue* field_value,
JDWP::ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static JDWP::JdwpError GetBytecodes(JDWP::RefTypeId class_id, JDWP::MethodId method_id,
std::vector<uint8_t>* bytecodes)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static std::string GetFieldName(JDWP::FieldId field_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static JDWP::JdwpTag GetFieldBasicTag(JDWP::FieldId field_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static JDWP::JdwpTag GetStaticFieldBasicTag(JDWP::FieldId field_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static JDWP::JdwpError GetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id,
JDWP::ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static JDWP::JdwpError SetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id,
uint64_t value, int width)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static JDWP::JdwpError GetStaticFieldValue(JDWP::RefTypeId ref_type_id, JDWP::FieldId field_id,
JDWP::ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static JDWP::JdwpError SetStaticFieldValue(JDWP::FieldId field_id, uint64_t value, int width)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static JDWP::JdwpError StringToUtf8(JDWP::ObjectId string_id, std::string* str)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static void OutputJValue(JDWP::JdwpTag tag, const JValue* return_value, JDWP::ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
/*
* Thread, ThreadGroup, Frame
*/
static JDWP::JdwpError GetThreadName(JDWP::ObjectId thread_id, std::string* name)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(Locks::thread_list_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::thread_list_lock_);
static JDWP::JdwpError GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(Locks::thread_list_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::thread_list_lock_);
static JDWP::JdwpError GetThreadGroupName(JDWP::ObjectId thread_group_id,
JDWP::ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static JDWP::JdwpError GetThreadGroupParent(JDWP::ObjectId thread_group_id,
JDWP::ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static JDWP::JdwpError GetThreadGroupChildren(JDWP::ObjectId thread_group_id,
JDWP::ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static JDWP::ObjectId GetSystemThreadGroupId()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static JDWP::JdwpThreadStatus ToJdwpThreadStatus(ThreadState state);
static JDWP::JdwpError GetThreadStatus(JDWP::ObjectId thread_id,
JDWP::JdwpThreadStatus* pThreadStatus,
JDWP::JdwpSuspendStatus* pSuspendStatus)
- LOCKS_EXCLUDED(Locks::thread_list_lock_);
+ REQUIRES(!Locks::thread_list_lock_);
static JDWP::JdwpError GetThreadDebugSuspendCount(JDWP::ObjectId thread_id,
JDWP::ExpandBuf* pReply)
- LOCKS_EXCLUDED(Locks::thread_list_lock_,
- Locks::thread_suspend_count_lock_);
+ REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
// static void WaitForSuspend(JDWP::ObjectId thread_id);
// Fills 'thread_ids' with the threads in the given thread group. If thread_group_id == 0,
// returns all threads.
static void GetThreads(mirror::Object* thread_group, std::vector<JDWP::ObjectId>* thread_ids)
- LOCKS_EXCLUDED(Locks::thread_list_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
static JDWP::JdwpError GetThreadFrameCount(JDWP::ObjectId thread_id, size_t* result)
- LOCKS_EXCLUDED(Locks::thread_list_lock_);
+ REQUIRES(!Locks::thread_list_lock_);
static JDWP::JdwpError GetThreadFrames(JDWP::ObjectId thread_id, size_t start_frame,
size_t frame_count, JDWP::ExpandBuf* buf)
- LOCKS_EXCLUDED(Locks::thread_list_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
- static JDWP::ObjectId GetThreadSelfId() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static JDWP::ObjectId GetThreadId(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static JDWP::ObjectId GetThreadSelfId() SHARED_REQUIRES(Locks::mutator_lock_);
+ static JDWP::ObjectId GetThreadId(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_);
static void SuspendVM()
- LOCKS_EXCLUDED(Locks::thread_list_lock_,
- Locks::thread_suspend_count_lock_);
+ REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
static void ResumeVM()
- LOCKS_EXCLUDED(Locks::thread_list_lock_,
- Locks::thread_suspend_count_lock_);
+ REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
static JDWP::JdwpError SuspendThread(JDWP::ObjectId thread_id, bool request_suspension = true)
- LOCKS_EXCLUDED(Locks::mutator_lock_,
- Locks::thread_list_lock_,
- Locks::thread_suspend_count_lock_);
+ REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_,
+ !Locks::thread_suspend_count_lock_);
static void ResumeThread(JDWP::ObjectId thread_id)
- LOCKS_EXCLUDED(Locks::thread_list_lock_,
- Locks::thread_suspend_count_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
static void SuspendSelf();
static JDWP::JdwpError GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame_id,
JDWP::ObjectId* result)
- LOCKS_EXCLUDED(Locks::thread_list_lock_,
- Locks::thread_suspend_count_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
static JDWP::JdwpError GetLocalValues(JDWP::Request* request, JDWP::ExpandBuf* pReply)
- LOCKS_EXCLUDED(Locks::thread_list_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
static JDWP::JdwpError SetLocalValues(JDWP::Request* request)
- LOCKS_EXCLUDED(Locks::thread_list_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
static JDWP::JdwpError Interrupt(JDWP::ObjectId thread_id)
- LOCKS_EXCLUDED(Locks::thread_list_lock_);
+ REQUIRES(!Locks::thread_list_lock_);
/*
* Debugger notification
@@ -508,47 +492,42 @@
};
static void PostFieldAccessEvent(ArtMethod* m, int dex_pc, mirror::Object* this_object,
ArtField* f)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static void PostFieldModificationEvent(ArtMethod* m, int dex_pc,
mirror::Object* this_object, ArtField* f,
const JValue* field_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static void PostException(mirror::Throwable* exception)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static void PostThreadStart(Thread* t)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static void PostThreadDeath(Thread* t)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static void PostClassPrepare(mirror::Class* c)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static void UpdateDebugger(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t new_dex_pc,
int event_flags, const JValue* return_value)
- LOCKS_EXCLUDED(Locks::breakpoint_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!Locks::breakpoint_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
// Indicates whether we need deoptimization for debugging.
static bool RequiresDeoptimization();
// Records deoptimization request in the queue.
static void RequestDeoptimization(const DeoptimizationRequest& req)
- LOCKS_EXCLUDED(Locks::deoptimization_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!Locks::deoptimization_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
// Manage deoptimization after updating the JDWP events list. Suspends all threads, processes each
// request and finally resumes all threads.
static void ManageDeoptimization()
- LOCKS_EXCLUDED(Locks::deoptimization_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!Locks::deoptimization_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
// Breakpoints.
static void WatchLocation(const JDWP::JdwpLocation* pLoc, DeoptimizationRequest* req)
- LOCKS_EXCLUDED(Locks::breakpoint_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!Locks::breakpoint_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
static void UnwatchLocation(const JDWP::JdwpLocation* pLoc, DeoptimizationRequest* req)
- LOCKS_EXCLUDED(Locks::breakpoint_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!Locks::breakpoint_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
/*
* Forced interpreter checkers for single-step and continue support.
@@ -557,7 +536,7 @@
// Indicates whether we need to force the use of the interpreter to invoke a method.
// This allows single-stepping or continuing into the called method.
static bool IsForcedInterpreterNeededForCalling(Thread* thread, ArtMethod* m)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (!IsDebuggerActive()) {
return false;
}
@@ -568,7 +547,7 @@
// method through the resolution trampoline. This allows single-stepping or continuing into
// the called method.
static bool IsForcedInterpreterNeededForResolution(Thread* thread, ArtMethod* m)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (!IsDebuggerActive()) {
return false;
}
@@ -579,7 +558,7 @@
// a method through the resolution trampoline. This allows deoptimizing the stack for
// debugging when we return from the called method.
static bool IsForcedInstrumentationNeededForResolution(Thread* thread, ArtMethod* m)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (!IsDebuggerActive()) {
return false;
}
@@ -590,7 +569,7 @@
// interpreter into the runtime. This allows deoptimizing the stack and continuing
// execution with the interpreter for debugging.
static bool IsForcedInterpreterNeededForUpcall(Thread* thread, ArtMethod* m)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (!IsDebuggerActive()) {
return false;
}
@@ -600,10 +579,9 @@
// Single-stepping.
static JDWP::JdwpError ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize size,
JDWP::JdwpStepDepth depth)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static void UnconfigureStep(JDWP::ObjectId thread_id)
- LOCKS_EXCLUDED(Locks::thread_list_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
/*
* Invoke support
@@ -623,9 +601,8 @@
JDWP::MethodId method_id, uint32_t arg_count,
uint64_t arg_values[], JDWP::JdwpTag* arg_types,
uint32_t options)
- LOCKS_EXCLUDED(Locks::thread_list_lock_,
- Locks::thread_suspend_count_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Called by the event thread to execute a method prepared by the JDWP thread in the given
// DebugInvokeReq object. Once the invocation completes, the event thread attaches a reply
@@ -642,30 +619,29 @@
* DDM support.
*/
static void DdmSendThreadNotification(Thread* t, uint32_t type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static void DdmSetThreadNotification(bool enable)
- LOCKS_EXCLUDED(Locks::thread_list_lock_);
+ REQUIRES(!Locks::thread_list_lock_);
static bool DdmHandlePacket(JDWP::Request* request, uint8_t** pReplyBuf, int* pReplyLen);
- static void DdmConnected() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static void DdmDisconnected() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void DdmConnected() SHARED_REQUIRES(Locks::mutator_lock_);
+ static void DdmDisconnected() SHARED_REQUIRES(Locks::mutator_lock_);
static void DdmSendChunk(uint32_t type, const std::vector<uint8_t>& bytes)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static void DdmSendChunk(uint32_t type, size_t len, const uint8_t* buf)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static void DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static void VisitRoots(RootVisitor* visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
/*
* Allocation tracking support.
*/
- static void SetAllocTrackingEnabled(bool enabled) LOCKS_EXCLUDED(Locks::alloc_tracker_lock_);
+ static void SetAllocTrackingEnabled(bool enabled) REQUIRES(!Locks::alloc_tracker_lock_);
static jbyteArray GetRecentAllocations()
- LOCKS_EXCLUDED(Locks::alloc_tracker_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static void DumpRecentAllocations() LOCKS_EXCLUDED(Locks::alloc_tracker_lock_);
+ REQUIRES(!Locks::alloc_tracker_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ static void DumpRecentAllocations() REQUIRES(!Locks::alloc_tracker_lock_);
enum HpifWhen {
HPIF_WHEN_NEVER = 0,
@@ -674,7 +650,7 @@
HPIF_WHEN_EVERY_GC = 3
};
static int DdmHandleHpifChunk(HpifWhen when)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
enum HpsgWhen {
HPSG_WHEN_NEVER = 0,
@@ -687,78 +663,76 @@
static bool DdmHandleHpsgNhsgChunk(HpsgWhen when, HpsgWhat what, bool native);
static void DdmSendHeapInfo(HpifWhen reason)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static void DdmSendHeapSegments(bool native)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static ObjectRegistry* GetObjectRegistry() {
return gRegistry;
}
static JDWP::JdwpTag TagFromObject(const ScopedObjectAccessUnchecked& soa, mirror::Object* o)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static JDWP::JdwpTypeTag GetTypeTag(mirror::Class* klass)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static JDWP::FieldId ToFieldId(const ArtField* f)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static void SetJdwpLocation(JDWP::JdwpLocation* location, ArtMethod* m, uint32_t dex_pc)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
static JDWP::JdwpState* GetJdwpState();
- static uint32_t GetInstrumentationEvents() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ static uint32_t GetInstrumentationEvents() SHARED_REQUIRES(Locks::mutator_lock_) {
return instrumentation_events_;
}
private:
static void ExecuteMethodWithoutPendingException(ScopedObjectAccess& soa, DebugInvokeReq* pReq)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static void BuildInvokeReply(JDWP::ExpandBuf* pReply, uint32_t request_id,
JDWP::JdwpTag result_tag, uint64_t result_value,
JDWP::ObjectId exception)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static JDWP::JdwpError GetLocalValue(const StackVisitor& visitor,
ScopedObjectAccessUnchecked& soa, int slot,
JDWP::JdwpTag tag, uint8_t* buf, size_t width)
- LOCKS_EXCLUDED(Locks::thread_list_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
static JDWP::JdwpError SetLocalValue(StackVisitor& visitor, int slot, JDWP::JdwpTag tag,
uint64_t value, size_t width)
- LOCKS_EXCLUDED(Locks::thread_list_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
- static void DdmBroadcast(bool connect) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void DdmBroadcast(bool connect) SHARED_REQUIRES(Locks::mutator_lock_);
static void PostThreadStartOrStop(Thread*, uint32_t)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static void PostLocationEvent(ArtMethod* method, int pcOffset,
mirror::Object* thisPtr, int eventFlags,
const JValue* return_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static void ProcessDeoptimizationRequest(const DeoptimizationRequest& request)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(Locks::mutator_lock_);
static void RequestDeoptimizationLocked(const DeoptimizationRequest& req)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::deoptimization_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(Locks::deoptimization_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
static bool IsForcedInterpreterNeededForCallingImpl(Thread* thread, ArtMethod* m)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static bool IsForcedInterpreterNeededForResolutionImpl(Thread* thread, ArtMethod* m)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static bool IsForcedInstrumentationNeededForResolutionImpl(Thread* thread, ArtMethod* m)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static bool IsForcedInterpreterNeededForUpcallImpl(Thread* thread, ArtMethod* m)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Indicates whether the debugger is making requests.
static bool gDebuggerActive;
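To make the combined contracts in this header concrete, a small hypothetical example (not a function from ART) of a method that must be called with the mutator lock held shared and the thread list lock free:

// Hypothetical illustration only; not ART code.
void ListPeers(std::vector<JDWP::ObjectId>* ids)
    SHARED_REQUIRES(Locks::mutator_lock_)
    REQUIRES(!Locks::thread_list_lock_) {
  // Safe to acquire here: the negative requirement guarantees the lock is free on entry.
  MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
  // ... fill 'ids' under the lock ...
}
// A caller that already holds thread_list_lock_ gets a thread-safety warning at the call site.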
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index 3a15f1a..ceefdec 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -870,7 +870,7 @@
//
// This is used by the runtime; therefore use art::Method, not art::DexFile::Method.
int32_t GetLineNumFromPC(ArtMethod* method, uint32_t rel_pc) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void DecodeDebugInfo(const CodeItem* code_item, bool is_static, uint32_t method_idx,
DexDebugNewPositionCb position_cb, DexDebugNewLocalCb local_cb,
@@ -1314,10 +1314,10 @@
EncodedStaticFieldValueIterator(const DexFile& dex_file, Handle<mirror::DexCache>* dex_cache,
Handle<mirror::ClassLoader>* class_loader,
ClassLinker* linker, const DexFile::ClassDef& class_def)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive>
- void ReadValueToField(ArtField* field) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void ReadValueToField(ArtField* field) const SHARED_REQUIRES(Locks::mutator_lock_);
bool HasNext() const { return pos_ < array_size_; }
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index de925b7..3e15cc5 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -41,7 +41,7 @@
inline ArtMethod* GetResolvedMethod(ArtMethod* outer_method,
const InlineInfo& inline_info,
uint8_t inlining_depth)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
uint32_t method_index = inline_info.GetMethodIndexAtDepth(inlining_depth);
InvokeType invoke_type = static_cast<InvokeType>(
inline_info.GetInvokeTypeAtDepth(inlining_depth));
@@ -74,7 +74,7 @@
inline ArtMethod* GetCalleeSaveMethodCaller(ArtMethod** sp,
Runtime::CalleeSaveType type,
bool do_caller_check = false)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(type));
const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, type);
@@ -110,7 +110,7 @@
}
inline ArtMethod* GetCalleeSaveMethodCaller(Thread* self, Runtime::CalleeSaveType type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return GetCalleeSaveMethodCaller(
self->GetManagedStack()->GetTopQuickFrame(), type, true /* do_caller_check */);
}
@@ -403,7 +403,7 @@
// Explicit template declarations of FindFieldFromCode for all field access types.
#define EXPLICIT_FIND_FIELD_FROM_CODE_TEMPLATE_DECL(_type, _access_check) \
-template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE \
+template SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE \
ArtField* FindFieldFromCode<_type, _access_check>(uint32_t field_idx, \
ArtMethod* referrer, \
Thread* self, size_t expected_size) \
@@ -531,7 +531,7 @@
// Explicit template declarations of FindMethodFromCode for all invoke types.
#define EXPLICIT_FIND_METHOD_FROM_CODE_TEMPLATE_DECL(_type, _access_check) \
- template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE \
+ template SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE \
ArtMethod* FindMethodFromCode<_type, _access_check>(uint32_t method_idx, \
mirror::Object** this_object, \
ArtMethod** referrer, \
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index fc7f8b7..eaf26bc 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -38,7 +38,7 @@
ArtMethod* referrer,
Thread* self,
bool access_check)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (UNLIKELY(component_count < 0)) {
ThrowNegativeArraySizeException(component_count);
return nullptr; // Failure
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index 47865a2..dc04c0a 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -45,12 +45,12 @@
ALWAYS_INLINE inline mirror::Class* CheckObjectAlloc(uint32_t type_idx,
ArtMethod* method,
Thread* self, bool* slow_path)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass,
Thread* self,
bool* slow_path)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Given the context of a calling Method, use its DexCache to resolve a type to a Class. If it
// cannot be resolved, throw an error. If it can, use it to create an instance.
@@ -61,21 +61,21 @@
ArtMethod* method,
Thread* self,
gc::AllocatorType allocator_type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Given the context of a calling Method and a resolved class, create an instance.
template <bool kInstrumented>
ALWAYS_INLINE inline mirror::Object* AllocObjectFromCodeResolved(mirror::Class* klass,
Thread* self,
gc::AllocatorType allocator_type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Given the context of a calling Method and an initialized class, create an instance.
template <bool kInstrumented>
ALWAYS_INLINE inline mirror::Object* AllocObjectFromCodeInitialized(mirror::Class* klass,
Thread* self,
gc::AllocatorType allocator_type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template <bool kAccessCheck>
@@ -83,7 +83,7 @@
int32_t component_count,
ArtMethod* method,
bool* slow_path)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Given the context of a calling Method, use its DexCache to resolve a type to an array Class. If
// it cannot be resolved, throw an error. If it can, use it to create an array.
@@ -95,7 +95,7 @@
ArtMethod* method,
Thread* self,
gc::AllocatorType allocator_type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template <bool kAccessCheck, bool kInstrumented>
ALWAYS_INLINE inline mirror::Array* AllocArrayFromCodeResolved(mirror::Class* klass,
@@ -103,13 +103,13 @@
ArtMethod* method,
Thread* self,
gc::AllocatorType allocator_type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
extern mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, int32_t component_count,
ArtMethod* method, Thread* self,
bool access_check,
gc::AllocatorType allocator_type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
extern mirror::Array* CheckAndAllocArrayFromCodeInstrumented(uint32_t type_idx,
int32_t component_count,
@@ -117,7 +117,7 @@
Thread* self,
bool access_check,
gc::AllocatorType allocator_type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Type of find field operation for fast and slow case.
enum FindFieldType {
@@ -134,47 +134,47 @@
template<FindFieldType type, bool access_check>
inline ArtField* FindFieldFromCode(
uint32_t field_idx, ArtMethod* referrer, Thread* self, size_t expected_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<InvokeType type, bool access_check>
inline ArtMethod* FindMethodFromCode(
uint32_t method_idx, mirror::Object** this_object, ArtMethod** referrer, Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Fast path field resolution that can't initialize classes or throw exceptions.
inline ArtField* FindFieldFast(
uint32_t field_idx, ArtMethod* referrer, FindFieldType type, size_t expected_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Fast path method resolution that can't throw exceptions.
inline ArtMethod* FindMethodFast(
uint32_t method_idx, mirror::Object* this_object, ArtMethod* referrer, bool access_check,
InvokeType type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
inline mirror::Class* ResolveVerifyAndClinit(
uint32_t type_idx, ArtMethod* referrer, Thread* self, bool can_run_clinit, bool verify_access)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
-extern void ThrowStackOverflowError(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+extern void ThrowStackOverflowError(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
inline mirror::String* ResolveStringFromCode(ArtMethod* referrer, uint32_t string_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// TODO: annotalysis disabled as monitor semantics are maintained in Java code.
inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self)
NO_THREAD_SAFETY_ANALYSIS;
void CheckReferenceResult(mirror::Object* o, Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
JValue InvokeProxyInvocationHandler(ScopedObjectAccessAlreadyRunnable& soa, const char* shorty,
jobject rcvr_jobj, jobject interface_art_method_jobj,
std::vector<jvalue>& args)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool FillArrayData(mirror::Object* obj, const Instruction::ArrayDataPayload* payload)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template <typename INT_TYPE, typename FLOAT_TYPE>
inline INT_TYPE art_float_to_integral(FLOAT_TYPE f);
diff --git a/runtime/entrypoints/quick/callee_save_frame.h b/runtime/entrypoints/quick/callee_save_frame.h
index 7a44158..331de91 100644
--- a/runtime/entrypoints/quick/callee_save_frame.h
+++ b/runtime/entrypoints/quick/callee_save_frame.h
@@ -39,32 +39,32 @@
explicit ScopedQuickEntrypointChecks(Thread *self,
bool entry_check = kIsDebugBuild,
bool exit_check = kIsDebugBuild)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : self_(self), exit_check_(exit_check) {
+ SHARED_REQUIRES(Locks::mutator_lock_) : self_(self), exit_check_(exit_check) {
if (entry_check) {
TestsOnEntry();
}
}
- ScopedQuickEntrypointChecks() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ ScopedQuickEntrypointChecks() SHARED_REQUIRES(Locks::mutator_lock_)
: self_(kIsDebugBuild ? Thread::Current() : nullptr), exit_check_(kIsDebugBuild) {
if (kIsDebugBuild) {
TestsOnEntry();
}
}
- ~ScopedQuickEntrypointChecks() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ~ScopedQuickEntrypointChecks() SHARED_REQUIRES(Locks::mutator_lock_) {
if (exit_check_) {
TestsOnExit();
}
}
private:
- void TestsOnEntry() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void TestsOnEntry() SHARED_REQUIRES(Locks::mutator_lock_) {
Locks::mutator_lock_->AssertSharedHeld(self_);
self_->VerifyStack();
}
- void TestsOnExit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void TestsOnExit() SHARED_REQUIRES(Locks::mutator_lock_) {
Locks::mutator_lock_->AssertSharedHeld(self_);
self_->VerifyStack();
}
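ScopedQuickEntrypointChecks is an RAII guard: construction runs the entry checks (assert the mutator lock is held shared, verify the stack) and destruction repeats them on exit, with both sides compiled away outside debug builds. A usage sketch with a hypothetical entrypoint:

// Hypothetical entrypoint showing the intended pattern; the checks run
// automatically at scope entry and exit in debug builds.
extern "C" void artExampleFromCode(Thread* self)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  ScopedQuickEntrypointChecks sqec(self);  // TestsOnEntry() here
  // ... entrypoint body ...
}                                          // TestsOnExit() in the destructor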
diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
index f56b5e4..9311791 100644
--- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
@@ -30,7 +30,7 @@
#define GENERATE_ENTRYPOINTS_FOR_ALLOCATOR_INST(suffix, suffix2, instrumented_bool, allocator_type) \
extern "C" mirror::Object* artAllocObjectFromCode ##suffix##suffix2( \
uint32_t type_idx, ArtMethod* method, Thread* self) \
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+ SHARED_REQUIRES(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
if (kUseTlabFastPath && !instrumented_bool && allocator_type == gc::kAllocatorTypeTLAB) { \
mirror::Class* klass = method->GetDexCacheResolvedType<false>(type_idx); \
@@ -57,7 +57,7 @@
} \
extern "C" mirror::Object* artAllocObjectFromCodeResolved##suffix##suffix2( \
mirror::Class* klass, ArtMethod* method, Thread* self) \
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+ SHARED_REQUIRES(Locks::mutator_lock_) { \
UNUSED(method); \
ScopedQuickEntrypointChecks sqec(self); \
if (kUseTlabFastPath && !instrumented_bool && allocator_type == gc::kAllocatorTypeTLAB) { \
@@ -84,7 +84,7 @@
} \
extern "C" mirror::Object* artAllocObjectFromCodeInitialized##suffix##suffix2( \
mirror::Class* klass, ArtMethod* method, Thread* self) \
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+ SHARED_REQUIRES(Locks::mutator_lock_) { \
UNUSED(method); \
ScopedQuickEntrypointChecks sqec(self); \
if (kUseTlabFastPath && !instrumented_bool && allocator_type == gc::kAllocatorTypeTLAB) { \
@@ -109,34 +109,34 @@
} \
extern "C" mirror::Object* artAllocObjectFromCodeWithAccessCheck##suffix##suffix2( \
uint32_t type_idx, ArtMethod* method, Thread* self) \
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+ SHARED_REQUIRES(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
return AllocObjectFromCode<true, instrumented_bool>(type_idx, method, self, allocator_type); \
} \
extern "C" mirror::Array* artAllocArrayFromCode##suffix##suffix2( \
uint32_t type_idx, int32_t component_count, ArtMethod* method, Thread* self) \
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+ SHARED_REQUIRES(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
return AllocArrayFromCode<false, instrumented_bool>(type_idx, component_count, method, self, \
allocator_type); \
} \
extern "C" mirror::Array* artAllocArrayFromCodeResolved##suffix##suffix2( \
mirror::Class* klass, int32_t component_count, ArtMethod* method, Thread* self) \
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+ SHARED_REQUIRES(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
return AllocArrayFromCodeResolved<false, instrumented_bool>(klass, component_count, method, self, \
allocator_type); \
} \
extern "C" mirror::Array* artAllocArrayFromCodeWithAccessCheck##suffix##suffix2( \
uint32_t type_idx, int32_t component_count, ArtMethod* method, Thread* self) \
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+ SHARED_REQUIRES(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
return AllocArrayFromCode<true, instrumented_bool>(type_idx, component_count, method, self, \
allocator_type); \
} \
extern "C" mirror::Array* artCheckAndAllocArrayFromCode##suffix##suffix2( \
uint32_t type_idx, int32_t component_count, ArtMethod* method, Thread* self) \
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+ SHARED_REQUIRES(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
if (!instrumented_bool) { \
return CheckAndAllocArrayFromCode(type_idx, component_count, method, self, false, allocator_type); \
@@ -146,7 +146,7 @@
} \
extern "C" mirror::Array* artCheckAndAllocArrayFromCodeWithAccessCheck##suffix##suffix2( \
uint32_t type_idx, int32_t component_count, ArtMethod* method, Thread* self) \
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+ SHARED_REQUIRES(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
if (!instrumented_bool) { \
return CheckAndAllocArrayFromCode(type_idx, component_count, method, self, true, allocator_type); \
@@ -157,7 +157,7 @@
extern "C" mirror::String* artAllocStringFromBytesFromCode##suffix##suffix2( \
mirror::ByteArray* byte_array, int32_t high, int32_t offset, int32_t byte_count, \
Thread* self) \
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+ SHARED_REQUIRES(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
StackHandleScope<1> hs(self); \
Handle<mirror::ByteArray> handle_array(hs.NewHandle(byte_array)); \
@@ -166,7 +166,7 @@
} \
extern "C" mirror::String* artAllocStringFromCharsFromCode##suffix##suffix2( \
int32_t offset, int32_t char_count, mirror::CharArray* char_array, Thread* self) \
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+ SHARED_REQUIRES(Locks::mutator_lock_) { \
StackHandleScope<1> hs(self); \
Handle<mirror::CharArray> handle_array(hs.NewHandle(char_array)); \
return mirror::String::AllocFromCharArray<instrumented_bool>(self, char_count, handle_array, \
@@ -174,7 +174,7 @@
} \
extern "C" mirror::String* artAllocStringFromStringFromCode##suffix##suffix2( \
mirror::String* string, Thread* self) \
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+ SHARED_REQUIRES(Locks::mutator_lock_) { \
StackHandleScope<1> hs(self); \
Handle<mirror::String> handle_string(hs.NewHandle(string)); \
return mirror::String::AllocFromString<instrumented_bool>(self, handle_string->GetLength(), \
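Each of these macro-generated allocators first tries a thread-local (TLAB) bump-pointer fast path when the runtime is uninstrumented, falling back to the generic AllocObjectFromCode/AllocArrayFromCode slow paths otherwise. A simplified sketch of bump-pointer allocation; the names here are illustrative, not ART's, and the real fast path also checks class status and object size:

// Illustrative TLAB fast path: allocation is a pointer bump within a
// thread-private buffer, so no locking or atomics are needed.
mirror::Object* TryTlabAlloc(uint8_t** tlab_pos, uint8_t* tlab_end,
                             size_t byte_count) {
  uint8_t* pos = *tlab_pos;
  if (pos + byte_count <= tlab_end) {
    *tlab_pos = pos + byte_count;  // bump the cursor
    return reinterpret_cast<mirror::Object*>(pos);
  }
  return nullptr;                  // buffer exhausted: take the slow path
}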
diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.h b/runtime/entrypoints/quick/quick_alloc_entrypoints.h
index ec0aef5..14a8e04 100644
--- a/runtime/entrypoints/quick/quick_alloc_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.h
@@ -31,10 +31,10 @@
// holding the runtime shutdown lock and the mutator lock when we update the entrypoints.
void SetQuickAllocEntryPointsAllocator(gc::AllocatorType allocator)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::runtime_shutdown_lock_);
+ REQUIRES(Locks::mutator_lock_, Locks::runtime_shutdown_lock_);
void SetQuickAllocEntryPointsInstrumented(bool instrumented)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::runtime_shutdown_lock_);
+ REQUIRES(Locks::mutator_lock_, Locks::runtime_shutdown_lock_);
} // namespace art
diff --git a/runtime/entrypoints/quick/quick_cast_entrypoints.cc b/runtime/entrypoints/quick/quick_cast_entrypoints.cc
index 37de380..968ac53 100644
--- a/runtime/entrypoints/quick/quick_cast_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_cast_entrypoints.cc
@@ -21,7 +21,7 @@
// Assignable test for code; won't throw. Null and equality tests have already been performed.
extern "C" uint32_t artIsAssignableFromCode(mirror::Class* klass, mirror::Class* ref_class)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(klass != nullptr);
DCHECK(ref_class != nullptr);
return klass->IsAssignableFrom(ref_class) ? 1 : 0;
diff --git a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
index f1b5445..a4feac1 100644
--- a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
@@ -28,7 +28,7 @@
namespace art {
-extern "C" NO_RETURN void artDeoptimize(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+extern "C" NO_RETURN void artDeoptimize(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
if (VLOG_IS_ON(deopt)) {
diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
index 3cefc47..b12b118 100644
--- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
@@ -26,7 +26,7 @@
namespace art {
extern "C" mirror::Class* artInitializeStaticStorageFromCode(uint32_t type_idx, Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// Called to ensure static storage base is initialized for direct static field reads and writes.
// A class may be accessing another class' fields when it doesn't have access, as access has been
// given by inheritance.
@@ -36,7 +36,7 @@
}
extern "C" mirror::Class* artInitializeTypeFromCode(uint32_t type_idx, Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// Called when method->dex_cache_resolved_types_[] misses.
ScopedQuickEntrypointChecks sqec(self);
auto* caller = GetCalleeSaveMethodCaller(self, Runtime::kRefsOnly);
@@ -44,7 +44,7 @@
}
extern "C" mirror::Class* artInitializeTypeAndVerifyAccessFromCode(uint32_t type_idx, Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// Called when the caller isn't guaranteed to have access to a type and the dex cache may be
// unpopulated.
ScopedQuickEntrypointChecks sqec(self);
@@ -53,7 +53,7 @@
}
extern "C" mirror::String* artResolveStringFromCode(int32_t string_idx, Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
auto* caller = GetCalleeSaveMethodCaller(self, Runtime::kRefsOnly);
return ResolveStringFromCode(caller, string_idx);
diff --git a/runtime/entrypoints/quick/quick_entrypoints.h b/runtime/entrypoints/quick/quick_entrypoints.h
index cef2510..3d3f7a1 100644
--- a/runtime/entrypoints/quick/quick_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_entrypoints.h
@@ -20,6 +20,7 @@
#include <jni.h>
#include "base/macros.h"
+#include "base/mutex.h"
#include "offsets.h"
#define QUICK_ENTRYPOINT_OFFSET(ptr_size, x) \
@@ -71,6 +72,16 @@
Thread* self)
NO_THREAD_SAFETY_ANALYSIS HOT_ATTR;
+// Read barrier entrypoints.
+// Compilers for ARM, ARM64, MIPS, MIPS64 can insert a call to this function directly.
+// For x86 and x86_64, compilers need a wrapper assembly function to handle the ABI mismatch.
+// This is the read barrier slow path for instance and static fields and reference-type arrays.
+// TODO: Currently the read barrier does not have a fast path for compilers to directly generate.
+// Ideally the slow path should only take one parameter "ref".
+extern "C" mirror::Object* artReadBarrierSlow(mirror::Object* ref, mirror::Object* obj,
+ uint32_t offset)
+ SHARED_REQUIRES(Locks::mutator_lock_) HOT_ATTR;
+
} // namespace art
#endif // ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_H_
diff --git a/runtime/entrypoints/quick/quick_entrypoints_list.h b/runtime/entrypoints/quick/quick_entrypoints_list.h
index 60bbf4a..73d8ae7 100644
--- a/runtime/entrypoints/quick/quick_entrypoints_list.h
+++ b/runtime/entrypoints/quick/quick_entrypoints_list.h
@@ -145,7 +145,8 @@
V(NewStringFromStringBuffer, void) \
V(NewStringFromStringBuilder, void) \
\
- V(ReadBarrierJni, void, mirror::CompressedReference<mirror::Object>*, Thread*)
+ V(ReadBarrierJni, void, mirror::CompressedReference<mirror::Object>*, Thread*) \
+ V(ReadBarrierSlow, mirror::Object*, mirror::Object*, mirror::Object*, uint32_t)
#endif // ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_LIST_H_
#undef ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_LIST_H_ // #define is only for lint.
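quick_entrypoints_list.h is an X-macro: each V(Name, ReturnType, ArgTypes...) row is expanded several times, into QuickEntryPoints struct fields, offset constants, and assembly stubs, so appending ReadBarrierSlow here is all that is needed to grow the trampoline table everywhere. A generic sketch of the idiom, with illustrative names:

// X-macro idiom in miniature (names are examples, not ART's).
#define EXAMPLE_ENTRYPOINT_LIST(V) \
  V(Alloc, void*, size_t)          \
  V(Free, void, void*)

// One expansion: a struct with one function pointer per row, in list order.
struct ExampleEntryPoints {
#define ENTRYPOINT_FIELD(name, rettype, ...) rettype (*p##name)(__VA_ARGS__);
  EXAMPLE_ENTRYPOINT_LIST(ENTRYPOINT_FIELD)
#undef ENTRYPOINT_FIELD
};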
diff --git a/runtime/entrypoints/quick/quick_field_entrypoints.cc b/runtime/entrypoints/quick/quick_field_entrypoints.cc
index 871cf3c..0a1d806 100644
--- a/runtime/entrypoints/quick/quick_field_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_field_entrypoints.cc
@@ -27,7 +27,7 @@
extern "C" int8_t artGetByteStaticFromCode(uint32_t field_idx, ArtMethod* referrer,
Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int8_t));
if (LIKELY(field != nullptr)) {
@@ -42,7 +42,7 @@
extern "C" uint8_t artGetBooleanStaticFromCode(uint32_t field_idx, ArtMethod* referrer,
Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int8_t));
if (LIKELY(field != nullptr)) {
@@ -57,7 +57,7 @@
extern "C" int16_t artGetShortStaticFromCode(uint32_t field_idx, ArtMethod* referrer,
Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int16_t));
if (LIKELY(field != nullptr)) {
@@ -73,7 +73,7 @@
extern "C" uint16_t artGetCharStaticFromCode(uint32_t field_idx,
ArtMethod* referrer,
Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int16_t));
if (LIKELY(field != nullptr)) {
@@ -89,7 +89,7 @@
extern "C" uint32_t artGet32StaticFromCode(uint32_t field_idx,
ArtMethod* referrer,
Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int32_t));
if (LIKELY(field != nullptr)) {
@@ -105,7 +105,7 @@
extern "C" uint64_t artGet64StaticFromCode(uint32_t field_idx,
ArtMethod* referrer,
Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int64_t));
if (LIKELY(field != nullptr)) {
@@ -121,7 +121,7 @@
extern "C" mirror::Object* artGetObjStaticFromCode(uint32_t field_idx,
ArtMethod* referrer,
Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticObjectRead,
sizeof(mirror::HeapReference<mirror::Object>));
@@ -138,7 +138,7 @@
extern "C" int8_t artGetByteInstanceFromCode(uint32_t field_idx, mirror::Object* obj,
ArtMethod* referrer, Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int8_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -158,7 +158,7 @@
extern "C" uint8_t artGetBooleanInstanceFromCode(uint32_t field_idx, mirror::Object* obj,
ArtMethod* referrer, Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int8_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -177,7 +177,7 @@
}
extern "C" int16_t artGetShortInstanceFromCode(uint32_t field_idx, mirror::Object* obj,
ArtMethod* referrer, Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int16_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -197,7 +197,7 @@
extern "C" uint16_t artGetCharInstanceFromCode(uint32_t field_idx, mirror::Object* obj,
ArtMethod* referrer, Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int16_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -217,7 +217,7 @@
extern "C" uint32_t artGet32InstanceFromCode(uint32_t field_idx, mirror::Object* obj,
ArtMethod* referrer, Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int32_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -237,7 +237,7 @@
extern "C" uint64_t artGet64InstanceFromCode(uint32_t field_idx, mirror::Object* obj,
ArtMethod* referrer, Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int64_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -258,7 +258,7 @@
extern "C" mirror::Object* artGetObjInstanceFromCode(uint32_t field_idx, mirror::Object* obj,
ArtMethod* referrer,
Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstanceObjectRead,
sizeof(mirror::HeapReference<mirror::Object>));
@@ -279,7 +279,7 @@
extern "C" int artSet8StaticFromCode(uint32_t field_idx, uint32_t new_value,
ArtMethod* referrer, Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int8_t));
if (LIKELY(field != nullptr)) {
@@ -310,7 +310,7 @@
extern "C" int artSet16StaticFromCode(uint32_t field_idx, uint16_t new_value,
ArtMethod* referrer, Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int16_t));
if (LIKELY(field != nullptr)) {
@@ -341,7 +341,7 @@
extern "C" int artSet32StaticFromCode(uint32_t field_idx, uint32_t new_value,
ArtMethod* referrer, Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int32_t));
if (LIKELY(field != nullptr)) {
@@ -360,7 +360,7 @@
extern "C" int artSet64StaticFromCode(uint32_t field_idx, ArtMethod* referrer,
uint64_t new_value, Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int64_t));
if (LIKELY(field != nullptr)) {
@@ -379,7 +379,7 @@
extern "C" int artSetObjStaticFromCode(uint32_t field_idx, mirror::Object* new_value,
ArtMethod* referrer, Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticObjectWrite,
sizeof(mirror::HeapReference<mirror::Object>));
@@ -402,7 +402,7 @@
extern "C" int artSet8InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint8_t new_value,
ArtMethod* referrer, Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int8_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -441,7 +441,7 @@
extern "C" int artSet16InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint16_t new_value,
ArtMethod* referrer, Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int16_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -481,7 +481,7 @@
extern "C" int artSet32InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint32_t new_value,
ArtMethod* referrer, Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int32_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -509,7 +509,7 @@
extern "C" int artSet64InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint64_t new_value,
ArtMethod* referrer, Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int64_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -534,7 +534,7 @@
extern "C" int artSetObjInstanceFromCode(uint32_t field_idx, mirror::Object* obj,
mirror::Object* new_value,
ArtMethod* referrer, Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite,
sizeof(mirror::HeapReference<mirror::Object>));
@@ -557,4 +557,16 @@
return -1; // failure
}
+// TODO: Currently the read barrier does not have a fast path. Ideally the slow path should only
+// take one parameter "ref", which is generated by the fast path.
+extern "C" mirror::Object* artReadBarrierSlow(mirror::Object* ref ATTRIBUTE_UNUSED,
+ mirror::Object* obj, uint32_t offset) {
+ DCHECK(kUseReadBarrier);
+ uint8_t* raw_addr = reinterpret_cast<uint8_t*>(obj) + offset;
+ mirror::HeapReference<mirror::Object>* ref_addr =
+ reinterpret_cast<mirror::HeapReference<mirror::Object>*>(raw_addr);
+ return ReadBarrier::Barrier<mirror::Object, kWithReadBarrier, true>(obj, MemberOffset(offset),
+ ref_addr);
+}
+
} // namespace art
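As the TODO notes, there is no fast path yet: generated code calls artReadBarrierSlow for every barriered reference load, and the entrypoint recomputes the field address from (obj, offset) and routes it through ReadBarrier::Barrier, ignoring the ref it was handed. A hypothetical illustration of the call shape from the caller's side:

// Hypothetical caller-side shape (not actual generated code): the raw
// loaded reference is passed in but currently unused by the slow path.
mirror::Object* LoadFieldWithReadBarrier(mirror::Object* obj, uint32_t offset,
                                         mirror::Object* raw_ref) {
  return artReadBarrierSlow(raw_ref, obj, offset);
}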
diff --git a/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc b/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc
index d3991cd..22b2fa3 100644
--- a/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc
@@ -26,7 +26,7 @@
*/
extern "C" int artHandleFillArrayDataFromCode(uint32_t payload_offset, mirror::Array* array,
ArtMethod* method, Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
const uint16_t* const insns = method->GetCodeItem()->insns_;
const Instruction::ArrayDataPayload* payload =
diff --git a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
index 2b5c15b..ad5ee84 100644
--- a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
@@ -28,7 +28,7 @@
mirror::Object* this_object,
Thread* self,
uintptr_t lr)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// Instrumentation changes the stack. Thus, when exiting, the stack cannot be verified, so skip
// that part.
ScopedQuickEntrypointChecks sqec(self, kIsDebugBuild, false);
@@ -50,7 +50,7 @@
extern "C" TwoWordReturn artInstrumentationMethodExitFromCode(Thread* self, ArtMethod** sp,
uint64_t gpr_result,
uint64_t fpr_result)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// Compute address of return PC and sanity check that it currently holds 0.
size_t return_pc_offset = GetCalleeSaveReturnPcOffset(kRuntimeISA, Runtime::kRefsOnly);
uintptr_t* return_pc = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(sp) +
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index de225ad..f69c39e 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -63,7 +63,7 @@
}
static void PopLocalReferences(uint32_t saved_local_ref_cookie, Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
JNIEnvExt* env = self->GetJniEnv();
env->locals.SetSegmentState(env->local_ref_cookie);
env->local_ref_cookie = saved_local_ref_cookie;
diff --git a/runtime/entrypoints/quick/quick_lock_entrypoints.cc b/runtime/entrypoints/quick/quick_lock_entrypoints.cc
index 4423c08..3bf001e 100644
--- a/runtime/entrypoints/quick/quick_lock_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_lock_entrypoints.cc
@@ -21,7 +21,7 @@
namespace art {
extern "C" int artLockObjectFromCode(mirror::Object* obj, Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
NO_THREAD_SAFETY_ANALYSIS /* EXCLUSIVE_LOCK_FUNCTION(Monitor::monitor_lock_) */ {
ScopedQuickEntrypointChecks sqec(self);
if (UNLIKELY(obj == nullptr)) {
@@ -41,7 +41,7 @@
}
extern "C" int artUnlockObjectFromCode(mirror::Object* obj, Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
NO_THREAD_SAFETY_ANALYSIS /* UNLOCK_FUNCTION(Monitor::monitor_lock_) */ {
ScopedQuickEntrypointChecks sqec(self);
if (UNLIKELY(obj == nullptr)) {
diff --git a/runtime/entrypoints/quick/quick_thread_entrypoints.cc b/runtime/entrypoints/quick/quick_thread_entrypoints.cc
index 87e0c6e..47b3eff 100644
--- a/runtime/entrypoints/quick/quick_thread_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_thread_entrypoints.cc
@@ -19,7 +19,7 @@
namespace art {
-extern "C" void artTestSuspendFromCode(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+extern "C" void artTestSuspendFromCode(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) {
// Called when the suspend-count check value is 0 and thread->suspend_count_ != 0.
ScopedQuickEntrypointChecks sqec(self);
self->CheckSuspend();
diff --git a/runtime/entrypoints/quick/quick_throw_entrypoints.cc b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
index f22edc1..5a82b3a 100644
--- a/runtime/entrypoints/quick/quick_throw_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
@@ -25,14 +25,14 @@
// Deliver an exception that is pending on the thread, setting up a callee-save frame on the way.
extern "C" NO_RETURN void artDeliverPendingExceptionFromCode(Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
self->QuickDeliverException();
}
// Called by generated code to throw an exception.
extern "C" NO_RETURN void artDeliverExceptionFromCode(mirror::Throwable* exception, Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
/*
* exception may be null, in which case this routine should
* throw NPE. NOTE: this is a convenience for generated code,
@@ -51,7 +51,7 @@
// Called by generated code to throw a NullPointerException.
extern "C" NO_RETURN void artThrowNullPointerExceptionFromCode(Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
self->NoteSignalBeingHandled();
ThrowNullPointerExceptionFromDexPC();
@@ -61,7 +61,7 @@
// Called by generated code to throw an arithmetic divide-by-zero exception.
extern "C" NO_RETURN void artThrowDivZeroFromCode(Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ThrowArithmeticExceptionDivideByZero();
self->QuickDeliverException();
@@ -69,14 +69,14 @@
// Called by generated code to throw an array-index-out-of-bounds exception.
extern "C" NO_RETURN void artThrowArrayBoundsFromCode(int index, int length, Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ThrowArrayIndexOutOfBoundsException(index, length);
self->QuickDeliverException();
}
extern "C" NO_RETURN void artThrowStackOverflowFromCode(Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
self->NoteSignalBeingHandled();
ThrowStackOverflowError(self);
@@ -85,7 +85,7 @@
}
extern "C" NO_RETURN void artThrowNoSuchMethodFromCode(int32_t method_idx, Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ThrowNoSuchMethodError(method_idx);
self->QuickDeliverException();
@@ -94,7 +94,7 @@
extern "C" NO_RETURN void artThrowClassCastException(mirror::Class* dest_type,
mirror::Class* src_type,
Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
DCHECK(!dest_type->IsAssignableFrom(src_type));
ThrowClassCastException(dest_type, src_type);
@@ -103,7 +103,7 @@
extern "C" NO_RETURN void artThrowArrayStoreException(mirror::Object* array, mirror::Object* value,
Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ThrowArrayStoreException(value->GetClass(), array->GetClass());
self->QuickDeliverException();
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 4f76ebd..6fe2bb6 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -280,7 +280,7 @@
// kRefAndArgs runtime method. Since 'this' is a reference, it is located in the
// 1st GPR.
static mirror::Object* GetProxyThisObject(ArtMethod** sp)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
CHECK((*sp)->IsProxyMethod());
CHECK_EQ(kQuickCalleeSaveFrame_RefAndArgs_FrameSize, (*sp)->GetFrameSizeInBytes());
CHECK_GT(kNumQuickGprArgs, 0u);
@@ -291,19 +291,19 @@
return reinterpret_cast<StackReference<mirror::Object>*>(this_arg_address)->AsMirrorPtr();
}
- static ArtMethod* GetCallingMethod(ArtMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ static ArtMethod* GetCallingMethod(ArtMethod** sp) SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK((*sp)->IsCalleeSaveMethod());
return GetCalleeSaveMethodCaller(sp, Runtime::kRefsAndArgs);
}
- static ArtMethod* GetOuterMethod(ArtMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ static ArtMethod* GetOuterMethod(ArtMethod** sp) SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK((*sp)->IsCalleeSaveMethod());
uint8_t* previous_sp =
reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize;
return *reinterpret_cast<ArtMethod**>(previous_sp);
}
- static uint32_t GetCallingDexPc(ArtMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ static uint32_t GetCallingDexPc(ArtMethod** sp) SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK((*sp)->IsCalleeSaveMethod());
const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, Runtime::kRefsAndArgs);
ArtMethod** caller_sp = reinterpret_cast<ArtMethod**>(
@@ -329,14 +329,14 @@
}
// For the given quick ref and args quick frame, return the caller's PC.
- static uintptr_t GetCallingPc(ArtMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ static uintptr_t GetCallingPc(ArtMethod** sp) SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK((*sp)->IsCalleeSaveMethod());
uint8_t* lr = reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_LrOffset;
return *reinterpret_cast<uintptr_t*>(lr);
}
QuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty,
- uint32_t shorty_len) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
+ uint32_t shorty_len) SHARED_REQUIRES(Locks::mutator_lock_) :
is_static_(is_static), shorty_(shorty), shorty_len_(shorty_len),
gpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset),
fpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset),
@@ -421,7 +421,7 @@
}
}
- void VisitArguments() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void VisitArguments() SHARED_REQUIRES(Locks::mutator_lock_) {
// (a) 'stack_args_' should point to the first method's argument
// (b) whatever the argument type is, 'stack_index_' should
// advance with each visit.
@@ -571,7 +571,7 @@
// Returns the 'this' object of a proxy method. This function is only used by StackVisitor. It
// allows using the QuickArgumentVisitor constants without moving all the code into its own module.
extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return QuickArgumentVisitor::GetProxyThisObject(sp);
}
@@ -582,7 +582,7 @@
uint32_t shorty_len, ShadowFrame* sf, size_t first_arg_reg) :
QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sf_(sf), cur_reg_(first_arg_reg) {}
- void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
+ void Visit() SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE;
private:
ShadowFrame* const sf_;
@@ -625,7 +625,7 @@
}
extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self, ArtMethod** sp)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// Ensure we don't get thread suspension until the object arguments are safely in the shadow
// frame.
ScopedQuickEntrypointChecks sqec(self);
@@ -692,9 +692,9 @@
ScopedObjectAccessUnchecked* soa, std::vector<jvalue>* args) :
QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa), args_(args) {}
- void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
+ void Visit() SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE;
- void FixupReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void FixupReferences() SHARED_REQUIRES(Locks::mutator_lock_);
private:
ScopedObjectAccessUnchecked* const soa_;
@@ -753,7 +753,7 @@
// field within the proxy object, which will box the primitive arguments and deal with error cases.
extern "C" uint64_t artQuickProxyInvokeHandler(
ArtMethod* proxy_method, mirror::Object* receiver, Thread* self, ArtMethod** sp)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(proxy_method->IsProxyMethod()) << PrettyMethod(proxy_method);
DCHECK(receiver->GetClass()->IsProxyClass()) << PrettyMethod(proxy_method);
// Ensure we don't get thread suspension until the object arguments are safely in jobjects.
@@ -809,9 +809,9 @@
uint32_t shorty_len, ScopedObjectAccessUnchecked* soa) :
QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {}
- void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
+ void Visit() SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE;
- void FixupReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void FixupReferences() SHARED_REQUIRES(Locks::mutator_lock_);
private:
ScopedObjectAccessUnchecked* const soa_;
@@ -842,7 +842,7 @@
// Lazily resolve a method for quick. Called by stub code.
extern "C" const void* artQuickResolutionTrampoline(
ArtMethod* called, mirror::Object* receiver, Thread* self, ArtMethod** sp)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// The resolution trampoline stashes the resolved method into the callee-save frame to transport
// it. Thus, when exiting, the stack cannot be verified (as the resolved method most likely
// does not have the same stack layout as the callee-save method).
@@ -1196,7 +1196,7 @@
return gpr_index_ > 0;
}
- void AdvanceHandleScope(mirror::Object* ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void AdvanceHandleScope(mirror::Object* ptr) SHARED_REQUIRES(Locks::mutator_lock_) {
uintptr_t handle = PushHandle(ptr);
if (HaveHandleScopeGpr()) {
gpr_index_--;
@@ -1384,7 +1384,7 @@
void PushStack(uintptr_t val) {
delegate_->PushStack(val);
}
- uintptr_t PushHandle(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uintptr_t PushHandle(mirror::Object* ref) SHARED_REQUIRES(Locks::mutator_lock_) {
return delegate_->PushHandle(ref);
}
@@ -1443,11 +1443,11 @@
}
virtual void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
UNUSED(sm);
}
- void Walk(const char* shorty, uint32_t shorty_len) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void Walk(const char* shorty, uint32_t shorty_len) SHARED_REQUIRES(Locks::mutator_lock_) {
BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize> sm(this);
WalkHeader(&sm);
@@ -1519,7 +1519,7 @@
//
// Note: assumes ComputeAll() has been run before.
void LayoutCalleeSaveFrame(Thread* self, ArtMethod*** m, void* sp, HandleScope** handle_scope)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ArtMethod* method = **m;
DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), sizeof(void*));
@@ -1560,7 +1560,7 @@
// Re-layout the callee-save frame (insert a handle-scope). Then add space for the cookie.
// Returns the new bottom. Note: this may be unaligned.
uint8_t* LayoutJNISaveFrame(Thread* self, ArtMethod*** m, void* sp, HandleScope** handle_scope)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// First, fix up the layout of the callee-save frame.
// We have to squeeze in the HandleScope, and relocate the method pointer.
LayoutCalleeSaveFrame(self, m, sp, handle_scope);
@@ -1578,7 +1578,7 @@
uint8_t* ComputeLayout(Thread* self, ArtMethod*** m, const char* shorty, uint32_t shorty_len,
HandleScope** handle_scope, uintptr_t** start_stack, uintptr_t** start_gpr,
uint32_t** start_fpr)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
Walk(shorty, shorty_len);
// JNI part.
@@ -1594,7 +1594,7 @@
// Add JNIEnv* and jobj/jclass before the shorty-derived elements.
void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
private:
uint32_t num_handle_scope_references_;
@@ -1650,7 +1650,7 @@
cur_stack_arg_++;
}
- virtual uintptr_t PushHandle(mirror::Object*) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ virtual uintptr_t PushHandle(mirror::Object*) SHARED_REQUIRES(Locks::mutator_lock_) {
LOG(FATAL) << "(Non-JNI) Native call does not use handles.";
UNREACHABLE();
}
@@ -1688,16 +1688,16 @@
}
}
- void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
+ void Visit() SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE;
- void FinalizeHandleScope(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void FinalizeHandleScope(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
StackReference<mirror::Object>* GetFirstHandleScopeEntry()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return handle_scope_->GetHandle(0).GetReference();
}
- jobject GetFirstHandleScopeJObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ jobject GetFirstHandleScopeJObject() const SHARED_REQUIRES(Locks::mutator_lock_) {
return handle_scope_->GetHandle(0).ToJObject();
}
@@ -1713,7 +1713,7 @@
HandleScope* handle_scope) : FillNativeCall(gpr_regs, fpr_regs, stack_args),
handle_scope_(handle_scope), cur_entry_(0) {}
- uintptr_t PushHandle(mirror::Object* ref) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ uintptr_t PushHandle(mirror::Object* ref) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args, HandleScope* scope) {
FillNativeCall::Reset(gpr_regs, fpr_regs, stack_args);
@@ -1721,7 +1721,7 @@
cur_entry_ = 0U;
}
- void ResetRemainingScopeSlots() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void ResetRemainingScopeSlots() SHARED_REQUIRES(Locks::mutator_lock_) {
// Initialize padding entries.
size_t expected_slots = handle_scope_->NumberOfReferences();
while (cur_entry_ < expected_slots) {
@@ -1841,7 +1841,7 @@
* 2) An error, if the value is negative.
*/
extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self, ArtMethod** sp)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ArtMethod* called = *sp;
DCHECK(called->IsNative()) << PrettyMethod(called, true);
uint32_t shorty_len = 0;
@@ -1914,7 +1914,7 @@
* unlocking.
*/
extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self, jvalue result, uint64_t result_f)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame();
uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
ArtMethod* called = *sp;
@@ -1971,7 +1971,7 @@
// for the method pointer.
//
// It is valid to use this, as at the usage points here (returns from C functions) we are assumed
-// to hold the mutator lock (see SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) annotations).
+// to hold the mutator lock (see SHARED_REQUIRES(Locks::mutator_lock_) annotations).
template<InvokeType type, bool access_check>
static TwoWordReturn artInvokeCommon(uint32_t method_idx, mirror::Object* this_object, Thread* self,
@@ -2013,7 +2013,7 @@
// Explicit artInvokeCommon template function declarations to please analysis tool.
#define EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(type, access_check) \
- template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) \
+ template SHARED_REQUIRES(Locks::mutator_lock_) \
TwoWordReturn artInvokeCommon<type, access_check>( \
uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
@@ -2032,31 +2032,31 @@
// See comments in runtime_support_asm.S
extern "C" TwoWordReturn artInvokeInterfaceTrampolineWithAccessCheck(
uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return artInvokeCommon<kInterface, true>(method_idx, this_object, self, sp);
}
extern "C" TwoWordReturn artInvokeDirectTrampolineWithAccessCheck(
uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return artInvokeCommon<kDirect, true>(method_idx, this_object, self, sp);
}
extern "C" TwoWordReturn artInvokeStaticTrampolineWithAccessCheck(
uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return artInvokeCommon<kStatic, true>(method_idx, this_object, self, sp);
}
extern "C" TwoWordReturn artInvokeSuperTrampolineWithAccessCheck(
uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return artInvokeCommon<kSuper, true>(method_idx, this_object, self, sp);
}
extern "C" TwoWordReturn artInvokeVirtualTrampolineWithAccessCheck(
uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return artInvokeCommon<kVirtual, true>(method_idx, this_object, self, sp);
}
@@ -2064,7 +2064,7 @@
extern "C" TwoWordReturn artInvokeInterfaceTrampoline(uint32_t dex_method_idx,
mirror::Object* this_object,
Thread* self, ArtMethod** sp)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
// The optimizing compiler currently does not inline methods that have an interface
// invocation. We use the outer method directly to avoid fetching a stack map, which is
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index c05c935..f7a3cd5 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -311,8 +311,9 @@
sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pNewStringFromStringBuilder, pReadBarrierJni,
sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pReadBarrierJni, pReadBarrierSlow, sizeof(void*));
- CHECKED(OFFSETOF_MEMBER(QuickEntryPoints, pReadBarrierJni)
+ CHECKED(OFFSETOF_MEMBER(QuickEntryPoints, pReadBarrierSlow)
+ sizeof(void*) == sizeof(QuickEntryPoints), QuickEntryPoints_all);
}
};
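The order test pins the QuickEntryPoints layout: every entry must sit exactly sizeof(void*) past its predecessor, and the final CHECKED asserts that the last member ends the struct, so adding ReadBarrierSlow to the list requires exactly the two updates above. A sketch of the invariant as plain static_asserts, reusing the illustrative ExampleEntryPoints from the X-macro sketch earlier:

#include <cstddef>  // offsetof

// Simplified version of what EXPECT_OFFSET_DIFFNP and the final CHECKED
// enforce for the real struct.
static_assert(offsetof(ExampleEntryPoints, pFree) ==
                  offsetof(ExampleEntryPoints, pAlloc) + sizeof(void*),
              "entrypoints must be contiguous");
static_assert(offsetof(ExampleEntryPoints, pFree) + sizeof(void*) ==
                  sizeof(ExampleEntryPoints),
              "the last entrypoint must end the struct");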
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index 93f32e8..55b1772 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -74,12 +74,12 @@
// Beware: Mixing atomic pushes and atomic pops will cause the ABA problem.
// Returns false if we overflowed the stack.
- bool AtomicPushBackIgnoreGrowthLimit(T* value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool AtomicPushBackIgnoreGrowthLimit(T* value) SHARED_REQUIRES(Locks::mutator_lock_) {
return AtomicPushBackInternal(value, capacity_);
}
// Returns false if we overflowed the stack.
- bool AtomicPushBack(T* value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool AtomicPushBack(T* value) SHARED_REQUIRES(Locks::mutator_lock_) {
return AtomicPushBackInternal(value, growth_limit_);
}
@@ -87,7 +87,7 @@
// slots. Returns false if we overflowed the stack.
bool AtomicBumpBack(size_t num_slots, StackReference<T>** start_address,
StackReference<T>** end_address)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (kIsDebugBuild) {
debug_is_sorted_ = false;
}
@@ -113,7 +113,7 @@
return true;
}
- void AssertAllZero() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void AssertAllZero() SHARED_REQUIRES(Locks::mutator_lock_) {
if (kIsDebugBuild) {
for (size_t i = 0; i < capacity_; ++i) {
DCHECK_EQ(begin_[i].AsMirrorPtr(), static_cast<T*>(nullptr)) << "i=" << i;
@@ -121,7 +121,7 @@
}
}
- void PushBack(T* value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void PushBack(T* value) SHARED_REQUIRES(Locks::mutator_lock_) {
if (kIsDebugBuild) {
debug_is_sorted_ = false;
}
@@ -131,7 +131,7 @@
begin_[index].Assign(value);
}
- T* PopBack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ T* PopBack() SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK_GT(back_index_.LoadRelaxed(), front_index_.LoadRelaxed());
// Decrement the back index non-atomically.
back_index_.StoreRelaxed(back_index_.LoadRelaxed() - 1);
@@ -194,12 +194,12 @@
}
}
- bool ContainsSorted(const T* value) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool ContainsSorted(const T* value) const SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(debug_is_sorted_);
return std::binary_search(Begin(), End(), value, ObjectComparator());
}
- bool Contains(const T* value) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool Contains(const T* value) const SHARED_REQUIRES(Locks::mutator_lock_) {
for (auto cur = Begin(), end = End(); cur != end; ++cur) {
if (cur->AsMirrorPtr() == value) {
return true;
@@ -221,7 +221,7 @@
// Returns false if we overflowed the stack.
bool AtomicPushBackInternal(T* value, size_t limit) ALWAYS_INLINE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (kIsDebugBuild) {
debug_is_sorted_ = false;
}
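The atomic pushes reserve a slot by compare-and-swap on back_index_, while PopBack decrements it non-atomically; that asymmetry is why the header warns that mixing atomic pushes with atomic pops invites the ABA problem. A simplified sketch of the CAS reservation loop, with illustrative types (the real code uses ART's Atomic<int32_t> and StackReference<T>):

#include <atomic>
#include <cstdint>

// Reserve one slot at the back of the stack, or fail if full. Losing a
// CAS race simply retries with the index reloaded by compare_exchange_weak.
template <typename T>
bool AtomicPush(std::atomic<int32_t>& back_index, int32_t limit,
                T** slots, T* value) {
  int32_t index = back_index.load(std::memory_order_relaxed);
  do {
    if (index >= limit) {
      return false;  // stack overflowed
    }
  } while (!back_index.compare_exchange_weak(index, index + 1));
  slots[index] = value;  // slot 'index' is now exclusively ours
  return true;
}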
diff --git a/runtime/gc/accounting/card_table.h b/runtime/gc/accounting/card_table.h
index 34e6aa3..88a6c6c 100644
--- a/runtime/gc/accounting/card_table.h
+++ b/runtime/gc/accounting/card_table.h
@@ -107,8 +107,8 @@
size_t Scan(SpaceBitmap<kObjectAlignment>* bitmap, uint8_t* scan_begin, uint8_t* scan_end,
const Visitor& visitor,
const uint8_t minimum_age = kCardDirty) const
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Assertion used to check the given address is covered by the card table
void CheckAddrIsInCardTable(const uint8_t* addr) const;
diff --git a/runtime/gc/accounting/heap_bitmap.h b/runtime/gc/accounting/heap_bitmap.h
index 1648aef..0b96979 100644
--- a/runtime/gc/accounting/heap_bitmap.h
+++ b/runtime/gc/accounting/heap_bitmap.h
@@ -35,34 +35,34 @@
class HeapBitmap {
public:
- bool Test(const mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- void Clear(const mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ bool Test(const mirror::Object* obj) SHARED_REQUIRES(Locks::heap_bitmap_lock_);
+ void Clear(const mirror::Object* obj) REQUIRES(Locks::heap_bitmap_lock_);
template<typename LargeObjectSetVisitor>
bool Set(const mirror::Object* obj, const LargeObjectSetVisitor& visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) ALWAYS_INLINE;
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(Locks::heap_bitmap_lock_) ALWAYS_INLINE;
template<typename LargeObjectSetVisitor>
bool AtomicTestAndSet(const mirror::Object* obj, const LargeObjectSetVisitor& visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) ALWAYS_INLINE;
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(Locks::heap_bitmap_lock_) ALWAYS_INLINE;
ContinuousSpaceBitmap* GetContinuousSpaceBitmap(const mirror::Object* obj) const;
LargeObjectBitmap* GetLargeObjectBitmap(const mirror::Object* obj) const;
void Walk(ObjectCallback* callback, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ SHARED_REQUIRES(Locks::heap_bitmap_lock_);
template <typename Visitor>
void Visit(const Visitor& visitor)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Find and replace a bitmap pointer; this is used for bitmap swapping in the GC.
void ReplaceBitmap(ContinuousSpaceBitmap* old_bitmap, ContinuousSpaceBitmap* new_bitmap)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_);
// Find and replace an object set pointer; this is used for bitmap swapping in the GC.
void ReplaceLargeObjectBitmap(LargeObjectBitmap* old_bitmap, LargeObjectBitmap* new_bitmap)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_);
explicit HeapBitmap(Heap* heap) : heap_(heap) {}
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 009254b..157f609 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -100,7 +100,7 @@
// Extra parameters are required since we use this same visitor signature for checking objects.
void operator()(Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// Only add the reference if it is non null and fits our criteria.
mirror::HeapReference<Object>* const obj_ptr = obj->GetFieldObjectReferenceAddr(offset);
mirror::Object* ref = obj_ptr->AsMirrorPtr();
@@ -110,6 +110,18 @@
}
}
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (kIsDebugBuild && !root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(from_space_->HasAddress(root->AsMirrorPtr()));
+ }
+
private:
MarkObjectVisitor* const visitor_;
// Space which we are scanning
@@ -131,8 +143,8 @@
contains_reference_to_other_space_(contains_reference_to_other_space) {}
void operator()(Object* root) const
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ REQUIRES(Locks::heap_bitmap_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(root != nullptr);
ModUnionUpdateObjectReferencesVisitor ref_visitor(visitor_, from_space_, immune_space_,
contains_reference_to_other_space_);
@@ -163,8 +175,8 @@
}
// Extra parameters are required since we use this same visitor signature for checking objects.
- void operator()(Object* obj, MemberOffset offset, bool /*is_static*/) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void operator()(Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
mirror::HeapReference<Object>* ref_ptr = obj->GetFieldObjectReferenceAddr(offset);
mirror::Object* ref = ref_ptr->AsMirrorPtr();
// Only add the reference if it is non null and fits our criteria.
@@ -174,6 +186,18 @@
}
}
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (kIsDebugBuild && !root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(!mod_union_table_->ShouldAddReference(root->AsMirrorPtr()));
+ }
+
private:
ModUnionTableReferenceCache* const mod_union_table_;
std::vector<mirror::HeapReference<Object>*>* const references_;
@@ -188,7 +212,7 @@
}
void operator()(Object* obj) const
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
// We don't have an early exit since we use the visitor pattern; an early
// exit would significantly speed this up.
AddToReferenceArrayVisitor visitor(mod_union_table_, references_);
@@ -208,8 +232,8 @@
}
// Extra parameters are required since we use this same visitor signature for checking objects.
- void operator()(Object* obj, MemberOffset offset, bool /*is_static*/) const
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
+ void operator()(Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
+ SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
if (ref != nullptr && mod_union_table_->ShouldAddReference(ref) &&
references_.find(ref) == references_.end()) {
@@ -228,6 +252,18 @@
}
}
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (kIsDebugBuild && !root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(!mod_union_table_->ShouldAddReference(root->AsMirrorPtr()));
+ }
+
private:
ModUnionTableReferenceCache* const mod_union_table_;
const std::set<const Object*>& references_;
@@ -237,7 +273,7 @@
public:
explicit ModUnionCheckReferences(ModUnionTableReferenceCache* mod_union_table,
const std::set<const Object*>& references)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+ REQUIRES(Locks::heap_bitmap_lock_)
: mod_union_table_(mod_union_table), references_(references) {
}
diff --git a/runtime/gc/accounting/mod_union_table.h b/runtime/gc/accounting/mod_union_table.h
index 520cc1c..5888193 100644
--- a/runtime/gc/accounting/mod_union_table.h
+++ b/runtime/gc/accounting/mod_union_table.h
@@ -82,7 +82,7 @@
// for said cards. Exclusive lock is required since verify sometimes uses
// SpaceBitmap::VisitMarkedRange and VisitMarkedRange can't know if the callback will modify the
// bitmap or not.
- virtual void Verify() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) = 0;
+ virtual void Verify() REQUIRES(Locks::heap_bitmap_lock_) = 0;
// Returns true if a card is marked inside the mod union table. Used for testing. The address
// doesn't need to be aligned.
@@ -118,21 +118,21 @@
// Update table based on cleared cards and mark all references to the other spaces.
void UpdateAndMarkReferences(MarkObjectVisitor* visitor) OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(Locks::heap_bitmap_lock_);
// Exclusive lock is required since verify uses SpaceBitmap::VisitMarkedRange and
// VisitMarkedRange can't know if the callback will modify the bitmap or not.
void Verify() OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(Locks::heap_bitmap_lock_);
// Function that tells whether or not to add a reference to the table.
virtual bool ShouldAddReference(const mirror::Object* ref) const = 0;
virtual bool ContainsCardFor(uintptr_t addr) OVERRIDE;
- virtual void Dump(std::ostream& os) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ virtual void Dump(std::ostream& os) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
virtual void SetCards() OVERRIDE;
@@ -158,8 +158,8 @@
// Mark all references to the alloc space(s).
virtual void UpdateAndMarkReferences(MarkObjectVisitor* visitor) OVERRIDE
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Nothing to verify.
virtual void Verify() OVERRIDE {}
diff --git a/runtime/gc/accounting/mod_union_table_test.cc b/runtime/gc/accounting/mod_union_table_test.cc
index aad8a25..edab1b0 100644
--- a/runtime/gc/accounting/mod_union_table_test.cc
+++ b/runtime/gc/accounting/mod_union_table_test.cc
@@ -46,7 +46,7 @@
}
mirror::ObjectArray<mirror::Object>* AllocObjectArray(
Thread* self, space::ContinuousMemMapAllocSpace* space, size_t component_count)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
auto* klass = GetObjectArrayClass(self, space);
const size_t size = mirror::ComputeArraySize(component_count, 2);
size_t bytes_allocated = 0, bytes_tl_bulk_allocated;
@@ -67,7 +67,7 @@
private:
mirror::Class* GetObjectArrayClass(Thread* self, space::ContinuousMemMapAllocSpace* space)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (java_lang_object_array_ == nullptr) {
java_lang_object_array_ =
Runtime::Current()->GetClassLinker()->GetClassRoot(ClassLinker::kObjectArrayClass);
@@ -97,12 +97,12 @@
public:
explicit CollectVisitedVisitor(std::set<mirror::Object*>* out) : out_(out) {}
virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* ref) OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(ref != nullptr);
MarkObject(ref->AsMirrorPtr());
}
virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(obj != nullptr);
out_->insert(obj);
return obj;
diff --git a/runtime/gc/accounting/remembered_set.cc b/runtime/gc/accounting/remembered_set.cc
index 23ab8df..b9f24f3 100644
--- a/runtime/gc/accounting/remembered_set.cc
+++ b/runtime/gc/accounting/remembered_set.cc
@@ -67,8 +67,8 @@
: collector_(collector), target_space_(target_space),
contains_reference_to_target_space_(contains_reference_to_target_space) {}
- void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(obj != nullptr);
mirror::HeapReference<mirror::Object>* ref_ptr = obj->GetFieldObjectReferenceAddr(offset);
if (target_space_->HasAddress(ref_ptr->AsMirrorPtr())) {
@@ -79,14 +79,29 @@
}
void operator()(mirror::Class* klass, mirror::Reference* ref) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
if (target_space_->HasAddress(ref->GetReferent())) {
*contains_reference_to_target_space_ = true;
collector_->DelayReferenceReferent(klass, ref);
}
}
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (!root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (target_space_->HasAddress(root->AsMirrorPtr())) {
+ *contains_reference_to_target_space_ = true;
+ root->Assign(collector_->MarkObject(root->AsMirrorPtr()));
+ DCHECK(!target_space_->HasAddress(root->AsMirrorPtr()));
+ }
+ }
+
private:
collector::GarbageCollector* const collector_;
space::ContinuousSpace* const target_space_;
@@ -101,8 +116,8 @@
: collector_(collector), target_space_(target_space),
contains_reference_to_target_space_(contains_reference_to_target_space) {}
- void operator()(mirror::Object* obj) const EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void operator()(mirror::Object* obj) const REQUIRES(Locks::heap_bitmap_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
RememberedSetReferenceVisitor visitor(target_space_, contains_reference_to_target_space_,
collector_);
obj->VisitReferences<kMovingClasses>(visitor, visitor);
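
The VisitRootIfNonNull/VisitRoot pair added to this and the other field visitors matches the callback set that mirror::Object::VisitReferences dispatches to now that classes can hold native GC roots; null slots are filtered before the real visit. A sketch of the expected shape (illustrative class name, not from this change):

  // Illustrative visitor: fields, reference-class referents, and native roots.
  class ExampleVisitor {
   public:
    void operator()(mirror::Object* obj, MemberOffset offset, bool is_static) const;
    void operator()(mirror::Class* klass, mirror::Reference* ref) const;
    void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const {
      if (!root->IsNull()) {
        VisitRoot(root);  // Skip empty slots cheaply, visit the rest.
      }
    }
    void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const;
  };

Passing the same object twice, as in obj->VisitReferences<kMovingClasses>(visitor, visitor) above, supplies it once as the field visitor and once as the reference visitor.
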
diff --git a/runtime/gc/accounting/remembered_set.h b/runtime/gc/accounting/remembered_set.h
index affe863..3a0dcf7 100644
--- a/runtime/gc/accounting/remembered_set.h
+++ b/runtime/gc/accounting/remembered_set.h
@@ -56,8 +56,8 @@
// Mark through all references to the target space.
void UpdateAndMarkReferences(space::ContinuousSpace* target_space,
collector::GarbageCollector* collector)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
void Dump(std::ostream& os);
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index cdeaa50..7914b66 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -188,7 +188,7 @@
void SpaceBitmap<kAlignment>::WalkInstanceFields(SpaceBitmap<kAlignment>* visited,
ObjectCallback* callback, mirror::Object* obj,
mirror::Class* klass, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// Visit fields of parent classes first.
mirror::Class* super = klass->GetSuperClass();
if (super != nullptr) {
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index e0661b6..b8ff471 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -123,7 +123,7 @@
// Visit the live objects in the range [visit_begin, visit_end).
// TODO: Use lock annotations when clang is fixed.
- // EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // REQUIRES(Locks::heap_bitmap_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
template <typename Visitor>
void VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end, const Visitor& visitor) const
NO_THREAD_SAFETY_ANALYSIS;
@@ -131,12 +131,12 @@
// Visits set bits in address order. The callback is not permitted to change the bitmap bits or
// max during the traversal.
void Walk(ObjectCallback* callback, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ SHARED_REQUIRES(Locks::heap_bitmap_lock_);
// Visits set bits with an in order traversal. The callback is not permitted to change the bitmap
// bits or max during the traversal.
void InOrderWalk(ObjectCallback* callback, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
// Walk through the bitmaps in increasing address order, and find the object pointers that
// correspond to garbage objects. Call <callback> zero or more times with lists of these object
@@ -204,12 +204,12 @@
// For an unvisited object, visit it then all its children found via fields.
static void WalkFieldsInOrder(SpaceBitmap* visited, ObjectCallback* callback, mirror::Object* obj,
- void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void* arg) SHARED_REQUIRES(Locks::mutator_lock_);
// Walk instance fields of the given Class. Separate function to allow recursion on the super
// class.
static void WalkInstanceFields(SpaceBitmap<kAlignment>* visited, ObjectCallback* callback,
mirror::Object* obj, mirror::Class* klass, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Backing storage for bitmap.
std::unique_ptr<MemMap> mem_map_;
diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc
index 3108b7c..16c9354 100644
--- a/runtime/gc/allocation_record.cc
+++ b/runtime/gc/allocation_record.cc
@@ -20,7 +20,7 @@
#include "base/stl_util.h"
#include "stack.h"
-#ifdef HAVE_ANDROID_OS
+#ifdef __ANDROID__
#include "cutils/properties.h"
#endif
@@ -42,7 +42,7 @@
}
void AllocRecordObjectMap::SetProperties() {
-#ifdef HAVE_ANDROID_OS
+#ifdef __ANDROID__
// Check whether there's a system property overriding the max number of records.
const char* propertyName = "dalvik.vm.allocTrackerMax";
char allocMaxString[PROPERTY_VALUE_MAX];
@@ -111,8 +111,8 @@
}
static inline void SweepClassObject(AllocRecord* record, IsMarkedVisitor* visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(Locks::alloc_tracker_lock_) {
GcRoot<mirror::Class>& klass = record->GetClassGcRoot();
// This does not need a read barrier because this is called by GC.
mirror::Object* old_object = klass.Read<kWithoutReadBarrier>();
@@ -177,7 +177,7 @@
struct AllocRecordStackVisitor : public StackVisitor {
AllocRecordStackVisitor(Thread* thread, AllocRecordStackTrace* trace_in, size_t max)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
trace(trace_in),
depth(0),
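
The guard swap in this file is not just a rename: HAVE_ANDROID_OS was defined by the build system, while __ANDROID__ is predefined by the compiler whenever the target is Android, so the check keeps working without any make plumbing. The idiom:

  #ifdef __ANDROID__
  // Device build: bionic/cutils facilities such as property_get() exist.
  #include "cutils/properties.h"
  #else
  // Host build: portable fallback path.
  #endif
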
diff --git a/runtime/gc/allocation_record.h b/runtime/gc/allocation_record.h
index 933363b..0a4f532 100644
--- a/runtime/gc/allocation_record.h
+++ b/runtime/gc/allocation_record.h
@@ -39,7 +39,7 @@
public:
AllocRecordStackTraceElement() : method_(nullptr), dex_pc_(0) {}
- int32_t ComputeLineNumber() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ int32_t ComputeLineNumber() const SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* GetMethod() const {
return method_;
@@ -184,14 +184,14 @@
return trace_->GetTid();
}
- mirror::Class* GetClass() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Class* GetClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
return klass_.Read();
}
const char* GetClassDescriptor(std::string* storage) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- GcRoot<mirror::Class>& GetClassGcRoot() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ GcRoot<mirror::Class>& GetClassGcRoot() SHARED_REQUIRES(Locks::mutator_lock_) {
return klass_;
}
@@ -221,12 +221,12 @@
// in order to make sure the AllocRecordObjectMap object is not null.
static void RecordAllocation(Thread* self, mirror::Object* obj, mirror::Class* klass,
size_t byte_count)
- LOCKS_EXCLUDED(Locks::alloc_tracker_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!Locks::alloc_tracker_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
- static void SetAllocTrackingEnabled(bool enabled) LOCKS_EXCLUDED(Locks::alloc_tracker_lock_);
+ static void SetAllocTrackingEnabled(bool enabled) REQUIRES(!Locks::alloc_tracker_lock_);
- AllocRecordObjectMap() EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_)
+ AllocRecordObjectMap() REQUIRES(Locks::alloc_tracker_lock_)
: alloc_record_max_(kDefaultNumAllocRecords),
recent_record_max_(kDefaultNumRecentRecords),
max_stack_depth_(kDefaultAllocStackDepth),
@@ -238,8 +238,8 @@
~AllocRecordObjectMap();
void Put(mirror::Object* obj, AllocRecord* record)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(Locks::alloc_tracker_lock_) {
if (entries_.size() == alloc_record_max_) {
delete entries_.front().second;
entries_.pop_front();
@@ -247,23 +247,23 @@
entries_.emplace_back(GcRoot<mirror::Object>(obj), record);
}
- size_t Size() const SHARED_LOCKS_REQUIRED(Locks::alloc_tracker_lock_) {
+ size_t Size() const SHARED_REQUIRES(Locks::alloc_tracker_lock_) {
return entries_.size();
}
- size_t GetRecentAllocationSize() const SHARED_LOCKS_REQUIRED(Locks::alloc_tracker_lock_) {
+ size_t GetRecentAllocationSize() const SHARED_REQUIRES(Locks::alloc_tracker_lock_) {
CHECK_LE(recent_record_max_, alloc_record_max_);
size_t sz = entries_.size();
return std::min(recent_record_max_, sz);
}
void VisitRoots(RootVisitor* visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(Locks::alloc_tracker_lock_);
void SweepAllocationRecords(IsMarkedVisitor* visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(Locks::alloc_tracker_lock_);
// Allocation tracking could be enabled by the user in between DisallowNewAllocationRecords() and
// AllowNewAllocationRecords(), in which case new allocation records can be added although they
@@ -272,34 +272,34 @@
// swept from the list. But missing the first few records is acceptable for using the button to
// enable allocation tracking.
void DisallowNewAllocationRecords()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(Locks::alloc_tracker_lock_);
void AllowNewAllocationRecords()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(Locks::alloc_tracker_lock_);
// TODO: Is there a better way to hide the type of entries_?
EntryList::iterator Begin()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(Locks::alloc_tracker_lock_) {
return entries_.begin();
}
EntryList::iterator End()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(Locks::alloc_tracker_lock_) {
return entries_.end();
}
EntryList::reverse_iterator RBegin()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(Locks::alloc_tracker_lock_) {
return entries_.rbegin();
}
EntryList::reverse_iterator REnd()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(Locks::alloc_tracker_lock_) {
return entries_.rend();
}
@@ -318,7 +318,7 @@
// see the comment in typedef of EntryList
EntryList entries_ GUARDED_BY(Locks::alloc_tracker_lock_);
- void SetProperties() EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_);
+ void SetProperties() REQUIRES(Locks::alloc_tracker_lock_);
};
} // namespace gc
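
The accessors in this header take REQUIRES(Locks::alloc_tracker_lock_) rather than the shared form whenever they expose entries_ for mutation; Size() and GetRecentAllocationSize() keep the shared requirement because they only read. A condensed sketch of the rule (hypothetical container, assuming a Mutex type annotated as a capability):

  class ExampleRecordMap {
   public:
    size_t Size() const SHARED_REQUIRES(lock_) {        // read-only: shared suffices
      return entries_.size();
    }
    std::list<int>::iterator Begin() REQUIRES(lock_) {  // iterator may mutate
      return entries_.begin();
    }
   private:
    mutable Mutex lock_;
    std::list<int> entries_ GUARDED_BY(lock_);
  };
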
diff --git a/runtime/gc/allocator/dlmalloc.h b/runtime/gc/allocator/dlmalloc.h
index 0e91a43..0558921 100644
--- a/runtime/gc/allocator/dlmalloc.h
+++ b/runtime/gc/allocator/dlmalloc.h
@@ -35,7 +35,7 @@
#include "../../bionic/libc/upstream-dlmalloc/malloc.h"
#pragma GCC diagnostic pop
-#ifdef HAVE_ANDROID_OS
+#ifdef __ANDROID__
// Define dlmalloc routines from bionic that cannot be included directly because of redefining
// symbols from the include above.
extern "C" void dlmalloc_inspect_all(void(*handler)(void*, void *, size_t, void*), void* arg);
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index abaa97f..470bc1c 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -1170,7 +1170,7 @@
// First mark slots to free in the bulk free bit map without locking the
// size bracket locks. On host, unordered_set is faster than vector + flag.
-#ifdef HAVE_ANDROID_OS
+#ifdef __ANDROID__
std::vector<Run*> runs;
#else
std::unordered_set<Run*, hash_run, eq_run> runs;
@@ -1237,7 +1237,7 @@
DCHECK_EQ(run->magic_num_, kMagicNum);
// Set the bit in the bulk free bit map.
freed_bytes += run->MarkBulkFreeBitMap(ptr);
-#ifdef HAVE_ANDROID_OS
+#ifdef __ANDROID__
if (!run->to_be_bulk_freed_) {
run->to_be_bulk_freed_ = true;
runs.push_back(run);
@@ -1252,7 +1252,7 @@
// union the bulk free bit map into the thread-local free bit map
// (for thread-local runs.)
for (Run* run : runs) {
-#ifdef HAVE_ANDROID_OS
+#ifdef __ANDROID__
DCHECK(run->to_be_bulk_freed_);
run->to_be_bulk_freed_ = false;
#endif
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index c356a39..a7f29af 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -51,7 +51,7 @@
bool IsFree() const {
return !kIsDebugBuild || magic_num_ == kMagicNumFree;
}
- size_t ByteSize(RosAlloc* rosalloc) const EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
+ size_t ByteSize(RosAlloc* rosalloc) const REQUIRES(rosalloc->lock_) {
const uint8_t* fpr_base = reinterpret_cast<const uint8_t*>(this);
size_t pm_idx = rosalloc->ToPageMapIndex(fpr_base);
size_t byte_size = rosalloc->free_page_run_size_map_[pm_idx];
@@ -60,7 +60,7 @@
return byte_size;
}
void SetByteSize(RosAlloc* rosalloc, size_t byte_size)
- EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
+ REQUIRES(rosalloc->lock_) {
DCHECK_EQ(byte_size % kPageSize, static_cast<size_t>(0));
uint8_t* fpr_base = reinterpret_cast<uint8_t*>(this);
size_t pm_idx = rosalloc->ToPageMapIndex(fpr_base);
@@ -69,20 +69,20 @@
void* Begin() {
return reinterpret_cast<void*>(this);
}
- void* End(RosAlloc* rosalloc) EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
+ void* End(RosAlloc* rosalloc) REQUIRES(rosalloc->lock_) {
uint8_t* fpr_base = reinterpret_cast<uint8_t*>(this);
uint8_t* end = fpr_base + ByteSize(rosalloc);
return end;
}
bool IsLargerThanPageReleaseThreshold(RosAlloc* rosalloc)
- EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
+ REQUIRES(rosalloc->lock_) {
return ByteSize(rosalloc) >= rosalloc->page_release_size_threshold_;
}
bool IsAtEndOfSpace(RosAlloc* rosalloc)
- EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
+ REQUIRES(rosalloc->lock_) {
return reinterpret_cast<uint8_t*>(this) + ByteSize(rosalloc) == rosalloc->base_ + rosalloc->footprint_;
}
- bool ShouldReleasePages(RosAlloc* rosalloc) EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
+ bool ShouldReleasePages(RosAlloc* rosalloc) REQUIRES(rosalloc->lock_) {
switch (rosalloc->page_release_mode_) {
case kPageReleaseModeNone:
return false;
@@ -99,7 +99,7 @@
return false;
}
}
- void ReleasePages(RosAlloc* rosalloc) EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
+ void ReleasePages(RosAlloc* rosalloc) REQUIRES(rosalloc->lock_) {
uint8_t* start = reinterpret_cast<uint8_t*>(this);
size_t byte_size = ByteSize(rosalloc);
DCHECK_EQ(byte_size % kPageSize, static_cast<size_t>(0));
@@ -254,8 +254,8 @@
std::string Dump();
// Verify for debugging.
void Verify(Thread* self, RosAlloc* rosalloc, bool running_on_memory_tool)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_);
+ REQUIRES(Locks::mutator_lock_)
+ REQUIRES(Locks::thread_list_lock_);
private:
// The common part of MarkFreeBitMap() and MarkThreadLocalFreeBitMap(). Returns the bracket
@@ -512,51 +512,51 @@
// Page-granularity alloc/free
void* AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type)
- EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ REQUIRES(lock_);
// Returns how many bytes were freed.
- size_t FreePages(Thread* self, void* ptr, bool already_zero) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ size_t FreePages(Thread* self, void* ptr, bool already_zero) REQUIRES(lock_);
// Allocate/free a run slot.
void* AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated, size_t* usable_size,
size_t* bytes_tl_bulk_allocated)
- LOCKS_EXCLUDED(lock_);
+ REQUIRES(!lock_);
// Allocate/free a run slot without acquiring locks.
- // TODO: EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
+ // TODO: REQUIRES(Locks::mutator_lock_)
void* AllocFromRunThreadUnsafe(Thread* self, size_t size, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
- LOCKS_EXCLUDED(lock_);
- void* AllocFromCurrentRunUnlocked(Thread* self, size_t idx);
+ REQUIRES(!lock_);
+ void* AllocFromCurrentRunUnlocked(Thread* self, size_t idx) REQUIRES(!lock_);
// Returns the bracket size.
size_t FreeFromRun(Thread* self, void* ptr, Run* run)
- LOCKS_EXCLUDED(lock_);
+ REQUIRES(!lock_);
// Used to allocate a new thread local run for a size bracket.
- Run* AllocRun(Thread* self, size_t idx) LOCKS_EXCLUDED(lock_);
+ Run* AllocRun(Thread* self, size_t idx) REQUIRES(!lock_);
// Used to acquire a new/reused run for a size bracket. Used when a
// thread-local or current run gets full.
- Run* RefillRun(Thread* self, size_t idx) LOCKS_EXCLUDED(lock_);
+ Run* RefillRun(Thread* self, size_t idx) REQUIRES(!lock_);
// The internals of non-bulk Free().
- size_t FreeInternal(Thread* self, void* ptr) LOCKS_EXCLUDED(lock_);
+ size_t FreeInternal(Thread* self, void* ptr) REQUIRES(!lock_);
// Allocates large objects.
void* AllocLargeObject(Thread* self, size_t size, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
- LOCKS_EXCLUDED(lock_);
+ REQUIRES(!lock_);
// Revoke a run by adding it to non_full_runs_ or freeing the pages.
- void RevokeRun(Thread* self, size_t idx, Run* run);
+ void RevokeRun(Thread* self, size_t idx, Run* run) REQUIRES(!lock_);
// Revoke the current runs which share an index with the thread local runs.
- void RevokeThreadUnsafeCurrentRuns();
+ void RevokeThreadUnsafeCurrentRuns() REQUIRES(!lock_);
// Release a range of pages.
- size_t ReleasePageRange(uint8_t* start, uint8_t* end) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ size_t ReleasePageRange(uint8_t* start, uint8_t* end) REQUIRES(lock_);
// Dumps the page map for debugging.
- std::string DumpPageMap() EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ std::string DumpPageMap() REQUIRES(lock_);
public:
RosAlloc(void* base, size_t capacity, size_t max_capacity,
@@ -570,11 +570,11 @@
template<bool kThreadSafe = true>
void* Alloc(Thread* self, size_t size, size_t* bytes_allocated, size_t* usable_size,
size_t* bytes_tl_bulk_allocated)
- LOCKS_EXCLUDED(lock_);
+ REQUIRES(!lock_);
size_t Free(Thread* self, void* ptr)
- LOCKS_EXCLUDED(bulk_free_lock_);
+ REQUIRES(!bulk_free_lock_, !lock_);
size_t BulkFree(Thread* self, void** ptrs, size_t num_ptrs)
- LOCKS_EXCLUDED(bulk_free_lock_);
+ REQUIRES(!bulk_free_lock_, !lock_);
// Returns true if the given allocation request can be served by
// an existing thread local run without allocating a new run.
@@ -589,7 +589,7 @@
ALWAYS_INLINE size_t MaxBytesBulkAllocatedFor(size_t size);
// Returns the size of the allocated slot for a given allocated memory chunk.
- size_t UsableSize(const void* ptr);
+ size_t UsableSize(const void* ptr) REQUIRES(!lock_);
// Returns the size of the allocated slot for a given size.
size_t UsableSize(size_t bytes) {
if (UNLIKELY(bytes > kLargeSizeThreshold)) {
@@ -600,33 +600,33 @@
}
// Try to reduce the current footprint by releasing the free page
// run at the end of the memory region, if any.
- bool Trim();
+ bool Trim() REQUIRES(!lock_);
// Iterates over all the memory slots and applies the given function.
void InspectAll(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
void* arg)
- LOCKS_EXCLUDED(lock_);
+ REQUIRES(!lock_);
// Release empty pages.
- size_t ReleasePages() LOCKS_EXCLUDED(lock_);
+ size_t ReleasePages() REQUIRES(!lock_);
// Returns the current footprint.
- size_t Footprint() LOCKS_EXCLUDED(lock_);
+ size_t Footprint() REQUIRES(!lock_);
// Returns the current capacity, maximum footprint.
- size_t FootprintLimit() LOCKS_EXCLUDED(lock_);
+ size_t FootprintLimit() REQUIRES(!lock_);
// Update the current capacity.
- void SetFootprintLimit(size_t bytes) LOCKS_EXCLUDED(lock_);
+ void SetFootprintLimit(size_t bytes) REQUIRES(!lock_);
// Releases the thread-local runs assigned to the given thread back to the common set of runs.
// Returns the total bytes of free slots in the revoked thread local runs. This is to be
// subtracted from Heap::num_bytes_allocated_ to cancel out the ahead-of-time counting.
- size_t RevokeThreadLocalRuns(Thread* thread);
+ size_t RevokeThreadLocalRuns(Thread* thread) REQUIRES(!lock_, !bulk_free_lock_);
// Releases the thread-local runs assigned to all the threads back to the common set of runs.
// Returns the total bytes of free slots in the revoked thread local runs. This is to be
// subtracted from Heap::num_bytes_allocated_ to cancel out the ahead-of-time counting.
- size_t RevokeAllThreadLocalRuns() LOCKS_EXCLUDED(Locks::thread_list_lock_);
+ size_t RevokeAllThreadLocalRuns() REQUIRES(!Locks::thread_list_lock_, !lock_, !bulk_free_lock_);
// Assert the thread local runs of a thread are revoked.
- void AssertThreadLocalRunsAreRevoked(Thread* thread);
+ void AssertThreadLocalRunsAreRevoked(Thread* thread) REQUIRES(!bulk_free_lock_);
// Assert all the thread local runs are revoked.
- void AssertAllThreadLocalRunsAreRevoked() LOCKS_EXCLUDED(Locks::thread_list_lock_);
+ void AssertAllThreadLocalRunsAreRevoked() REQUIRES(!Locks::thread_list_lock_, !bulk_free_lock_);
static Run* GetDedicatedFullRun() {
return dedicated_full_run_;
@@ -647,9 +647,11 @@
}
// Verify for debugging.
- void Verify() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void Verify() REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !bulk_free_lock_,
+ !lock_);
- void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes);
+ void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes)
+ REQUIRES(!bulk_free_lock_, !lock_);
private:
friend std::ostream& operator<<(std::ostream& os, const RosAlloc::PageMapKind& rhs);
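
Most RosAlloc entry points move from LOCKS_EXCLUDED(lock_) to REQUIRES(!lock_), and several previously unannotated functions (RevokeRun, UsableSize, Trim, and friends) gain the annotation. The distinction matters because these functions acquire lock_ internally: a caller already holding it would self-deadlock, and the negative requirement lets the analysis reject such callers statically. A sketch of the shape, names illustrative only:

  void* Alloc(Thread* self, size_t size) REQUIRES(!lock_) {
    MutexLock mu(self, lock_);           // Safe: caller proved it does not hold lock_.
    return AllocPagesLocked(self, size); // AllocPagesLocked() is REQUIRES(lock_).
  }
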
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index c803655..ec689f8 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -185,7 +185,7 @@
: concurrent_copying_(concurrent_copying), use_tlab_(use_tlab) {
}
- virtual void Run(Thread* thread) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ virtual void Run(Thread* thread) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
// Note: self is not necessarily equal to thread since thread may be suspended.
Thread* self = Thread::Current();
CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
@@ -221,7 +221,7 @@
: concurrent_copying_(concurrent_copying) {
}
- virtual void Run(Thread* thread) OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ virtual void Run(Thread* thread) OVERRIDE REQUIRES(Locks::mutator_lock_) {
ConcurrentCopying* cc = concurrent_copying_;
TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings());
// Note: self is not necessarily equal to thread since thread may be suspended.
@@ -290,8 +290,8 @@
explicit ConcurrentCopyingImmuneSpaceObjVisitor(ConcurrentCopying* cc)
: collector_(cc) {}
- void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
DCHECK(obj != nullptr);
DCHECK(collector_->immune_region_.ContainsObject(obj));
accounting::ContinuousSpaceBitmap* cc_bitmap =
@@ -599,7 +599,7 @@
: collector_(collector) {}
void operator()(mirror::Object* ref) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
+ SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
if (ref == nullptr) {
// OK.
return;
@@ -624,7 +624,7 @@
}
void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(root != nullptr);
operator()(root);
}
@@ -638,19 +638,32 @@
explicit ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor(ConcurrentCopying* collector)
: collector_(collector) {}
- void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
+ void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
+ SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
mirror::Object* ref =
obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
ConcurrentCopyingVerifyNoFromSpaceRefsVisitor visitor(collector_);
visitor(ref);
}
void operator()(mirror::Class* klass, mirror::Reference* ref) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
+ SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
CHECK(klass->IsTypeOfReferenceClass());
this->operator()(ref, mirror::Reference::ReferentOffset(), false);
}
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (!root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ ConcurrentCopyingVerifyNoFromSpaceRefsVisitor visitor(collector_);
+ visitor(root->AsMirrorPtr());
+ }
+
private:
ConcurrentCopying* const collector_;
};
@@ -660,11 +673,11 @@
explicit ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor(ConcurrentCopying* collector)
: collector_(collector) {}
void operator()(mirror::Object* obj) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ObjectCallback(obj, collector_);
}
static void ObjectCallback(mirror::Object* obj, void *arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
CHECK(obj != nullptr);
ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
space::RegionSpace* region_space = collector->RegionSpace();
@@ -733,7 +746,7 @@
: collector_(collector) {}
void operator()(mirror::Object* ref) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
+ SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
if (ref == nullptr) {
// OK.
return;
@@ -750,18 +763,31 @@
explicit ConcurrentCopyingAssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector)
: collector_(collector) {}
- void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
+ void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
+ SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
mirror::Object* ref =
obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector_);
visitor(ref);
}
- void operator()(mirror::Class* klass, mirror::Reference* /* ref */) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
+ void operator()(mirror::Class* klass, mirror::Reference* ref ATTRIBUTE_UNUSED) const
+ SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
CHECK(klass->IsTypeOfReferenceClass());
}
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (!root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector_);
+ visitor(root->AsMirrorPtr());
+ }
+
private:
ConcurrentCopying* const collector_;
};
@@ -771,11 +797,11 @@
explicit ConcurrentCopyingAssertToSpaceInvariantObjectVisitor(ConcurrentCopying* collector)
: collector_(collector) {}
void operator()(mirror::Object* obj) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ObjectCallback(obj, collector_);
}
static void ObjectCallback(mirror::Object* obj, void *arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
CHECK(obj != nullptr);
ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
space::RegionSpace* region_space = collector->RegionSpace();
@@ -1130,8 +1156,8 @@
#ifndef USE_BAKER_OR_BROOKS_READ_BARRIER
NO_RETURN
#endif
- void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
DCHECK(obj != nullptr);
DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj)) << obj;
DCHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::BlackPtr()) << obj;
@@ -1277,8 +1303,8 @@
public:
explicit ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor(ConcurrentCopying* cc)
: collector_(cc) {}
- void operator()(mirror::Object* ref) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ void operator()(mirror::Object* ref) const SHARED_REQUIRES(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
DCHECK(ref != nullptr);
DCHECK(collector_->region_space_bitmap_->Test(ref)) << ref;
DCHECK(collector_->region_space_->IsInUnevacFromSpace(ref)) << ref;
@@ -1335,7 +1361,7 @@
template <class MirrorType>
ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<MirrorType>* root)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (!root->IsNull()) {
VisitRoot(root);
}
@@ -1343,13 +1369,13 @@
template <class MirrorType>
void VisitRoot(mirror::Object** root)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << *root;
}
template <class MirrorType>
void VisitRoot(mirror::CompressedReference<MirrorType>* root)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << root->AsMirrorPtr();
}
};
@@ -1489,17 +1515,29 @@
: collector_(collector) {}
void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
- const ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ const ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
collector_->Process(obj, offset);
}
void operator()(mirror::Class* klass, mirror::Reference* ref) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
+ SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
CHECK(klass->IsTypeOfReferenceClass());
collector_->DelayReferenceReferent(klass, ref);
}
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (!root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ collector_->MarkRoot(root);
+ }
+
private:
ConcurrentCopying* const collector_;
};
@@ -1513,7 +1551,8 @@
// Process a field.
inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) {
- mirror::Object* ref = obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
+ mirror::Object* ref = obj->GetFieldObject<
+ mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
if (ref == nullptr || region_space_->IsInToSpace(ref)) {
return;
}
@@ -1530,8 +1569,8 @@
// It was updated by the mutator.
break;
}
- } while (!obj->CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier<false, false, kVerifyNone>(
- offset, expected_ref, new_ref));
+ } while (!obj->CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier<
+ false, false, kVerifyNone>(offset, expected_ref, new_ref));
}
// Process some roots.
@@ -1559,22 +1598,18 @@
}
}
-void ConcurrentCopying::VisitRoots(
- mirror::CompressedReference<mirror::Object>** roots, size_t count,
- const RootInfo& info ATTRIBUTE_UNUSED) {
- for (size_t i = 0; i < count; ++i) {
- mirror::CompressedReference<mirror::Object>* root = roots[i];
- mirror::Object* ref = root->AsMirrorPtr();
- if (ref == nullptr || region_space_->IsInToSpace(ref)) {
- continue;
- }
- mirror::Object* to_ref = Mark(ref);
- if (to_ref == ref) {
- continue;
- }
+void ConcurrentCopying::MarkRoot(mirror::CompressedReference<mirror::Object>* root) {
+ DCHECK(!root->IsNull());
+ mirror::Object* const ref = root->AsMirrorPtr();
+ if (region_space_->IsInToSpace(ref)) {
+ return;
+ }
+ mirror::Object* to_ref = Mark(ref);
+ if (to_ref != ref) {
auto* addr = reinterpret_cast<Atomic<mirror::CompressedReference<mirror::Object>>*>(root);
auto expected_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(ref);
auto new_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(to_ref);
+ // If the CAS fails, then it was updated by the mutator.
do {
if (ref != addr->LoadRelaxed().AsMirrorPtr()) {
// It was updated by the mutator.
@@ -1584,6 +1619,17 @@
}
}
+void ConcurrentCopying::VisitRoots(
+ mirror::CompressedReference<mirror::Object>** roots, size_t count,
+ const RootInfo& info ATTRIBUTE_UNUSED) {
+ for (size_t i = 0; i < count; ++i) {
+ mirror::CompressedReference<mirror::Object>* const root = roots[i];
+ if (!root->IsNull()) {
+ MarkRoot(root);
+ }
+ }
+}
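
MarkRoot factors the per-root CAS out of VisitRoots, which now only filters nulls. The publish step forwards the reference and installs the new value only while the slot still holds the value that was forwarded; if a mutator races in first, its update wins. A compilable sketch of that loop with std::atomic (the real code uses ART's Atomic<> over CompressedReference; Compress() is a placeholder for reference compression):

  #include <atomic>
  #include <cstdint>

  inline uint32_t Compress(const void* p) {  // stand-in for CompressedReference encoding
    return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(p) >> 3);
  }

  void UpdateRootSlot(std::atomic<uint32_t>& slot, void* ref, void* to_ref) {
    uint32_t expected = Compress(ref);
    const uint32_t desired = Compress(to_ref);
    while (!slot.compare_exchange_weak(expected, desired)) {
      if (expected != Compress(ref)) {
        break;  // Slot no longer holds ref: a mutator updated it; keep that value.
      }
      // Spurious failure: expected was reloaded and still equals ref, so retry.
    }
  }
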
+
// Fill the given memory block with a dummy object. Used to fill in a
// copy of an object that was lost in a race.
void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size) {
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index f1317b8..a4fd71c 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -62,14 +62,15 @@
ConcurrentCopying(Heap* heap, const std::string& name_prefix = "");
~ConcurrentCopying();
- virtual void RunPhases() OVERRIDE;
- void InitializePhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void MarkingPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void ReclaimPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void FinishPhase();
+ virtual void RunPhases() OVERRIDE REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
+ void InitializePhase() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
+ void MarkingPhase() SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
+ void ReclaimPhase() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
+ void FinishPhase() REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
- void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+ void BindBitmaps() SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Locks::heap_bitmap_lock_);
virtual GcType GetGcType() const OVERRIDE {
return kGcTypePartial;
}
@@ -85,14 +86,15 @@
return region_space_;
}
void AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset, mirror::Object* ref)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void AssertToSpaceInvariant(GcRootSource* gc_root_source, mirror::Object* ref)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool IsInToSpace(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsInToSpace(mirror::Object* ref) SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(ref != nullptr);
return IsMarked(ref) == ref;
}
- mirror::Object* Mark(mirror::Object* from_ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Object* Mark(mirror::Object* from_ref) SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
bool IsMarking() const {
return is_marking_;
}
@@ -105,68 +107,79 @@
bool IsWeakRefAccessEnabled() {
return weak_ref_access_enabled_.LoadRelaxed();
}
- void RevokeThreadLocalMarkStack(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void RevokeThreadLocalMarkStack(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!mark_stack_lock_);
private:
- void PushOntoMarkStack(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::Object* Copy(mirror::Object* from_ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void Scan(mirror::Object* to_ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void PushOntoMarkStack(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!mark_stack_lock_);
+ mirror::Object* Copy(mirror::Object* from_ref) SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!skipped_blocks_lock_, !mark_stack_lock_);
+ void Scan(mirror::Object* to_ref) SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!mark_stack_lock_);
void Process(mirror::Object* obj, MemberOffset offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info)
- OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
+ void MarkRoot(mirror::CompressedReference<mirror::Object>* root)
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
const RootInfo& info)
- OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void VerifyNoFromSpaceReferences() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
+ void VerifyNoFromSpaceReferences() REQUIRES(Locks::mutator_lock_);
accounting::ObjectStack* GetAllocationStack();
accounting::ObjectStack* GetLiveStack();
- virtual void ProcessMarkStack() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool ProcessMarkStackOnce() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void ProcessMarkStackRef(mirror::Object* to_ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ virtual void ProcessMarkStack() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!mark_stack_lock_);
+ bool ProcessMarkStackOnce() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
+ void ProcessMarkStackRef(mirror::Object* to_ref) SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!mark_stack_lock_);
size_t ProcessThreadLocalMarkStacks(bool disable_weak_ref_access)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
void RevokeThreadLocalMarkStacks(bool disable_weak_ref_access)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SwitchToSharedMarkStackMode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SwitchToGcExclusiveMarkStackMode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ void SwitchToSharedMarkStackMode() SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!mark_stack_lock_);
+ void SwitchToGcExclusiveMarkStackMode() SHARED_REQUIRES(Locks::mutator_lock_);
virtual void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void ProcessReferences(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ void ProcessReferences(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
virtual mirror::Object* MarkObject(mirror::Object* from_ref) OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref) OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
virtual mirror::Object* IsMarked(mirror::Object* from_ref) OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void SweepSystemWeaks(Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_);
void Sweep(bool swap_bitmaps)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
void SweepLargeObjects(bool swap_bitmaps)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
void ClearBlackPtrs()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
void FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
mirror::Object* AllocateInSkippedBlock(size_t alloc_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void CheckEmptyMarkStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void IssueEmptyCheckpoint() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool IsOnAllocStack(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!skipped_blocks_lock_);
+ void CheckEmptyMarkStack() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
+ void IssueEmptyCheckpoint() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsOnAllocStack(mirror::Object* ref) SHARED_REQUIRES(Locks::mutator_lock_);
mirror::Object* GetFwdPtr(mirror::Object* from_ref)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void FlipThreadRoots() LOCKS_EXCLUDED(Locks::mutator_lock_);
- void SwapStacks(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ void FlipThreadRoots() REQUIRES(!Locks::mutator_lock_);
+ void SwapStacks(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
void RecordLiveStackFreezeSize(Thread* self);
void ComputeUnevacFromSpaceLiveRatio();
void LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj, mirror::Object* ref)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void ReenableWeakRefAccess(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ void ReenableWeakRefAccess(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
space::RegionSpace* region_space_; // The underlying region space.
std::unique_ptr<Barrier> gc_barrier_;
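
Notice how the new negative requirements propagate upward through this header: Mark() may push onto the mark stack or allocate from a skipped block, so every function that can reach it, up to the public RunPhases(), restates REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_). The propagation rule in miniature (sketch):

  void Push(mirror::Object* o) REQUIRES(!mark_stack_lock_);   // acquires the lock inside
  void Mark(mirror::Object* o) REQUIRES(!mark_stack_lock_) {  // restated to permit the call
    Push(o);
  }
  void RunPhases() REQUIRES(!mark_stack_lock_) {              // and so on up the public API
    Mark(nullptr);
  }
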
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index cfc4f96..954c80e 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -142,7 +142,7 @@
virtual GcType GetGcType() const = 0;
virtual CollectorType GetCollectorType() const = 0;
// Run the garbage collector.
- void Run(GcCause gc_cause, bool clear_soft_references);
+ void Run(GcCause gc_cause, bool clear_soft_references) REQUIRES(!pause_histogram_lock_);
Heap* GetHeap() const {
return heap_;
}
@@ -150,11 +150,11 @@
const CumulativeLogger& GetCumulativeTimings() const {
return cumulative_timings_;
}
- void ResetCumulativeStatistics();
+ void ResetCumulativeStatistics() REQUIRES(!pause_histogram_lock_);
// Swap the live and mark bitmaps of spaces that are active for the collector. For partial GC,
// this is the allocation space, for full GC then we swap the zygote bitmaps too.
- void SwapBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- uint64_t GetTotalPausedTimeNs() LOCKS_EXCLUDED(pause_histogram_lock_);
+ void SwapBitmaps() REQUIRES(Locks::heap_bitmap_lock_);
+ uint64_t GetTotalPausedTimeNs() REQUIRES(!pause_histogram_lock_);
int64_t GetTotalFreedBytes() const {
return total_freed_bytes_;
}
@@ -162,7 +162,7 @@
return total_freed_objects_;
}
// Reset the cumulative timings and pause histogram.
- void ResetMeasurements();
+ void ResetMeasurements() REQUIRES(!pause_histogram_lock_);
// Returns the estimated throughput in bytes / second.
uint64_t GetEstimatedMeanThroughput() const;
// Returns how many GC iterations have been run.
@@ -179,23 +179,23 @@
void RecordFree(const ObjectBytePair& freed);
// Record a free of large objects.
void RecordFreeLOS(const ObjectBytePair& freed);
- void DumpPerformanceInfo(std::ostream& os) LOCKS_EXCLUDED(pause_histogram_lock_);
+ void DumpPerformanceInfo(std::ostream& os) REQUIRES(!pause_histogram_lock_);
// Helper functions for querying if objects are marked. These are used for processing references,
// and will be used for reading system weaks while the GC is running.
virtual mirror::Object* IsMarked(mirror::Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+ SHARED_REQUIRES(Locks::mutator_lock_) = 0;
virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+ SHARED_REQUIRES(Locks::mutator_lock_) = 0;
// Used by reference processor.
- virtual void ProcessMarkStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+ virtual void ProcessMarkStack() SHARED_REQUIRES(Locks::mutator_lock_) = 0;
// Force mark an object.
virtual mirror::Object* MarkObject(mirror::Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+ SHARED_REQUIRES(Locks::mutator_lock_) = 0;
virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+ SHARED_REQUIRES(Locks::mutator_lock_) = 0;
virtual void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+ SHARED_REQUIRES(Locks::mutator_lock_) = 0;
protected:
// Run all of the GC phases.
diff --git a/runtime/gc/collector/immune_region.h b/runtime/gc/collector/immune_region.h
index 30144f0..3ead501 100644
--- a/runtime/gc/collector/immune_region.h
+++ b/runtime/gc/collector/immune_region.h
@@ -41,7 +41,7 @@
ImmuneRegion();
void Reset();
bool AddContinuousSpace(space::ContinuousSpace* space)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_);
bool ContainsSpace(const space::ContinuousSpace* space) const;
// Returns true if an object is inside of the immune region (assumed to be marked).
bool ContainsObject(const mirror::Object* obj) const ALWAYS_INLINE {
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index 0623fd4..4b2c588 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -89,7 +89,7 @@
public:
explicit CalculateObjectForwardingAddressVisitor(MarkCompact* collector)
: collector_(collector) {}
- void operator()(mirror::Object* obj) const EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_,
+ void operator()(mirror::Object* obj) const REQUIRES(Locks::mutator_lock_,
Locks::heap_bitmap_lock_) {
DCHECK_ALIGNED(obj, space::BumpPointerSpace::kAlignment);
DCHECK(collector_->IsMarked(obj) != nullptr);
@@ -301,8 +301,8 @@
}
void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ OVERRIDE REQUIRES(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
for (size_t i = 0; i < count; ++i) {
mirror::Object* obj = *roots[i];
mirror::Object* new_obj = collector_->GetMarkedForwardAddress(obj);
@@ -315,8 +315,8 @@
void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ OVERRIDE REQUIRES(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
for (size_t i = 0; i < count; ++i) {
mirror::Object* obj = roots[i]->AsMirrorPtr();
mirror::Object* new_obj = collector_->GetMarkedForwardAddress(obj);
@@ -335,8 +335,8 @@
public:
explicit UpdateObjectReferencesVisitor(MarkCompact* collector) : collector_(collector) {
}
- void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
+ void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::heap_bitmap_lock_)
+ REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
collector_->UpdateObjectReferences(obj);
}
@@ -428,16 +428,29 @@
}
void operator()(mirror::Object* obj, MemberOffset offset, bool /*is_static*/) const
- ALWAYS_INLINE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ ALWAYS_INLINE REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
collector_->UpdateHeapReference(obj->GetFieldObjectReferenceAddr<kVerifyNone>(offset));
}
void operator()(mirror::Class* /*klass*/, mirror::Reference* ref) const
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
collector_->UpdateHeapReference(
ref->GetFieldObjectReferenceAddr<kVerifyNone>(mirror::Reference::ReferentOffset()));
}
+ // TODO: Remove NO_THREAD_SAFETY_ANALYSIS when clang better understands visitors.
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ NO_THREAD_SAFETY_ANALYSIS {
+ if (!root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ NO_THREAD_SAFETY_ANALYSIS {
+ root->Assign(collector_->GetMarkedForwardAddress(root->AsMirrorPtr()));
+ }
+
private:
MarkCompact* const collector_;
};
@@ -491,8 +504,8 @@
public:
explicit MoveObjectVisitor(MarkCompact* collector) : collector_(collector) {
}
- void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
+ void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::heap_bitmap_lock_)
+ REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
collector_->MoveObject(obj, obj->SizeOf());
}
@@ -564,17 +577,30 @@
}
void operator()(mirror::Object* obj, MemberOffset offset, bool /*is_static*/) const ALWAYS_INLINE
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
// Object was already verified when we scanned it.
collector_->MarkObject(obj->GetFieldObject<mirror::Object, kVerifyNone>(offset));
}
void operator()(mirror::Class* klass, mirror::Reference* ref) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(Locks::heap_bitmap_lock_) {
collector_->DelayReferenceReferent(klass, ref);
}
+ // TODO: Remove NO_THREAD_SAFETY_ANALYSIS when clang better understands visitors.
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ NO_THREAD_SAFETY_ANALYSIS {
+ if (!root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ NO_THREAD_SAFETY_ANALYSIS {
+ collector_->MarkObject(root->AsMirrorPtr());
+ }
+
private:
MarkCompact* const collector_;
};
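
Where the visitor methods above are reached through templated code whose lock state clang cannot see, the change falls back to NO_THREAD_SAFETY_ANALYSIS instead of guessing an annotation, and leaves a TODO. The macro is the standard clang escape hatch, typically defined as:

  #define NO_THREAD_SAFETY_ANALYSIS \
      __attribute__((no_thread_safety_analysis))

It suppresses checking only for the annotated function; the held-lock invariant then has to be documented in comments, as the TODOs above note.
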
diff --git a/runtime/gc/collector/mark_compact.h b/runtime/gc/collector/mark_compact.h
index 89d66b5..8d91939 100644
--- a/runtime/gc/collector/mark_compact.h
+++ b/runtime/gc/collector/mark_compact.h
@@ -64,13 +64,13 @@
virtual void RunPhases() OVERRIDE NO_THREAD_SAFETY_ANALYSIS;
void InitializePhase();
- void MarkingPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
- void ReclaimPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
- void FinishPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void MarkingPhase() REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Locks::heap_bitmap_lock_);
+ void ReclaimPhase() REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Locks::heap_bitmap_lock_);
+ void FinishPhase() REQUIRES(Locks::mutator_lock_);
void MarkReachableObjects()
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+ REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
virtual GcType GetGcType() const OVERRIDE {
return kGcTypePartial;
}
@@ -88,106 +88,106 @@
void FindDefaultMarkBitmap();
void ScanObject(mirror::Object* obj)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
// Marks the root set at the start of a garbage collection.
void MarkRoots()
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
// Bind the live bits to the mark bits of bitmaps for spaces that are never collected, ie
// the image. Mark that portion of the heap as immune.
- void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+ void BindBitmaps() SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Locks::heap_bitmap_lock_);
void UnBindBitmaps()
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_);
- void ProcessReferences(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void ProcessReferences(Thread* self) REQUIRES(Locks::mutator_lock_);
// Sweeps unmarked objects to complete the garbage collection.
- void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ void Sweep(bool swap_bitmaps) REQUIRES(Locks::heap_bitmap_lock_);
// Sweeps unmarked objects to complete the garbage collection.
- void SweepLargeObjects(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ void SweepLargeObjects(bool swap_bitmaps) REQUIRES(Locks::heap_bitmap_lock_);
void SweepSystemWeaks()
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info)
- OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+ OVERRIDE REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
const RootInfo& info)
- OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+ OVERRIDE REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
// Schedules an unmarked object for reference processing.
void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
protected:
// Returns null if the object is not marked, otherwise returns the forwarding address (same as
// object for non movable things).
mirror::Object* GetMarkedForwardAddress(mirror::Object* object)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ REQUIRES(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::heap_bitmap_lock_);
// Marks or unmarks a large object based on whether or not set is true. If set is true, then we
// mark, otherwise we unmark.
bool MarkLargeObject(const mirror::Object* obj)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Expand mark stack to 2x its current size.
- void ResizeMarkStack(size_t new_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void ResizeMarkStack(size_t new_size) SHARED_REQUIRES(Locks::mutator_lock_);
// Returns true if we should sweep the space.
bool ShouldSweepSpace(space::ContinuousSpace* space) const;
// Push an object onto the mark stack.
- void MarkStackPush(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void MarkStackPush(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
void UpdateAndMarkModUnion()
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Recursively blackens objects on the mark stack.
void ProcessMarkStack()
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+ REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
// 3 pass mark compact approach.
- void Compact() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+ void Compact() REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
// Calculate the forwarding address of objects marked as "live" in the objects_before_forwarding
// bitmap.
void CalculateObjectForwardingAddresses()
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+ REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
// Update the references of objects by using the forwarding addresses.
- void UpdateReferences() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+ void UpdateReferences() REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
// Move objects and restore lock words.
- void MoveObjects() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void MoveObjects() REQUIRES(Locks::mutator_lock_);
// Move a single object to its forward address.
- void MoveObject(mirror::Object* obj, size_t len) EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void MoveObject(mirror::Object* obj, size_t len) REQUIRES(Locks::mutator_lock_);
// Mark a single object.
virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr) OVERRIDE
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
virtual mirror::Object* IsMarked(mirror::Object* obj) OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::heap_bitmap_lock_)
+ REQUIRES(Locks::mutator_lock_);
virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj) OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
- void ForwardObject(mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
+ SHARED_REQUIRES(Locks::heap_bitmap_lock_)
+ REQUIRES(Locks::mutator_lock_);
+ void ForwardObject(mirror::Object* obj) REQUIRES(Locks::heap_bitmap_lock_,
Locks::mutator_lock_);
// Update a single heap reference.
void UpdateHeapReference(mirror::HeapReference<mirror::Object>* reference)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::heap_bitmap_lock_)
+ REQUIRES(Locks::mutator_lock_);
// Update all of the references of a single object.
void UpdateObjectReferences(mirror::Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::heap_bitmap_lock_)
+ REQUIRES(Locks::mutator_lock_);
// Revoke all the thread-local buffers.
void RevokeAllThreadLocalBuffers();
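
Note: in the header above, exclusive REQUIRES marks methods that mutate or move objects, while SHARED_REQUIRES marks read-side methods; clang then checks that each caller holds the lock in at least the stated mode. A self-contained toy of what -Wthread-safety enforces (annotated wrapper types assumed here, not ART's Mutex classes):

// Compile with: clang++ -c -Wthread-safety toy.cc
class __attribute__((capability("mutex"))) RWMutex {
 public:
  void ExclusiveLock()   __attribute__((acquire_capability()));
  void ExclusiveUnlock() __attribute__((release_capability()));
  void SharedLock()      __attribute__((acquire_shared_capability()));
  void SharedUnlock()    __attribute__((release_shared_capability()));
};

RWMutex heap_bitmap_lock;

void Sweep()  __attribute__((requires_capability(heap_bitmap_lock)));         // like REQUIRES
void Verify() __attribute__((requires_shared_capability(heap_bitmap_lock)));  // like SHARED_REQUIRES

void Caller() {
  heap_bitmap_lock.SharedLock();
  Verify();  // OK: a shared hold satisfies a shared requirement.
  Sweep();   // warning: requires exclusive hold, only shared is held.
  heap_bitmap_lock.SharedUnlock();
}
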
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index abb1d3d..7f2c204 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -365,7 +365,7 @@
: mark_sweep_(mark_sweep), holder_(holder), offset_(offset) {
}
- void operator()(const mirror::Object* obj) const ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS {
+ void operator()(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
if (kProfileLargeObjects) {
// TODO: Differentiate between marking and testing somehow.
++mark_sweep_->large_object_test_;
@@ -522,7 +522,7 @@
explicit VerifyRootMarkedVisitor(MarkSweep* collector) : collector_(collector) { }
void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
CHECK(collector_->IsMarked(root) != nullptr) << info.ToString();
}
@@ -547,7 +547,7 @@
class VerifyRootVisitor : public SingleRootVisitor {
public:
void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
// See if the root is on any space bitmap.
auto* heap = Runtime::Current()->GetHeap();
if (heap->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) {
@@ -597,8 +597,7 @@
: mark_sweep_(mark_sweep) {}
void operator()(mirror::Object* obj) const ALWAYS_INLINE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
if (kCheckLocks) {
Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
@@ -616,8 +615,8 @@
}
void operator()(mirror::Class* klass, mirror::Reference* ref) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(Locks::heap_bitmap_lock_) {
collector_->DelayReferenceReferent(klass, ref);
}
@@ -649,13 +648,33 @@
protected:
class MarkObjectParallelVisitor {
public:
- explicit MarkObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task,
- MarkSweep* mark_sweep) ALWAYS_INLINE
- : chunk_task_(chunk_task), mark_sweep_(mark_sweep) {}
+ ALWAYS_INLINE explicit MarkObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task,
+ MarkSweep* mark_sweep)
+ : chunk_task_(chunk_task), mark_sweep_(mark_sweep) {}
- void operator()(mirror::Object* obj, MemberOffset offset, bool /* static */) const ALWAYS_INLINE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
+ void operator()(mirror::Object* obj, MemberOffset offset, bool /* static */) const
+ ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
+ Mark(obj->GetFieldObject<mirror::Object>(offset));
+ }
+
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (!root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (kCheckLocks) {
+ Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
+ Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
+ }
+ Mark(root->AsMirrorPtr());
+ }
+
+ private:
+ void Mark(mirror::Object* ref) const ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
if (ref != nullptr && mark_sweep_->MarkObjectParallel(ref)) {
if (kUseFinger) {
std::atomic_thread_fence(std::memory_order_seq_cst);
@@ -668,7 +687,6 @@
}
}
- private:
MarkStackTask<kUseFinger>* const chunk_task_;
MarkSweep* const mark_sweep_;
};
@@ -679,8 +697,8 @@
: chunk_task_(chunk_task) {}
// No thread safety analysis since multiple threads will use this visitor.
- void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(Locks::heap_bitmap_lock_) {
MarkSweep* const mark_sweep = chunk_task_->mark_sweep_;
MarkObjectParallelVisitor mark_visitor(chunk_task_, mark_sweep);
DelayReferenceReferentVisitor ref_visitor(mark_sweep);
@@ -707,7 +725,7 @@
size_t mark_stack_pos_;
ALWAYS_INLINE void MarkStackPush(mirror::Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
// Mark stack overflow, give 1/2 the stack to the thread pool as a new work task.
mark_stack_pos_ /= 2;
@@ -725,8 +743,8 @@
}
// Scans all of the objects
- virtual void Run(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ virtual void Run(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(Locks::heap_bitmap_lock_) {
UNUSED(self);
ScanObjectParallelVisitor visitor(this);
// TODO: Tune this.
@@ -1015,7 +1033,7 @@
explicit VerifySystemWeakVisitor(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}
virtual mirror::Object* IsMarked(mirror::Object* obj) OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
mark_sweep_->VerifyIsLive(obj);
return obj;
}
@@ -1048,8 +1066,8 @@
}
void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(Locks::heap_bitmap_lock_) {
for (size_t i = 0; i < count; ++i) {
mark_sweep_->MarkObjectNonNullParallel(*roots[i]);
}
@@ -1057,8 +1075,8 @@
void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(Locks::heap_bitmap_lock_) {
for (size_t i = 0; i < count; ++i) {
mark_sweep_->MarkObjectNonNullParallel(roots[i]->AsMirrorPtr());
}
@@ -1259,8 +1277,8 @@
}
void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
- ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(Locks::heap_bitmap_lock_) {
if (kCheckLocks) {
Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
@@ -1268,6 +1286,22 @@
mark_sweep_->MarkObject(obj->GetFieldObject<mirror::Object>(offset), obj, offset);
}
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
+ if (!root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
+ if (kCheckLocks) {
+ Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
+ Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
+ }
+ mark_sweep_->MarkObject(root->AsMirrorPtr());
+ }
+
private:
MarkSweep* const mark_sweep_;
};
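
Note: the MarkObjectParallelVisitor change above is a small refactor: the field path (operator()) and the new root paths now funnel into one private Mark() helper instead of duplicating the null-check/mark/push sequence. A condensed, self-contained model of the pattern (toy types, not ART's):

#include <vector>

struct Object { bool marked = false; };

class ParallelMarkVisitor {
 public:
  explicit ParallelMarkVisitor(std::vector<Object*>* stack) : stack_(stack) {}

  // Field-reference entry point (stands in for operator()(obj, offset, ...)).
  void VisitField(Object* ref) const { Mark(ref); }

  // Root entry point (stands in for VisitRoot on a CompressedReference).
  void VisitRoot(Object* root) const { Mark(root); }

 private:
  // Single marking path shared by every entry point.
  void Mark(Object* ref) const {
    if (ref != nullptr && !ref->marked) {
      ref->marked = true;
      stack_->push_back(ref);  // defer scanning to the mark stack
    }
  }

  std::vector<Object*>* const stack_;
};
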
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 7692b06..606be63 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -58,15 +58,14 @@
~MarkSweep() {}
- virtual void RunPhases() OVERRIDE NO_THREAD_SAFETY_ANALYSIS;
+ virtual void RunPhases() OVERRIDE REQUIRES(!mark_stack_lock_);
void InitializePhase();
- void MarkingPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void PausePhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
- void ReclaimPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void FinishPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void MarkingPhase() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
+ void PausePhase() REQUIRES(Locks::mutator_lock_, !mark_stack_lock_);
+ void ReclaimPhase() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
+ void FinishPhase();
virtual void MarkReachableObjects()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
bool IsConcurrent() const {
return is_concurrent_;
@@ -88,113 +87,96 @@
// Marks all objects in the root set at the start of a garbage collection.
void MarkRoots(Thread* self)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
void MarkNonThreadRoots()
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
void MarkConcurrentRoots(VisitRootFlags flags)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
void MarkRootsCheckpoint(Thread* self, bool revoke_ros_alloc_thread_local_buffers_at_checkpoint)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
// Builds a mark stack and recursively mark until it empties.
void RecursiveMark()
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
// Bind the live bits to the mark bits of bitmaps for spaces that are never collected, ie
// the image. Mark that portion of the heap as immune.
- virtual void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ virtual void BindBitmaps() SHARED_REQUIRES(Locks::mutator_lock_);
// Builds a mark stack with objects on dirty cards and recursively mark until it empties.
void RecursiveMarkDirtyObjects(bool paused, uint8_t minimum_age)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
// Remarks the root set after completing the concurrent mark.
void ReMarkRoots()
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
void ProcessReferences(Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
// Update and mark references from immune spaces.
void UpdateAndMarkModUnion()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
// Pre clean cards to reduce how much work is needed in the pause.
void PreCleanCards()
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
// Sweeps unmarked objects to complete the garbage collection. Virtual as by default it sweeps
// all allocation spaces. Partial and sticky GCs want to just sweep a subset of the heap.
- virtual void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ virtual void Sweep(bool swap_bitmaps) REQUIRES(Locks::heap_bitmap_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Sweeps unmarked objects to complete the garbage collection.
- void SweepLargeObjects(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ void SweepLargeObjects(bool swap_bitmaps) REQUIRES(Locks::heap_bitmap_lock_);
// Sweep only pointers within an array. WARNING: Trashes objects.
void SweepArray(accounting::ObjectStack* allocation_stack_, bool swap_bitmaps)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
// Blackens an object.
void ScanObject(mirror::Object* obj)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
// No thread safety analysis due to lambdas.
template<typename MarkVisitor, typename ReferenceVisitor>
void ScanObjectVisit(mirror::Object* obj, const MarkVisitor& visitor,
const ReferenceVisitor& ref_visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
void SweepSystemWeaks(Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_);
static mirror::Object* VerifySystemWeakIsLiveCallback(mirror::Object* obj, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
void VerifySystemWeaks()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
// Verify that an object is live, either in a live bitmap or in the allocation stack.
void VerifyIsLive(const mirror::Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref) OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
const RootInfo& info) OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
// Marks an object.
virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
void MarkObject(mirror::Object* obj, mirror::Object* holder, MemberOffset offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* ref) OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
Barrier& GetBarrier() {
return *gc_barrier_;
@@ -202,21 +184,20 @@
// Schedules an unmarked object for reference processing.
void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
protected:
// Returns object if the object is marked in the heap bitmap, otherwise null.
virtual mirror::Object* IsMarked(mirror::Object* object) OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ SHARED_REQUIRES(Locks::heap_bitmap_lock_);
void MarkObjectNonNull(mirror::Object* obj, mirror::Object* holder = nullptr,
MemberOffset offset = MemberOffset(0))
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
// Marks an object atomically, safe to use from multiple threads.
void MarkObjectNonNullParallel(mirror::Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
// Returns true if we need to add obj to a mark stack.
bool MarkObjectParallel(mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
@@ -227,36 +208,34 @@
NO_THREAD_SAFETY_ANALYSIS;
// Expand mark stack to 2x its current size.
- void ExpandMarkStack() EXCLUSIVE_LOCKS_REQUIRED(mark_stack_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void ResizeMarkStack(size_t new_size) EXCLUSIVE_LOCKS_REQUIRED(mark_stack_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void ExpandMarkStack() REQUIRES(mark_stack_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ void ResizeMarkStack(size_t new_size) REQUIRES(mark_stack_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Returns how many threads we should use for the current GC phase based on if we are paused,
// whether or not we care about pauses.
size_t GetThreadCount(bool paused) const;
// Push a single reference on a mark stack.
- void PushOnMarkStack(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void PushOnMarkStack(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!mark_stack_lock_);
// Blackens objects grayed during a garbage collection.
void ScanGrayObjects(bool paused, uint8_t minimum_age)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
- virtual void ProcessMarkStack() OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ virtual void ProcessMarkStack() OVERRIDE REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ProcessMarkStack(false);
}
// Recursively blackens objects on the mark stack.
void ProcessMarkStack(bool paused)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
void ProcessMarkStackParallel(size_t thread_count)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
// Used to get around thread safety annotations. The call is from MarkingPhase and is guarded by
// IsExclusiveHeld.
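
Note: mark_sweep.h is where the new negative capabilities do real work. REQUIRES(!mark_stack_lock_) on the phase and marking methods records that the lock must not already be held on entry, so the methods can take it internally without self-deadlock; unlike the old LOCKS_EXCLUDED, this is checked at call sites once -Wthread-safety-negative is on. A toy of the warning it produces (annotated wrapper type assumed):

// Compile with: clang++ -c -Wthread-safety -Wthread-safety-negative toy.cc
class __attribute__((capability("mutex"))) ToyMutex {
 public:
  void Lock()   __attribute__((acquire_capability()));
  void Unlock() __attribute__((release_capability()));
};

ToyMutex mark_stack_lock;

// Negative requirement: the lock must NOT be held when this is entered,
// because the body acquires it itself.
void PushOnMarkStack() __attribute__((requires_capability(!mark_stack_lock))) {
  mark_stack_lock.Lock();
  // ... grow or push onto the stack ...
  mark_stack_lock.Unlock();
}

void BadCaller() __attribute__((requires_capability(mark_stack_lock))) {
  PushOnMarkStack();  // warning: would self-deadlock; '!mark_stack_lock' unmet
}
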
diff --git a/runtime/gc/collector/partial_mark_sweep.h b/runtime/gc/collector/partial_mark_sweep.h
index 1a211cd..7b69bce 100644
--- a/runtime/gc/collector/partial_mark_sweep.h
+++ b/runtime/gc/collector/partial_mark_sweep.h
@@ -37,7 +37,7 @@
// Bind the live bits to the mark bits of bitmaps for spaces that aren't collected for partial
// collections, ie the Zygote space. Also mark this space as immune. Virtual as overridden by
// StickyMarkSweep.
- virtual void BindBitmaps() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ virtual void BindBitmaps() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(PartialMarkSweep);
diff --git a/runtime/gc/collector/semi_space-inl.h b/runtime/gc/collector/semi_space-inl.h
index a7de44f..06d20f5 100644
--- a/runtime/gc/collector/semi_space-inl.h
+++ b/runtime/gc/collector/semi_space-inl.h
@@ -83,6 +83,14 @@
}
}
+template<bool kPoisonReferences>
+inline void SemiSpace::MarkObjectIfNotInToSpace(
+ mirror::ObjectReference<kPoisonReferences, mirror::Object>* obj_ptr) {
+ if (!to_space_->HasAddress(obj_ptr->AsMirrorPtr())) {
+ MarkObject(obj_ptr);
+ }
+}
+
} // namespace collector
} // namespace gc
} // namespace art
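
Note: the new MarkObjectIfNotInToSpace is a thin guard: the same root may be visited more than once, and a reference that already points into the to-space was forwarded on an earlier visit, so it is filtered out before MarkObject runs. A toy model of the idea (illustrative types):

#include <cstdint>

struct Space {  // stand-in for space::ContinuousMemMapAllocSpace
  uintptr_t begin, end;
  bool HasAddress(const void* p) const {
    uintptr_t a = reinterpret_cast<uintptr_t>(p);
    return begin <= a && a < end;
  }
};

struct Collector {
  Space to_space;

  void MarkObject(void** ref) {
    // The real collector forwards *ref into the to-space and marks it; elided.
  }

  // Skip references already in the to-space: they were forwarded by an
  // earlier visit and must not be processed a second time.
  void MarkObjectIfNotInToSpace(void** ref) {
    if (!to_space.HasAddress(*ref)) {
      MarkObject(ref);
    }
  }
};
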
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 2a9f47a..63def24 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -273,8 +273,7 @@
class SemiSpaceScanObjectVisitor {
public:
explicit SemiSpaceScanObjectVisitor(SemiSpace* ss) : semi_space_(ss) {}
- void operator()(Object* obj) const EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_,
- Locks::heap_bitmap_lock_) {
+ void operator()(Object* obj) const REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
DCHECK(obj != nullptr);
semi_space_->ScanObject(obj);
}
@@ -289,15 +288,33 @@
from_space_(from_space) {}
void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
+ SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
if (from_space_->HasAddress(ref)) {
Runtime::Current()->GetHeap()->DumpObject(LOG(INFO), obj);
LOG(FATAL) << ref << " found in from space";
}
}
+
+ // TODO: Remove NO_THREAD_SAFETY_ANALYSIS when clang better understands visitors.
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ NO_THREAD_SAFETY_ANALYSIS {
+ if (!root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ NO_THREAD_SAFETY_ANALYSIS {
+ if (kIsDebugBuild) {
+ Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
+ Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
+ }
+ CHECK(!from_space_->HasAddress(root->AsMirrorPtr()));
+ }
+
private:
- space::ContinuousMemMapAllocSpace* from_space_;
+ space::ContinuousMemMapAllocSpace* const from_space_;
};
void SemiSpace::VerifyNoFromSpaceReferences(Object* obj) {
@@ -310,10 +327,11 @@
public:
explicit SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor(SemiSpace* ss) : semi_space_(ss) {}
void operator()(Object* obj) const
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
DCHECK(obj != nullptr);
semi_space_->VerifyNoFromSpaceReferences(obj);
}
+
private:
SemiSpace* const semi_space_;
};
@@ -665,17 +683,35 @@
}
void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const ALWAYS_INLINE
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
// Object was already verified when we scanned it.
collector_->MarkObject(obj->GetFieldObjectReferenceAddr<kVerifyNone>(offset));
}
void operator()(mirror::Class* klass, mirror::Reference* ref) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
collector_->DelayReferenceReferent(klass, ref);
}
+ // TODO: Remove NO_THREAD_SAFETY_ANALYSIS when clang better understands visitors.
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ NO_THREAD_SAFETY_ANALYSIS {
+ if (!root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ NO_THREAD_SAFETY_ANALYSIS {
+ if (kIsDebugBuild) {
+ Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
+ Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
+ }
+ // We may visit the same root multiple times, so avoid marking things in the to-space since
+ // this is not handled by the GC.
+ collector_->MarkObjectIfNotInToSpace(root);
+ }
+
private:
SemiSpace* const collector_;
};
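
Note: where the analysis cannot follow a visitor (the TODOs above), the change falls back to dynamic checking: NO_THREAD_SAFETY_ANALYSIS silences clang, and kIsDebugBuild-guarded Assert*Held() calls keep the lock invariant verified at runtime in debug builds. A minimal sketch of the pattern:

// Sketch of the "silence statically, assert dynamically" pattern.
#include <cassert>

#ifdef NDEBUG
static constexpr bool kIsDebugBuild = false;
#else
static constexpr bool kIsDebugBuild = true;
#endif

struct ToyLock {
  bool exclusively_held = false;
  void AssertExclusiveHeld() const { assert(exclusively_held); }
};

ToyLock mutator_lock;

__attribute__((no_thread_safety_analysis))  // clang can't see through the visitor
void VisitRoot(void** root) {
  if (kIsDebugBuild) {
    mutator_lock.AssertExclusiveHeld();  // runtime stand-in for REQUIRES(...)
  }
  // ... update *root ...
}
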
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index 6b7ea0d..b9246ca 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -66,13 +66,13 @@
virtual void RunPhases() OVERRIDE NO_THREAD_SAFETY_ANALYSIS;
virtual void InitializePhase();
- virtual void MarkingPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
- virtual void ReclaimPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
- virtual void FinishPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ virtual void MarkingPhase() REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Locks::heap_bitmap_lock_);
+ virtual void ReclaimPhase() REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Locks::heap_bitmap_lock_);
+ virtual void FinishPhase() REQUIRES(Locks::mutator_lock_);
void MarkReachableObjects()
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+ REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
virtual GcType GetGcType() const OVERRIDE {
return kGcTypePartial;
}
@@ -101,94 +101,98 @@
// Updates obj_ptr if the object has moved.
template<bool kPoisonReferences>
void MarkObject(mirror::ObjectReference<kPoisonReferences, mirror::Object>* obj_ptr)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+ template<bool kPoisonReferences>
+ void MarkObjectIfNotInToSpace(mirror::ObjectReference<kPoisonReferences, mirror::Object>* obj_ptr)
+ REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
virtual mirror::Object* MarkObject(mirror::Object* root) OVERRIDE
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr) OVERRIDE
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
void ScanObject(mirror::Object* obj)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
void VerifyNoFromSpaceReferences(mirror::Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
// Marks the root set at the start of a garbage collection.
void MarkRoots()
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
// Bind the live bits to the mark bits of bitmaps for spaces that are never collected, ie
// the image. Mark that portion of the heap as immune.
- virtual void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+ virtual void BindBitmaps() SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Locks::heap_bitmap_lock_);
void UnBindBitmaps()
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_);
- void ProcessReferences(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void ProcessReferences(Thread* self) REQUIRES(Locks::mutator_lock_);
// Sweeps unmarked objects to complete the garbage collection.
- virtual void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ virtual void Sweep(bool swap_bitmaps) REQUIRES(Locks::heap_bitmap_lock_);
// Sweeps unmarked objects to complete the garbage collection.
- void SweepLargeObjects(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ void SweepLargeObjects(bool swap_bitmaps) REQUIRES(Locks::heap_bitmap_lock_);
void SweepSystemWeaks()
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) OVERRIDE
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+ REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
const RootInfo& info) OVERRIDE
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+ REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
// Schedules an unmarked object for reference processing.
void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
protected:
// Returns null if the object is not marked, otherwise returns the forwarding address (same as
// object for non movable things).
virtual mirror::Object* IsMarked(mirror::Object* object) OVERRIDE
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ REQUIRES(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::heap_bitmap_lock_);
virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* object) OVERRIDE
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ REQUIRES(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::heap_bitmap_lock_);
// Marks or unmarks a large object based on whether or not set is true. If set is true, then we
// mark, otherwise we unmark.
bool MarkLargeObject(const mirror::Object* obj)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Expand mark stack to 2x its current size.
- void ResizeMarkStack(size_t new_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void ResizeMarkStack(size_t new_size) SHARED_REQUIRES(Locks::mutator_lock_);
// Returns true if we should sweep the space.
virtual bool ShouldSweepSpace(space::ContinuousSpace* space) const;
// Push an object onto the mark stack.
- void MarkStackPush(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void MarkStackPush(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
void UpdateAndMarkModUnion()
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Recursively blackens objects on the mark stack.
void ProcessMarkStack()
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+ REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
inline mirror::Object* GetForwardingAddressInFromSpace(mirror::Object* obj) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Revoke all the thread-local buffers.
void RevokeAllThreadLocalBuffers();
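
Note: the MarkObjectIfNotInToSpace declaration above pairs with the definition added to semi_space-inl.h earlier in this change, following the usual -inl.h split: templates and hot inline bodies live in a separate inline header that only the translation units needing them include. A generic sketch of the convention (file names illustrative):

// widget.h -- keep the class header light: declaration only.
template <bool kFlag>
void Process(int* value);

// widget-inl.h -- definition, included only by .cc files that call it.
// #include "widget.h"
template <bool kFlag>
inline void Process(int* value) {
  if (kFlag) {
    ++*value;
  }
}
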
diff --git a/runtime/gc/collector/sticky_mark_sweep.h b/runtime/gc/collector/sticky_mark_sweep.h
index b9ef137..e8e70de 100644
--- a/runtime/gc/collector/sticky_mark_sweep.h
+++ b/runtime/gc/collector/sticky_mark_sweep.h
@@ -36,15 +36,15 @@
protected:
// Bind the live bits to the mark bits of bitmaps for all spaces, all spaces other than the
// alloc space will be marked as immune.
- void BindBitmaps() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void BindBitmaps() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
void MarkReachableObjects() OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(Locks::heap_bitmap_lock_);
void Sweep(bool swap_bitmaps) OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(Locks::heap_bitmap_lock_);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(StickyMarkSweep);
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 2b94cf1..5f617bd 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1277,7 +1277,7 @@
FinishGC(self, collector::kGcTypeNone);
size_t native_reclaimed = 0;
-#ifdef HAVE_ANDROID_OS
+#ifdef __ANDROID__
// Only trim the native heap if we don't care about pauses.
if (!CareAboutPauseTimes()) {
#if defined(USE_DLMALLOC)
@@ -1290,7 +1290,7 @@
UNIMPLEMENTED(WARNING) << "Add trimming support";
#endif
}
-#endif // HAVE_ANDROID_OS
+#endif // __ANDROID__
uint64_t end_ns = NanoTime();
VLOG(heap) << "Heap trim of managed (duration=" << PrettyDuration(gc_heap_end_ns - start_ns)
<< ", advised=" << PrettySize(managed_reclaimed) << ") and native (duration="
@@ -1695,11 +1695,11 @@
class InstanceCounter {
public:
InstanceCounter(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from, uint64_t* counts)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: classes_(classes), use_is_assignable_from_(use_is_assignable_from), counts_(counts) {
}
static void Callback(mirror::Object* obj, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
InstanceCounter* instance_counter = reinterpret_cast<InstanceCounter*>(arg);
mirror::Class* instance_class = obj->GetClass();
CHECK(instance_class != nullptr);
@@ -1731,11 +1731,11 @@
class InstanceCollector {
public:
InstanceCollector(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: class_(c), max_count_(max_count), instances_(instances) {
}
static void Callback(mirror::Object* obj, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
DCHECK(arg != nullptr);
InstanceCollector* instance_collector = reinterpret_cast<InstanceCollector*>(arg);
if (obj->GetClass() == instance_collector->class_) {
@@ -1763,12 +1763,12 @@
public:
ReferringObjectsFinder(mirror::Object* object, int32_t max_count,
std::vector<mirror::Object*>& referring_objects)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: object_(object), max_count_(max_count), referring_objects_(referring_objects) {
}
static void Callback(mirror::Object* obj, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
reinterpret_cast<ReferringObjectsFinder*>(arg)->operator()(obj);
}
@@ -1780,14 +1780,18 @@
}
// For Object::VisitReferences.
- void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
if (ref == object_ && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
referring_objects_.push_back(obj);
}
}
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
+ const {}
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
+
private:
const mirror::Object* const object_;
const uint32_t max_count_;
@@ -2111,7 +2115,7 @@
const bool is_running_on_memory_tool_;
static void Callback(mirror::Object* obj, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(arg != nullptr);
BinContext* context = reinterpret_cast<BinContext*>(arg);
ZygoteCompactingCollector* collector = context->collector_;
@@ -2139,7 +2143,7 @@
}
virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
+ REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
size_t obj_size = obj->SizeOf();
size_t alloc_size = RoundUp(obj_size, kObjectAlignment);
mirror::Object* forward_address;
@@ -2583,7 +2587,7 @@
explicit RootMatchesObjectVisitor(const mirror::Object* obj) : obj_(obj) { }
void VisitRoot(mirror::Object* root, const RootInfo& info)
- OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
if (root == obj_) {
LOG(INFO) << "Object " << obj_ << " is a root " << info.ToString();
}
@@ -2605,23 +2609,22 @@
class VerifyReferenceVisitor : public SingleRootVisitor {
public:
explicit VerifyReferenceVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
: heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {}
size_t GetFailureCount() const {
return fail_count_->LoadSequentiallyConsistent();
}
- void operator()(mirror::Class* klass, mirror::Reference* ref) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- UNUSED(klass);
+ void operator()(mirror::Class* klass ATTRIBUTE_UNUSED, mirror::Reference* ref) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (verify_referent_) {
VerifyReference(ref, ref->GetReferent(), mirror::Reference::ReferentOffset());
}
}
- void operator()(mirror::Object* obj, MemberOffset offset, bool /*is_static*/) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
VerifyReference(obj, obj->GetFieldObject<mirror::Object>(offset), offset);
}
@@ -2629,8 +2632,20 @@
return heap_->IsLiveObjectLocked(obj, true, false, true);
}
- void VisitRoot(mirror::Object* root, const RootInfo& root_info) OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (!root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ const_cast<VerifyReferenceVisitor*>(this)->VisitRoot(
+ root->AsMirrorPtr(), RootInfo(kRootVMInternal));
+ }
+
+ virtual void VisitRoot(mirror::Object* root, const RootInfo& root_info) OVERRIDE
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (root == nullptr) {
LOG(ERROR) << "Root is null with info " << root_info.GetType();
} else if (!VerifyReference(nullptr, root, MemberOffset(0))) {
@@ -2747,8 +2762,8 @@
: heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {
}
- void operator()(mirror::Object* obj) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ void operator()(mirror::Object* obj)
+ SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
// Note: we are verifying the references in obj but not obj itself, this is because obj must
// be live or else how did we find it in the live bitmap?
VerifyReferenceVisitor visitor(heap_, fail_count_, verify_referent_);
@@ -2757,13 +2772,12 @@
}
static void VisitCallback(mirror::Object* obj, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
VerifyObjectVisitor* visitor = reinterpret_cast<VerifyObjectVisitor*>(arg);
visitor->operator()(obj);
}
- void VerifyRoots() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(Locks::heap_bitmap_lock_) {
+ void VerifyRoots() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_) {
ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
VerifyReferenceVisitor visitor(heap_, fail_count_, verify_referent_);
Runtime::Current()->VisitRoots(&visitor);
@@ -2855,11 +2869,16 @@
class VerifyReferenceCardVisitor {
public:
VerifyReferenceCardVisitor(Heap* heap, bool* failed)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_,
+ SHARED_REQUIRES(Locks::mutator_lock_,
Locks::heap_bitmap_lock_)
: heap_(heap), failed_(failed) {
}
+ // There are no card marks for native roots on a class.
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
+ const {}
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
+
// TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
// annotalysis on visitors.
void operator()(mirror::Object* obj, MemberOffset offset, bool is_static) const
@@ -2932,7 +2951,7 @@
failed_(false) {}
void operator()(mirror::Object* obj) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_));
obj->VisitReferences<true>(visitor, VoidFunctor());
}
@@ -3425,7 +3444,7 @@
const bool force_full_; // If true, force full (or partial) collection.
};
-static bool CanAddHeapTask(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_) {
+static bool CanAddHeapTask(Thread* self) REQUIRES(!Locks::runtime_shutdown_lock_) {
Runtime* runtime = Runtime::Current();
return runtime != nullptr && runtime->IsFinishedStarting() && !runtime->IsShuttingDown(self) &&
!self->IsHandlingStackOverflow();
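
Note: two of the heap.cc visitors above (ReferringObjectsFinder and VerifyReferenceCardVisitor) gain deliberately empty root hooks: a visitor handed to Object::VisitReferences apparently must now provide VisitRootIfNonNull/VisitRoot, so one that only cares about field references opts out with no-op bodies rather than by omission. The shape, as a toy:

// Toy field-only visitor: the root hooks must exist, but may do nothing.
struct CompressedRef;  // opaque stand-in for mirror::CompressedReference<>
struct Object;

struct FieldOnlyVisitor {
  void operator()(Object* obj, int offset, bool /*is_static*/) const {
    // ... inspect the field at 'offset' ...
  }
  // Intentionally empty: this visitor has nothing to do for native roots.
  void VisitRootIfNonNull(CompressedRef* /*root*/) const {}
  void VisitRoot(CompressedRef* /*root*/) const {}
};
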
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index ee3d510..09c18b8 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -188,26 +188,30 @@
template <bool kInstrumented, typename PreFenceVisitor>
mirror::Object* AllocObject(Thread* self, mirror::Class* klass, size_t num_bytes,
const PreFenceVisitor& pre_fence_visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return AllocObjectWithAllocator<kInstrumented, true>(self, klass, num_bytes,
- GetCurrentAllocator(),
- pre_fence_visitor);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_,
+ !Roles::uninterruptible_) {
+ return AllocObjectWithAllocator<kInstrumented, true>(
+ self, klass, num_bytes, GetCurrentAllocator(), pre_fence_visitor);
}
template <bool kInstrumented, typename PreFenceVisitor>
mirror::Object* AllocNonMovableObject(Thread* self, mirror::Class* klass, size_t num_bytes,
const PreFenceVisitor& pre_fence_visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return AllocObjectWithAllocator<kInstrumented, true>(self, klass, num_bytes,
- GetCurrentNonMovingAllocator(),
- pre_fence_visitor);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_,
+ !Roles::uninterruptible_) {
+ return AllocObjectWithAllocator<kInstrumented, true>(
+ self, klass, num_bytes, GetCurrentNonMovingAllocator(), pre_fence_visitor);
}
template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor>
ALWAYS_INLINE mirror::Object* AllocObjectWithAllocator(
Thread* self, mirror::Class* klass, size_t byte_count, AllocatorType allocator,
const PreFenceVisitor& pre_fence_visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_,
+ !Roles::uninterruptible_);
AllocatorType GetCurrentAllocator() const {
return current_allocator_;
@@ -219,29 +223,29 @@
// Visit all of the live objects in the heap.
void VisitObjects(ObjectCallback callback, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_);
void VisitObjectsPaused(ObjectCallback callback, void* arg)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+ REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
void CheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- void RegisterNativeAllocation(JNIEnv* env, size_t bytes);
- void RegisterNativeFree(JNIEnv* env, size_t bytes);
+ void RegisterNativeAllocation(JNIEnv* env, size_t bytes)
+ REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
+ void RegisterNativeFree(JNIEnv* env, size_t bytes)
+ REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
// Change the allocator, updates entrypoints.
void ChangeAllocator(AllocatorType allocator)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
+ REQUIRES(Locks::mutator_lock_, !Locks::runtime_shutdown_lock_);
// Transition the garbage collector during runtime, may copy objects from one space to another.
- void TransitionCollector(CollectorType collector_type);
+ void TransitionCollector(CollectorType collector_type) REQUIRES(!*gc_complete_lock_);
// Change the collector to be one of the possible options (MS, CMS, SS).
void ChangeCollector(CollectorType collector_type)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(Locks::mutator_lock_);
// The given reference is believed to be to an object in the Java heap, check the soundness of it.
// TODO: NO_THREAD_SAFETY_ANALYSIS since we call this everywhere and it is impossible to find a
@@ -249,61 +253,64 @@
void VerifyObjectBody(mirror::Object* o) NO_THREAD_SAFETY_ANALYSIS;
// Check sanity of all live references.
- void VerifyHeap() LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+ void VerifyHeap() REQUIRES(!Locks::heap_bitmap_lock_);
// Returns how many failures occurred.
size_t VerifyHeapReferences(bool verify_referents = true)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_);
bool VerifyMissingCardMarks()
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
// A weaker test than IsLiveObject or VerifyObject that doesn't require the heap lock,
// and doesn't abort on error, allowing the caller to report more
// meaningful diagnostics.
bool IsValidObjectAddress(const mirror::Object* obj) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Faster alternative to IsHeapAddress since finding if an object is in the large object space is
// very slow.
bool IsNonDiscontinuousSpaceHeapAddress(const mirror::Object* obj) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Returns true if 'obj' is a live heap object, false otherwise (including for invalid addresses).
// Requires the heap lock to be held.
bool IsLiveObjectLocked(mirror::Object* obj, bool search_allocation_stack = true,
bool search_live_stack = true, bool sorted = false)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
// Returns true if there is any chance that the object (obj) will move.
- bool IsMovableObject(const mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsMovableObject(const mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_);
// Enables us to disable compacting GC until objects are released.
- void IncrementDisableMovingGC(Thread* self);
- void DecrementDisableMovingGC(Thread* self);
+ void IncrementDisableMovingGC(Thread* self) REQUIRES(!*gc_complete_lock_);
+ void DecrementDisableMovingGC(Thread* self) REQUIRES(!*gc_complete_lock_);
// Clear all of the mark bits, doesn't clear bitmaps which have the same live bits as mark bits.
- void ClearMarkedObjects() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ void ClearMarkedObjects() REQUIRES(Locks::heap_bitmap_lock_);
// Initiates an explicit garbage collection.
- void CollectGarbage(bool clear_soft_references);
+ void CollectGarbage(bool clear_soft_references)
+ REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
// Does a concurrent GC, should only be called by the GC daemon thread
// through runtime.
- void ConcurrentGC(Thread* self, bool force_full) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
+ void ConcurrentGC(Thread* self, bool force_full)
+ REQUIRES(!Locks::runtime_shutdown_lock_, !*gc_complete_lock_, !*pending_task_lock_);
// Implements VMDebug.countInstancesOfClass and JDWP VM_InstanceCount.
// The boolean decides whether to use IsAssignableFrom or == when comparing classes.
void CountInstances(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from,
uint64_t* counts)
- LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Implements JDWP RT_Instances.
void GetInstances(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
- LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Implements JDWP OR_ReferringObjects.
- void GetReferringObjects(mirror::Object* o, int32_t max_count, std::vector<mirror::Object*>& referring_objects)
- LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void GetReferringObjects(mirror::Object* o, int32_t max_count,
+ std::vector<mirror::Object*>& referring_objects)
+ REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Removes the growth limit on the alloc space so it may grow to its maximum capacity. Used to
// implement dalvik.system.VMRuntime.clearGrowthLimit.
@@ -311,7 +318,7 @@
// Make the current growth limit the new maximum capacity, unmaps pages at the end of spaces
// which will never be used. Used to implement dalvik.system.VMRuntime.clampGrowthLimit.
- void ClampGrowthLimit() LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+ void ClampGrowthLimit() REQUIRES(!Locks::heap_bitmap_lock_);
// Target ideal heap utilization ratio, implements
// dalvik.system.VMRuntime.getTargetHeapUtilization.
@@ -326,9 +333,9 @@
// Set the heap's private space pointers to be the same as the space based on its type. Public
// due to usage by tests.
void SetSpaceAsDefault(space::ContinuousSpace* continuous_space)
- LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
- void AddSpace(space::Space* space) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
- void RemoveSpace(space::Space* space) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+ REQUIRES(!Locks::heap_bitmap_lock_);
+ void AddSpace(space::Space* space) REQUIRES(!Locks::heap_bitmap_lock_);
+ void RemoveSpace(space::Space* space) REQUIRES(!Locks::heap_bitmap_lock_);
// Set target ideal heap utilization ratio, implements
// dalvik.system.VMRuntime.setTargetHeapUtilization.
@@ -341,10 +348,11 @@
// Blocks the caller until the garbage collector becomes idle and returns the type of GC we
// waited for.
collector::GcType WaitForGcToComplete(GcCause cause, Thread* self)
- LOCKS_EXCLUDED(gc_complete_lock_);
+ REQUIRES(!*gc_complete_lock_);
// Update the heap's process state to a new value; this may cause compaction to occur.
- void UpdateProcessState(ProcessState process_state);
+ void UpdateProcessState(ProcessState process_state)
+ REQUIRES(!*pending_task_lock_, !*gc_complete_lock_);
const std::vector<space::ContinuousSpace*>& GetContinuousSpaces() const {
return continuous_spaces_;
@@ -428,7 +436,7 @@
}
// Returns the number of objects currently allocated.
- size_t GetObjectsAllocated() const LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+ size_t GetObjectsAllocated() const REQUIRES(!Locks::heap_bitmap_lock_);
// Returns the total number of objects allocated since the heap was created.
uint64_t GetObjectsAllocatedEver() const;
@@ -487,13 +495,13 @@
bool fail_ok) const;
space::Space* FindSpaceFromObject(const mirror::Object*, bool fail_ok) const;
- void DumpForSigQuit(std::ostream& os);
+ void DumpForSigQuit(std::ostream& os) REQUIRES(!*gc_complete_lock_);
// Do a pending collector transition.
- void DoPendingCollectorTransition();
+ void DoPendingCollectorTransition() REQUIRES(!*gc_complete_lock_);
// Deflate monitors, ... and trim the spaces.
- void Trim(Thread* self) LOCKS_EXCLUDED(gc_complete_lock_);
+ void Trim(Thread* self) REQUIRES(!*gc_complete_lock_);
void RevokeThreadLocalBuffers(Thread* thread);
void RevokeRosAllocThreadLocalBuffers(Thread* thread);
@@ -501,17 +509,17 @@
void AssertThreadLocalBuffersAreRevoked(Thread* thread);
void AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
void RosAllocVerification(TimingLogger* timings, const char* name)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(Locks::mutator_lock_);
- accounting::HeapBitmap* GetLiveBitmap() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ accounting::HeapBitmap* GetLiveBitmap() SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
return live_bitmap_.get();
}
- accounting::HeapBitmap* GetMarkBitmap() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ accounting::HeapBitmap* GetMarkBitmap() SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
return mark_bitmap_.get();
}
- accounting::ObjectStack* GetLiveStack() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ accounting::ObjectStack* GetLiveStack() SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
return live_stack_.get();
}
@@ -519,13 +527,12 @@
// Mark and empty stack.
void FlushAllocStack()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(Locks::heap_bitmap_lock_);
// Revoke all the thread-local allocation stacks.
void RevokeAllThreadLocalAllocationStacks(Thread* self)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_, Locks::thread_list_lock_);
+ REQUIRES(Locks::mutator_lock_, !Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_);
// Mark all the objects in the allocation stack in the specified bitmap.
// TODO: Refactor?
@@ -533,23 +540,21 @@
accounting::SpaceBitmap<kObjectAlignment>* bitmap2,
accounting::SpaceBitmap<kLargeObjectAlignment>* large_objects,
accounting::ObjectStack* stack)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
// Mark the specified allocation stack as live.
void MarkAllocStackAsLive(accounting::ObjectStack* stack)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
// Unbind any bound bitmaps.
- void UnBindBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ void UnBindBitmaps() REQUIRES(Locks::heap_bitmap_lock_);
// DEPRECATED: Should remove in "near" future when support for multiple image spaces is added.
// Assumes there is only one image space.
space::ImageSpace* GetImageSpace() const;
// Permanently disable moving garbage collection.
- void DisableMovingGc();
+ void DisableMovingGc() REQUIRES(!*gc_complete_lock_);
space::DlMallocSpace* GetDlMallocSpace() const {
return dlmalloc_space_;
@@ -595,8 +600,8 @@
std::string SafePrettyTypeOf(mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
// GC performance measuring
- void DumpGcPerformanceInfo(std::ostream& os);
- void ResetGcPerformanceInfo();
+ void DumpGcPerformanceInfo(std::ostream& os) REQUIRES(!*gc_complete_lock_);
+ void ResetGcPerformanceInfo() REQUIRES(!*gc_complete_lock_);
// Returns true if we currently care about pause times.
bool CareAboutPauseTimes() const {
@@ -656,16 +661,16 @@
return false;
}
- bool IsMovingGCDisabled(Thread* self) {
+ bool IsMovingGCDisabled(Thread* self) REQUIRES(!*gc_complete_lock_) {
MutexLock mu(self, *gc_complete_lock_);
return disable_moving_gc_count_ > 0;
}
// Request an asynchronous trim.
- void RequestTrim(Thread* self) LOCKS_EXCLUDED(pending_task_lock_);
+ void RequestTrim(Thread* self) REQUIRES(!*pending_task_lock_);
// Request asynchronous GC.
- void RequestConcurrentGC(Thread* self, bool force_full) LOCKS_EXCLUDED(pending_task_lock_);
+ void RequestConcurrentGC(Thread* self, bool force_full) REQUIRES(!*pending_task_lock_);
// Whether or not we may use a garbage collector, used so that we only create collectors we need.
bool MayUseCollector(CollectorType type) const;
@@ -680,8 +685,8 @@
uint64_t GetGcTime() const;
uint64_t GetBlockingGcCount() const;
uint64_t GetBlockingGcTime() const;
- void DumpGcCountRateHistogram(std::ostream& os) const;
- void DumpBlockingGcCountRateHistogram(std::ostream& os) const;
+ void DumpGcCountRateHistogram(std::ostream& os) const REQUIRES(!*gc_complete_lock_);
+ void DumpBlockingGcCountRateHistogram(std::ostream& os) const REQUIRES(!*gc_complete_lock_);
// Allocation tracking support
// Callers to this function use double-checked locking to ensure safety on allocation_records_
@@ -689,33 +694,33 @@
return alloc_tracking_enabled_.LoadRelaxed();
}
- void SetAllocTrackingEnabled(bool enabled) EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_) {
+ void SetAllocTrackingEnabled(bool enabled) REQUIRES(Locks::alloc_tracker_lock_) {
alloc_tracking_enabled_.StoreRelaxed(enabled);
}
AllocRecordObjectMap* GetAllocationRecords() const
- EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_) {
+ REQUIRES(Locks::alloc_tracker_lock_) {
return allocation_records_.get();
}
void SetAllocationRecords(AllocRecordObjectMap* records)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_);
+ REQUIRES(Locks::alloc_tracker_lock_);
void VisitAllocationRecords(RootVisitor* visitor) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(Locks::alloc_tracker_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Locks::alloc_tracker_lock_);
void SweepAllocationRecords(IsMarkedVisitor* visitor) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(Locks::alloc_tracker_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Locks::alloc_tracker_lock_);
void DisallowNewAllocationRecords() const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(Locks::alloc_tracker_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Locks::alloc_tracker_lock_);
void AllowNewAllocationRecords() const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(Locks::alloc_tracker_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Locks::alloc_tracker_lock_);
private:
class ConcurrentGCTask;
@@ -726,10 +731,10 @@
collector::GarbageCollector* Compact(space::ContinuousMemMapAllocSpace* target_space,
space::ContinuousMemMapAllocSpace* source_space,
GcCause gc_cause)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(Locks::mutator_lock_);
void LogGC(GcCause gc_cause, collector::GarbageCollector* collector);
- void FinishGC(Thread* self, collector::GcType gc_type) LOCKS_EXCLUDED(gc_complete_lock_);
+ void FinishGC(Thread* self, collector::GcType gc_type) REQUIRES(!*gc_complete_lock_);
// Create a mem map with a preferred base address.
static MemMap* MapAnonymousPreferredAddress(const char* name, uint8_t* request_begin,
@@ -758,10 +763,10 @@
collector_type == kCollectorTypeHomogeneousSpaceCompact;
}
bool ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE void CheckConcurrentGC(Thread* self, size_t new_num_bytes_allocated,
mirror::Object** obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*pending_task_lock_, !*gc_complete_lock_);
accounting::ObjectStack* GetMarkStack() {
return mark_stack_.get();
@@ -771,7 +776,8 @@
template <bool kInstrumented, typename PreFenceVisitor>
mirror::Object* AllocLargeObject(Thread* self, mirror::Class** klass, size_t byte_count,
const PreFenceVisitor& pre_fence_visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_);
// Handles Allocate()'s slow allocation path with GC involved after
// an initial allocation attempt failed.
@@ -779,17 +785,17 @@
size_t* bytes_allocated, size_t* usable_size,
size_t* bytes_tl_bulk_allocated,
mirror::Class** klass)
- LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!Locks::thread_suspend_count_lock_, !*gc_complete_lock_, !*pending_task_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Allocate into a specific space.
mirror::Object* AllocateInto(Thread* self, space::AllocSpace* space, mirror::Class* c,
size_t bytes)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Need to do this with mutators paused so that somebody doesn't accidentally allocate into the
// wrong space.
- void SwapSemiSpaces() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SwapSemiSpaces() REQUIRES(Locks::mutator_lock_);
// Try to allocate a number of bytes, this function never does any GCs. Needs to be inlined so
// that the switch statement is constant optimized in the entrypoints.
@@ -798,17 +804,17 @@
size_t alloc_size, size_t* bytes_allocated,
size_t* usable_size,
size_t* bytes_tl_bulk_allocated)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template <bool kGrow>
ALWAYS_INLINE bool IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size);
// Returns true if the address passed in is within the address range of a continuous space.
bool IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Run the finalizers. If timeout is non zero, then we use the VMRuntime version.
void RunFinalization(JNIEnv* env, uint64_t timeout);
@@ -816,36 +822,34 @@
// Blocks the caller until the garbage collector becomes idle and returns the type of GC we
// waited for.
collector::GcType WaitForGcToCompleteLocked(GcCause cause, Thread* self)
- EXCLUSIVE_LOCKS_REQUIRED(gc_complete_lock_);
+ REQUIRES(gc_complete_lock_);
void RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time)
- LOCKS_EXCLUDED(pending_task_lock_);
+ REQUIRES(!*pending_task_lock_);
void RequestConcurrentGCAndSaveObject(Thread* self, bool force_full, mirror::Object** obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*pending_task_lock_);
bool IsGCRequestPending() const;
// Sometimes CollectGarbageInternal decides to run a different Gc than you requested. Returns
// which type of Gc was actually run.
collector::GcType CollectGarbageInternal(collector::GcType gc_plan, GcCause gc_cause,
bool clear_soft_references)
- LOCKS_EXCLUDED(gc_complete_lock_,
- Locks::heap_bitmap_lock_,
- Locks::thread_suspend_count_lock_);
+ REQUIRES(!*gc_complete_lock_, !Locks::heap_bitmap_lock_, !Locks::thread_suspend_count_lock_,
+ !*pending_task_lock_);
void PreGcVerification(collector::GarbageCollector* gc)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
+ REQUIRES(!Locks::mutator_lock_, !*gc_complete_lock_);
void PreGcVerificationPaused(collector::GarbageCollector* gc)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_);
void PrePauseRosAllocVerification(collector::GarbageCollector* gc)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(Locks::mutator_lock_);
void PreSweepingGcVerification(collector::GarbageCollector* gc)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+ REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
void PostGcVerification(collector::GarbageCollector* gc)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
+ REQUIRES(!Locks::mutator_lock_, !*gc_complete_lock_);
void PostGcVerificationPaused(collector::GarbageCollector* gc)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_);
// Update the watermark for the native allocated bytes based on the current number of native
// bytes allocated and the target utilization ratio.
@@ -855,7 +859,7 @@
collector::GarbageCollector* FindCollectorByGcType(collector::GcType gc_type);
// Create a new alloc space and compact default alloc space to it.
- HomogeneousSpaceCompactResult PerformHomogeneousSpaceCompact();
+ HomogeneousSpaceCompactResult PerformHomogeneousSpaceCompact() REQUIRES(!*gc_complete_lock_);
// Create the main free list malloc space, either a RosAlloc space or DlMalloc space.
void CreateMainMallocSpace(MemMap* mem_map, size_t initial_size, size_t growth_limit,
@@ -876,10 +880,10 @@
size_t GetPercentFree();
static void VerificationCallback(mirror::Object* obj, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ SHARED_REQUIRES(Locks::heap_bitmap_lock_);
// Swap the allocation stack with the live stack.
- void SwapStacks(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SwapStacks(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
// Clear cards and update the mod union table. When process_alloc_space_cards is true,
// if clear_alloc_space_cards is true, then we clear cards instead of ageing them. We do
@@ -889,15 +893,15 @@
// Push an object onto the allocation stack.
void PushOnAllocationStack(Thread* self, mirror::Object** obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
void PushOnAllocationStackWithInternalGC(Thread* self, mirror::Object** obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
void PushOnThreadLocalAllocationStackWithInternalGC(Thread* thread, mirror::Object** obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
void ClearConcurrentGCRequest();
- void ClearPendingTrim(Thread* self) LOCKS_EXCLUDED(pending_task_lock_);
- void ClearPendingCollectorTransition(Thread* self) LOCKS_EXCLUDED(pending_task_lock_);
+ void ClearPendingTrim(Thread* self) REQUIRES(!*pending_task_lock_);
+ void ClearPendingCollectorTransition(Thread* self) REQUIRES(!*pending_task_lock_);
// What kind of concurrency behavior is the runtime after? Currently true for concurrent mark
// sweep GC, false for other GC types.
@@ -906,23 +910,23 @@
}
// Trim the managed and native spaces by releasing unused memory back to the OS.
- void TrimSpaces(Thread* self) LOCKS_EXCLUDED(gc_complete_lock_);
+ void TrimSpaces(Thread* self) REQUIRES(!*gc_complete_lock_);
// Trim unused pages at the end of reference tables.
void TrimIndirectReferenceTables(Thread* self);
void VisitObjectsInternal(ObjectCallback callback, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_);
void VisitObjectsInternalRegionSpace(ObjectCallback callback, void* arg)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+ REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
- void UpdateGcCountRateHistograms() EXCLUSIVE_LOCKS_REQUIRED(gc_complete_lock_);
+ void UpdateGcCountRateHistograms() REQUIRES(gc_complete_lock_);
// GC stress mode attempts to do one GC per unique backtrace.
void CheckGcStressMode(Thread* self, mirror::Object** obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_);
// All-known continuous spaces, where objects lie within fixed bounds.
std::vector<space::ContinuousSpace*> continuous_spaces_;
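A note on the annotation change seen throughout this file: LOCKS_EXCLUDED(lock) becomes the negative capability REQUIRES(!lock), which tells Clang's thread safety analysis that the caller must not already hold the lock, so self-deadlock on non-reentrant mutexes can be caught at compile time. The !*gc_complete_lock_ spelling simply dereferences ART's Mutex* members so the attribute names the mutex object itself. A minimal self-contained sketch of the idea, using Clang's documented attributes directly rather than ART's macros:

    #define CAPABILITY(x)  __attribute__((capability(x)))
    #define ACQUIRE(...)   __attribute__((acquire_capability(__VA_ARGS__)))
    #define RELEASE(...)   __attribute__((release_capability(__VA_ARGS__)))
    #define REQUIRES(...)  __attribute__((requires_capability(__VA_ARGS__)))

    class CAPABILITY("mutex") Mutex {
     public:
      void Lock() ACQUIRE() {}
      void Unlock() RELEASE() {}
    };

    Mutex gc_complete_lock;

    // Must be called WITHOUT the lock held: the function acquires it itself.
    void FinishGC() REQUIRES(!gc_complete_lock) {
      gc_complete_lock.Lock();
      // ... mark the collection finished, broadcast to waiters ...
      gc_complete_lock.Unlock();
    }

    // Must be called WITH the lock held: the positive form used by the
    // *Locked helpers in this diff.
    void UpdateGcCountRateHistograms() REQUIRES(gc_complete_lock);

Compiling such a file with clang++ -fsyntax-only -Wthread-safety -Wthread-safety-negative flags any caller of FinishGC() that cannot be proven to be lock-free at the call site.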
diff --git a/runtime/gc/reference_processor.h b/runtime/gc/reference_processor.h
index 95877d1..d9dfedb 100644
--- a/runtime/gc/reference_processor.h
+++ b/runtime/gc/reference_processor.h
@@ -48,39 +48,39 @@
explicit ReferenceProcessor();
void ProcessReferences(bool concurrent, TimingLogger* timings, bool clear_soft_references,
gc::collector::GarbageCollector* collector)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- LOCKS_EXCLUDED(Locks::reference_processor_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(Locks::heap_bitmap_lock_)
+ REQUIRES(!Locks::reference_processor_lock_);
// The slow path bool is contained in the reference class object; it can only be set once.
// We only allow setting it with mutators suspended so that we can avoid using a lock in the
// GetReferent fast path as an optimization.
- void EnableSlowPath() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void EnableSlowPath() SHARED_REQUIRES(Locks::mutator_lock_);
void BroadcastForSlowPath(Thread* self);
// Decode the referent; may block if references are being processed.
mirror::Object* GetReferent(Thread* self, mirror::Reference* reference)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::reference_processor_lock_);
- void EnqueueClearedReferences(Thread* self) LOCKS_EXCLUDED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::reference_processor_lock_);
+ void EnqueueClearedReferences(Thread* self) REQUIRES(!Locks::mutator_lock_);
void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
collector::GarbageCollector* collector)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void UpdateRoots(IsMarkedVisitor* visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
// Make a circular list with reference if it is not enqueued. Uses the finalizer queue lock.
bool MakeCircularListIfUnenqueued(mirror::FinalizerReference* reference)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(Locks::reference_processor_lock_,
- Locks::reference_queue_finalizer_references_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Locks::reference_processor_lock_,
+ !Locks::reference_queue_finalizer_references_lock_);
private:
- bool SlowPathEnabled() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool SlowPathEnabled() SHARED_REQUIRES(Locks::mutator_lock_);
// Called by ProcessReferences.
- void DisableSlowPath(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(Locks::reference_processor_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void DisableSlowPath(Thread* self) REQUIRES(Locks::reference_processor_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// If we are preserving references, it means that some dead objects may become live; we use
// start and stop preserving to block mutators using GetReferent from getting access to these
// referents.
- void StartPreservingReferences(Thread* self) LOCKS_EXCLUDED(Locks::reference_processor_lock_);
- void StopPreservingReferences(Thread* self) LOCKS_EXCLUDED(Locks::reference_processor_lock_);
+ void StartPreservingReferences(Thread* self) REQUIRES(!Locks::reference_processor_lock_);
+ void StopPreservingReferences(Thread* self) REQUIRES(!Locks::reference_processor_lock_);
// Collector which is clearing references, used by the GetReferent to return referents which are
// already marked.
collector::GarbageCollector* collector_ GUARDED_BY(Locks::reference_processor_lock_);
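The EnableSlowPath comment above describes a double-checked scheme: because the flag only changes while all mutators are suspended, GetReferent may read it without taking reference_processor_lock_ on the common path. A schematic sketch (types and names are simplifications; ART actually keeps the flag on the java.lang.ref.Reference class object):

    #include <atomic>
    #include <mutex>

    std::atomic<bool> slow_path_enabled{false};  // Flipped only with mutators suspended.
    std::mutex reference_processor_lock;

    void* GetReferent(void* referent_field) {
      // Fast path: no lock. Safe because the flag only changes while no
      // mutator, including this one, is running.
      if (!slow_path_enabled.load(std::memory_order_acquire)) {
        return referent_field;
      }
      // Slow path: reference processing is in progress; coordinate under the
      // lock (the real code blocks here until processing completes).
      std::lock_guard<std::mutex> mu(reference_processor_lock);
      return referent_field;
    }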
diff --git a/runtime/gc/reference_queue.h b/runtime/gc/reference_queue.h
index 7d9ddf6..aabac97 100644
--- a/runtime/gc/reference_queue.h
+++ b/runtime/gc/reference_queue.h
@@ -22,6 +22,7 @@
#include <vector>
#include "atomic.h"
+#include "base/mutex.h"
#include "base/timing_logger.h"
#include "globals.h"
#include "jni.h"
@@ -53,39 +54,39 @@
// since it uses a lock to avoid a race between checking for the reference's presence and
// adding it.
void AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Reference* ref)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*lock_);
// Enqueue a reference; unlike EnqueuePendingReference, EnqueueReference checks that the
// reference IsEnqueueable. Not thread safe; used when mutators are paused to minimize lock
// overhead.
- void EnqueueReference(mirror::Reference* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void EnqueueReference(mirror::Reference* ref) SHARED_REQUIRES(Locks::mutator_lock_);
// Enqueue a reference without checking that it is enqueueable.
- void EnqueuePendingReference(mirror::Reference* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void EnqueuePendingReference(mirror::Reference* ref) SHARED_REQUIRES(Locks::mutator_lock_);
// Dequeue the first reference (returns list_).
- mirror::Reference* DequeuePendingReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Reference* DequeuePendingReference() SHARED_REQUIRES(Locks::mutator_lock_);
// Enqueues finalizer references with white referents. White referents are blackened, moved to
// the zombie field, and the referent field is cleared.
void EnqueueFinalizerReferences(ReferenceQueue* cleared_references,
collector::GarbageCollector* collector)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Walks the reference list marking any references subject to the reference clearing policy.
// References with a black referent are removed from the list. References with white referents
// biased toward saving are blackened and also removed from the list.
void ForwardSoftReferences(MarkObjectVisitor* visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Unlink the reference list, clearing reference objects with white referents. Cleared references
// registered to a reference queue are scheduled for appending by the heap worker thread.
void ClearWhiteReferences(ReferenceQueue* cleared_references,
collector::GarbageCollector* collector)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- void Dump(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- size_t GetLength() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void Dump(std::ostream& os) const SHARED_REQUIRES(Locks::mutator_lock_);
+ size_t GetLength() const SHARED_REQUIRES(Locks::mutator_lock_);
bool IsEmpty() const {
return list_ == nullptr;
@@ -93,13 +94,13 @@
void Clear() {
list_ = nullptr;
}
- mirror::Reference* GetList() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Reference* GetList() SHARED_REQUIRES(Locks::mutator_lock_) {
return list_;
}
// Visits list_, currently only used for the mark compact GC.
void UpdateRoots(IsMarkedVisitor* visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
private:
// Lock, used for parallel GC reference enqueuing. It allows for multiple threads simultaneously
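AtomicEnqueueIfNotEnqueued is a check-then-act that needs the queue's lock, as the comment above notes. A schematic of why (container and names are illustrative; the real code tests the reference's pending-next field rather than a set):

    #include <mutex>
    #include <unordered_set>

    std::mutex queue_lock;
    std::unordered_set<void*> enqueued;  // Stand-in for the pending-reference list.

    void AtomicEnqueueIfNotEnqueued(void* ref) {
      std::lock_guard<std::mutex> mu(queue_lock);
      // Check and insert must happen in one critical section, or two racing
      // threads could both see "absent" and enqueue the same reference twice.
      if (enqueued.insert(ref).second) {
        // ... link ref into the actual queue here ...
      }
    }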
diff --git a/runtime/gc/space/bump_pointer_space-inl.h b/runtime/gc/space/bump_pointer_space-inl.h
index 338a41e..2263797 100644
--- a/runtime/gc/space/bump_pointer_space-inl.h
+++ b/runtime/gc/space/bump_pointer_space-inl.h
@@ -87,7 +87,7 @@
}
inline size_t BumpPointerSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
size_t num_bytes = obj->SizeOf();
if (usable_size != nullptr) {
*usable_size = RoundUp(num_bytes, kAlignment);
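RoundUp here pads the object size to the space's 8-byte kAlignment. For power-of-two alignments this is conventionally mask arithmetic; a sketch of the usual definition (ART's own helper is defined elsewhere in the runtime, shown here only to make the arithmetic concrete):

    #include <cstddef>

    constexpr size_t RoundUp(size_t n, size_t alignment) {
      // Valid only for power-of-two alignments.
      return (n + alignment - 1) & ~(alignment - 1);
    }

    static_assert(RoundUp(13, 8) == 16, "13 rounds up to the next 8-byte boundary");
    static_assert(RoundUp(16, 8) == 16, "already-aligned values are unchanged");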
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index df43606..0e27d84 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -51,14 +51,14 @@
// Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
- OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ OVERRIDE REQUIRES(Locks::mutator_lock_);
mirror::Object* AllocNonvirtual(size_t num_bytes);
mirror::Object* AllocNonvirtualWithoutAccounting(size_t num_bytes);
// Return the storage space required by obj.
size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return AllocationSizeNonvirtual(obj, usable_size);
}
@@ -72,7 +72,7 @@
}
size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Removes the fork time growth limit on capacity, allowing the application to allocate up to the
// maximum reserved size of the heap.
@@ -99,19 +99,21 @@
}
// Reset the space to empty.
- void Clear() OVERRIDE LOCKS_EXCLUDED(block_lock_);
+ void Clear() OVERRIDE REQUIRES(!block_lock_);
void Dump(std::ostream& os) const;
- size_t RevokeThreadLocalBuffers(Thread* thread) LOCKS_EXCLUDED(block_lock_);
- size_t RevokeAllThreadLocalBuffers() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_,
- Locks::thread_list_lock_);
- void AssertThreadLocalBuffersAreRevoked(Thread* thread) LOCKS_EXCLUDED(block_lock_);
- void AssertAllThreadLocalBuffersAreRevoked() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_,
- Locks::thread_list_lock_);
+ size_t RevokeThreadLocalBuffers(Thread* thread) REQUIRES(!block_lock_);
+ size_t RevokeAllThreadLocalBuffers()
+ REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !block_lock_);
+ void AssertThreadLocalBuffersAreRevoked(Thread* thread) REQUIRES(!block_lock_);
+ void AssertAllThreadLocalBuffersAreRevoked()
+ REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !block_lock_);
- uint64_t GetBytesAllocated() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- uint64_t GetObjectsAllocated() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ uint64_t GetBytesAllocated() SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!*Locks::runtime_shutdown_lock_, !*Locks::thread_list_lock_, !block_lock_);
+ uint64_t GetObjectsAllocated() SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!*Locks::runtime_shutdown_lock_, !*Locks::thread_list_lock_, !block_lock_);
bool IsEmpty() const {
return Begin() == End();
}
@@ -130,10 +132,10 @@
// Return the object which comes after obj, while ensuring alignment.
static mirror::Object* GetNextObject(mirror::Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Allocate a new TLAB, returns false if the allocation failed.
- bool AllocNewTlab(Thread* self, size_t bytes);
+ bool AllocNewTlab(Thread* self, size_t bytes) REQUIRES(!block_lock_);
BumpPointerSpace* AsBumpPointerSpace() OVERRIDE {
return this;
@@ -141,7 +143,7 @@
// Go through all of the blocks and visit the continuous objects.
void Walk(ObjectCallback* callback, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!block_lock_);
accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() OVERRIDE;
@@ -152,7 +154,7 @@
}
void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Object alignment within the space.
static constexpr size_t kAlignment = 8;
@@ -161,13 +163,13 @@
BumpPointerSpace(const std::string& name, MemMap* mem_map);
// Allocate a raw block of bytes.
- uint8_t* AllocBlock(size_t bytes) EXCLUSIVE_LOCKS_REQUIRED(block_lock_);
- void RevokeThreadLocalBuffersLocked(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(block_lock_);
+ uint8_t* AllocBlock(size_t bytes) REQUIRES(block_lock_);
+ void RevokeThreadLocalBuffersLocked(Thread* thread) REQUIRES(block_lock_);
// The main block is an unbounded block where objects go when there are no other blocks. This
// enables us to maintain tightly packed objects when you are not using thread local buffers for
// allocation. The main block starts at the space Begin().
- void UpdateMainBlock() EXCLUSIVE_LOCKS_REQUIRED(block_lock_);
+ void UpdateMainBlock() REQUIRES(block_lock_);
uint8_t* growth_end_;
AtomicInteger objects_allocated_; // Accumulated from revoked thread local regions.
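For context on why the Alloc fast paths above carry no lock annotation while the block and TLAB entry points take !block_lock_: a bump pointer space satisfies allocations by atomically advancing a single top pointer, and block_lock_ only guards the block bookkeeping (TLAB revocation, Walk) layered on top. A stripped-down sketch of the lock-free path, with field names as assumptions:

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    std::atomic<uint8_t*> top;   // Next free byte in the space.
    uint8_t* growth_end;         // Allocation fails past this point.

    void* AllocNonvirtual(size_t num_bytes) {
      num_bytes = (num_bytes + 7) & ~size_t{7};  // kAlignment == 8.
      uint8_t* old_top = top.load(std::memory_order_relaxed);
      do {
        if (old_top + num_bytes > growth_end) {
          return nullptr;  // Space is full.
        }
        // On CAS failure old_top is reloaded and the bound is re-checked.
      } while (!top.compare_exchange_weak(old_top, old_top + num_bytes,
                                          std::memory_order_relaxed));
      return old_top;
    }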
diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h
index ab527a4..eab757a 100644
--- a/runtime/gc/space/dlmalloc_space.h
+++ b/runtime/gc/space/dlmalloc_space.h
@@ -50,11 +50,11 @@
virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size,
size_t* bytes_tl_bulk_allocated)
- OVERRIDE LOCKS_EXCLUDED(lock_);
+ OVERRIDE REQUIRES(!lock_);
// Virtual to allow MemoryToolMallocSpace to intercept.
virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
- OVERRIDE LOCKS_EXCLUDED(lock_) {
+ OVERRIDE REQUIRES(!lock_) {
return AllocNonvirtual(self, num_bytes, bytes_allocated, usable_size,
bytes_tl_bulk_allocated);
}
@@ -64,12 +64,12 @@
}
// Virtual to allow MemoryToolMallocSpace to intercept.
virtual size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE
- LOCKS_EXCLUDED(lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Virtual to allow MemoryToolMallocSpace to intercept.
virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
- LOCKS_EXCLUDED(lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
size_t MaxBytesBulkAllocatedFor(size_t num_bytes) OVERRIDE {
return num_bytes;
@@ -86,7 +86,7 @@
// Faster non-virtual allocation path.
mirror::Object* AllocNonvirtual(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
- LOCKS_EXCLUDED(lock_);
+ REQUIRES(!lock_);
// Faster non-virtual allocation size path.
size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size);
@@ -104,7 +104,7 @@
// Perform an mspace_inspect_all which calls back for each allocation chunk. The chunk may not
// be in use, indicated by num_bytes equaling zero.
- void Walk(WalkCallback callback, void* arg) OVERRIDE LOCKS_EXCLUDED(lock_);
+ void Walk(WalkCallback callback, void* arg) OVERRIDE REQUIRES(!lock_);
// Returns the number of bytes that the space has currently obtained from the system. This is
// greater than or equal to the amount of live data in the space.
@@ -136,7 +136,7 @@
}
void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
protected:
DlMallocSpace(MemMap* mem_map, size_t initial_size, const std::string& name, void* mspace,
@@ -147,7 +147,7 @@
mirror::Object* AllocWithoutGrowthLocked(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size,
size_t* bytes_tl_bulk_allocated)
- EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ REQUIRES(lock_);
void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size,
size_t /*maximum_size*/, bool /*low_memory_mode*/) OVERRIDE {
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 1923d24..aba32a0 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -25,6 +25,7 @@
#include "art_method.h"
#include "base/macros.h"
+#include "base/out.h"
#include "base/stl_util.h"
#include "base/scoped_flock.h"
#include "base/time_utils.h"
@@ -207,7 +208,7 @@
// Note: we do not generate a fully debuggable boot image so we do not pass the
// compiler flag --debuggable here.
- Runtime::Current()->AddCurrentRuntimeFeaturesAsDex2OatArguments(&arg_vector);
+ Runtime::Current()->AddCurrentRuntimeFeaturesAsDex2OatArguments(outof(arg_vector));
CHECK_EQ(image_isa, kRuntimeISA)
<< "We should always be generating an image for the current isa.";
@@ -789,10 +790,13 @@
CHECK(image_header.GetOatDataBegin() != nullptr);
- OatFile* oat_file = OatFile::Open(oat_filename, oat_filename, image_header.GetOatDataBegin(),
+ OatFile* oat_file = OatFile::Open(oat_filename,
+ oat_filename,
+ image_header.GetOatDataBegin(),
image_header.GetOatFileBegin(),
!Runtime::Current()->IsAotCompiler(),
- nullptr, error_msg);
+ nullptr /* no abs dex location */,
+ outof_ptr(error_msg));
if (oat_file == nullptr) {
*error_msg = StringPrintf("Failed to open oat file '%s' referenced from image %s: %s",
oat_filename.c_str(), GetName(), error_msg->c_str());
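The outof(...) and outof_ptr(...) calls use the base/out.h helper added by this change; its definition is not part of this hunk. The idea is a wrapper that makes out-parameters explicit at the call site. A hypothetical minimal version, purely for illustration:

    #include <string>

    // Hypothetical minimal out<T>; ART's real version lives in base/out.h.
    template <typename T>
    class out {
     public:
      explicit out(T& param) : param_(param) {}
      T& operator*() { return param_; }
      T* operator->() { return &param_; }
     private:
      T& param_;
    };

    template <typename T>
    out<T> outof(T& param) { return out<T>(param); }

    template <typename T>
    out<T> outof_ptr(T* param) { return out<T>(*param); }  // Assumes non-null.

    // The wrapper makes mutation visible at the call site:
    //   std::string error_msg;
    //   Open(..., outof(error_msg));
    void Open(out<std::string> error_msg) { *error_msg = "..."; }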
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index 93ff8aa..215c18b 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -44,7 +44,7 @@
// used to transfer ownership of the OatFile to the ClassLinker when
// it is initialized.
static ImageSpace* Create(const char* image, InstructionSet image_isa, std::string* error_msg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Reads the image header from the specified image location for the
// instruction set image_isa or dies trying.
@@ -64,10 +64,10 @@
// Releases the OatFile from the ImageSpace so it can be transferred to
// the caller, presumably the ClassLinker.
OatFile* ReleaseOatFile()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void VerifyImageAllocations()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
const ImageHeader& GetImageHeader() const {
return *reinterpret_cast<ImageHeader*>(Begin());
@@ -130,13 +130,13 @@
// the OatFile in /data/dalvik-cache if necessary.
static ImageSpace* Init(const char* image_filename, const char* image_location,
bool validate_oat_file, std::string* error_msg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
OatFile* OpenOatFile(const char* image, std::string* error_msg) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool ValidateOatFile(std::string* error_msg) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
friend class Space;
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index 45ed0cd..c726998 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -96,7 +96,7 @@
return Begin() <= byte_obj && byte_obj < End();
}
void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Return true if the large object is a zygote large object. Potentially slow.
virtual bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const = 0;
@@ -130,11 +130,12 @@
// of malloc.
static LargeObjectMapSpace* Create(const std::string& name);
// Return the storage space required by obj.
- size_t AllocationSize(mirror::Object* obj, size_t* usable_size);
+ size_t AllocationSize(mirror::Object* obj, size_t* usable_size) REQUIRES(!lock_);
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size, size_t* bytes_tl_bulk_allocated);
- size_t Free(Thread* self, mirror::Object* ptr);
- void Walk(DlMallocSpace::WalkCallback, void* arg) OVERRIDE LOCKS_EXCLUDED(lock_);
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated)
+ REQUIRES(!lock_);
+ size_t Free(Thread* self, mirror::Object* ptr) REQUIRES(!lock_);
+ void Walk(DlMallocSpace::WalkCallback, void* arg) OVERRIDE REQUIRES(!lock_);
// TODO: disabling thread safety analysis as this may be called when we already hold lock_.
bool Contains(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS;
@@ -146,8 +147,8 @@
explicit LargeObjectMapSpace(const std::string& name);
virtual ~LargeObjectMapSpace() {}
- bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const OVERRIDE LOCKS_EXCLUDED(lock_);
- void SetAllLargeObjectsAsZygoteObjects(Thread* self) OVERRIDE LOCKS_EXCLUDED(lock_);
+ bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const OVERRIDE REQUIRES(!lock_);
+ void SetAllLargeObjectsAsZygoteObjects(Thread* self) OVERRIDE REQUIRES(!lock_);
// Used to ensure mutual exclusion when the allocation spaces data structures are being modified.
mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
@@ -163,12 +164,13 @@
virtual ~FreeListSpace();
static FreeListSpace* Create(const std::string& name, uint8_t* requested_begin, size_t capacity);
size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
- EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ REQUIRES(lock_);
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE;
- size_t Free(Thread* self, mirror::Object* obj) OVERRIDE;
- void Walk(DlMallocSpace::WalkCallback callback, void* arg) OVERRIDE LOCKS_EXCLUDED(lock_);
- void Dump(std::ostream& os) const;
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated)
+ OVERRIDE REQUIRES(!lock_);
+ size_t Free(Thread* self, mirror::Object* obj) OVERRIDE REQUIRES(!lock_);
+ void Walk(DlMallocSpace::WalkCallback callback, void* arg) OVERRIDE REQUIRES(!lock_);
+ void Dump(std::ostream& os) const REQUIRES(!lock_);
protected:
FreeListSpace(const std::string& name, MemMap* mem_map, uint8_t* begin, uint8_t* end);
@@ -186,9 +188,9 @@
return GetAllocationAddressForSlot(GetSlotIndexForAllocationInfo(info));
}
// Removes header from the free blocks set by finding the corresponding iterator and erasing it.
- void RemoveFreePrev(AllocationInfo* info) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ void RemoveFreePrev(AllocationInfo* info) REQUIRES(lock_);
bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const OVERRIDE;
- void SetAllLargeObjectsAsZygoteObjects(Thread* self) OVERRIDE;
+ void SetAllLargeObjectsAsZygoteObjects(Thread* self) OVERRIDE REQUIRES(!lock_);
class SortByPrevFree {
public:
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index 6c689cd..4e56c4a 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -63,9 +63,9 @@
// amount of the storage space that may be used by obj.
virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) = 0;
virtual size_t Free(Thread* self, mirror::Object* ptr)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+ SHARED_REQUIRES(Locks::mutator_lock_) = 0;
virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+ SHARED_REQUIRES(Locks::mutator_lock_) = 0;
// Returns the maximum bytes that could be allocated for the given
// size in bulk, that is the maximum value for the
@@ -160,8 +160,8 @@
size_t maximum_size, bool low_memory_mode) = 0;
virtual void RegisterRecentFree(mirror::Object* ptr)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(lock_);
virtual accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() {
return &SweepCallback;
@@ -196,7 +196,7 @@
private:
static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
DISALLOW_COPY_AND_ASSIGN(MallocSpace);
};
diff --git a/runtime/gc/space/memory_tool_malloc_space.h b/runtime/gc/space/memory_tool_malloc_space.h
index 64c6f35..fe39e05 100644
--- a/runtime/gc/space/memory_tool_malloc_space.h
+++ b/runtime/gc/space/memory_tool_malloc_space.h
@@ -38,15 +38,15 @@
size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE;
mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
- OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ OVERRIDE REQUIRES(Locks::mutator_lock_);
size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE;
size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void RegisterRecentFree(mirror::Object* ptr) OVERRIDE {
UNUSED(ptr);
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index db005f7..66fd62c 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -138,8 +138,7 @@
return reinterpret_cast<mirror::Object*>(old_top);
}
-inline size_t RegionSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+inline size_t RegionSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) {
size_t num_bytes = obj->SizeOf();
if (usable_size != nullptr) {
if (LIKELY(num_bytes <= kRegionSize)) {
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index 19109f0..14e8005 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -42,29 +42,31 @@
// Allocate num_bytes, returns null if the space is full.
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE;
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated)
+ OVERRIDE REQUIRES(!region_lock_);
// Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
- OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ OVERRIDE REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_);
// The main allocation routine.
template<bool kForEvac>
ALWAYS_INLINE mirror::Object* AllocNonvirtual(size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size,
- size_t* bytes_tl_bulk_allocated);
+ size_t* bytes_tl_bulk_allocated)
+ REQUIRES(!region_lock_);
// Allocate/free large objects (objects that are larger than the region size).
template<bool kForEvac>
mirror::Object* AllocLarge(size_t num_bytes, size_t* bytes_allocated, size_t* usable_size,
- size_t* bytes_tl_bulk_allocated);
- void FreeLarge(mirror::Object* large_obj, size_t bytes_allocated);
+ size_t* bytes_tl_bulk_allocated) REQUIRES(!region_lock_);
+ void FreeLarge(mirror::Object* large_obj, size_t bytes_allocated) REQUIRES(!region_lock_);
// Return the storage space required by obj.
size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_) {
return AllocationSizeNonvirtual(obj, usable_size);
}
size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_);
size_t Free(Thread*, mirror::Object*) OVERRIDE {
UNIMPLEMENTED(FATAL);
@@ -83,19 +85,19 @@
return nullptr;
}
- void Clear() OVERRIDE LOCKS_EXCLUDED(region_lock_);
+ void Clear() OVERRIDE REQUIRES(!region_lock_);
void Dump(std::ostream& os) const;
- void DumpRegions(std::ostream& os);
- void DumpNonFreeRegions(std::ostream& os);
+ void DumpRegions(std::ostream& os) REQUIRES(!region_lock_);
+ void DumpNonFreeRegions(std::ostream& os) REQUIRES(!region_lock_);
- size_t RevokeThreadLocalBuffers(Thread* thread) LOCKS_EXCLUDED(region_lock_);
- void RevokeThreadLocalBuffersLocked(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(region_lock_);
- size_t RevokeAllThreadLocalBuffers() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_,
- Locks::thread_list_lock_);
- void AssertThreadLocalBuffersAreRevoked(Thread* thread) LOCKS_EXCLUDED(region_lock_);
- void AssertAllThreadLocalBuffersAreRevoked() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_,
- Locks::thread_list_lock_);
+ size_t RevokeThreadLocalBuffers(Thread* thread) REQUIRES(!region_lock_);
+ void RevokeThreadLocalBuffersLocked(Thread* thread) REQUIRES(region_lock_);
+ size_t RevokeAllThreadLocalBuffers()
+ REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !region_lock_);
+ void AssertThreadLocalBuffersAreRevoked(Thread* thread) REQUIRES(!region_lock_);
+ void AssertAllThreadLocalBuffersAreRevoked()
+ REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !region_lock_);
enum class RegionType : uint8_t {
kRegionTypeAll, // All types.
@@ -112,24 +114,24 @@
kRegionStateLargeTail, // Large tail (non-first regions of a large allocation).
};
- template<RegionType kRegionType> uint64_t GetBytesAllocatedInternal();
- template<RegionType kRegionType> uint64_t GetObjectsAllocatedInternal();
- uint64_t GetBytesAllocated() {
+ template<RegionType kRegionType> uint64_t GetBytesAllocatedInternal() REQUIRES(!region_lock_);
+ template<RegionType kRegionType> uint64_t GetObjectsAllocatedInternal() REQUIRES(!region_lock_);
+ uint64_t GetBytesAllocated() REQUIRES(!region_lock_) {
return GetBytesAllocatedInternal<RegionType::kRegionTypeAll>();
}
- uint64_t GetObjectsAllocated() {
+ uint64_t GetObjectsAllocated() REQUIRES(!region_lock_) {
return GetObjectsAllocatedInternal<RegionType::kRegionTypeAll>();
}
- uint64_t GetBytesAllocatedInFromSpace() {
+ uint64_t GetBytesAllocatedInFromSpace() REQUIRES(!region_lock_) {
return GetBytesAllocatedInternal<RegionType::kRegionTypeFromSpace>();
}
- uint64_t GetObjectsAllocatedInFromSpace() {
+ uint64_t GetObjectsAllocatedInFromSpace() REQUIRES(!region_lock_) {
return GetObjectsAllocatedInternal<RegionType::kRegionTypeFromSpace>();
}
- uint64_t GetBytesAllocatedInUnevacFromSpace() {
+ uint64_t GetBytesAllocatedInUnevacFromSpace() REQUIRES(!region_lock_) {
return GetBytesAllocatedInternal<RegionType::kRegionTypeUnevacFromSpace>();
}
- uint64_t GetObjectsAllocatedInUnevacFromSpace() {
+ uint64_t GetObjectsAllocatedInUnevacFromSpace() REQUIRES(!region_lock_) {
return GetObjectsAllocatedInternal<RegionType::kRegionTypeUnevacFromSpace>();
}
@@ -148,12 +150,12 @@
// Go through all of the blocks and visit the continuous objects.
void Walk(ObjectCallback* callback, void* arg)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ REQUIRES(Locks::mutator_lock_) {
WalkInternal<false>(callback, arg);
}
void WalkToSpace(ObjectCallback* callback, void* arg)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ REQUIRES(Locks::mutator_lock_) {
WalkInternal<true>(callback, arg);
}
@@ -161,7 +163,7 @@
return nullptr;
}
void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_);
// Object alignment within the space.
static constexpr size_t kAlignment = kObjectAlignment;
@@ -201,22 +203,22 @@
}
void SetFromSpace(accounting::ReadBarrierTable* rb_table, bool force_evacuate_all)
- LOCKS_EXCLUDED(region_lock_);
+ REQUIRES(!region_lock_);
- size_t FromSpaceSize();
- size_t UnevacFromSpaceSize();
- size_t ToSpaceSize();
- void ClearFromSpace();
+ size_t FromSpaceSize() REQUIRES(!region_lock_);
+ size_t UnevacFromSpaceSize() REQUIRES(!region_lock_);
+ size_t ToSpaceSize() REQUIRES(!region_lock_);
+ void ClearFromSpace() REQUIRES(!region_lock_);
void AddLiveBytes(mirror::Object* ref, size_t alloc_size) {
Region* reg = RefToRegionUnlocked(ref);
reg->AddLiveBytes(alloc_size);
}
- void AssertAllRegionLiveBytesZeroOrCleared();
+ void AssertAllRegionLiveBytesZeroOrCleared() REQUIRES(!region_lock_);
- void RecordAlloc(mirror::Object* ref);
- bool AllocNewTlab(Thread* self);
+ void RecordAlloc(mirror::Object* ref) REQUIRES(!region_lock_);
+ bool AllocNewTlab(Thread* self) REQUIRES(!region_lock_);
uint32_t Time() {
return time_;
@@ -476,7 +478,7 @@
friend class RegionSpace;
};
- Region* RefToRegion(mirror::Object* ref) LOCKS_EXCLUDED(region_lock_) {
+ Region* RefToRegion(mirror::Object* ref) REQUIRES(!region_lock_) {
MutexLock mu(Thread::Current(), region_lock_);
return RefToRegionLocked(ref);
}
@@ -492,7 +494,7 @@
return RefToRegionLocked(ref);
}
- Region* RefToRegionLocked(mirror::Object* ref) EXCLUSIVE_LOCKS_REQUIRED(region_lock_) {
+ Region* RefToRegionLocked(mirror::Object* ref) REQUIRES(region_lock_) {
DCHECK(HasAddress(ref));
uintptr_t offset = reinterpret_cast<uintptr_t>(ref) - reinterpret_cast<uintptr_t>(Begin());
size_t reg_idx = offset / kRegionSize;
@@ -504,7 +506,7 @@
}
mirror::Object* GetNextObject(mirror::Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
Mutex region_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
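RefToRegion and RefToRegionLocked above illustrate the locked-wrapper split used throughout this change: a public entry point annotated REQUIRES(!lock) acquires the lock and delegates to a *Locked helper annotated REQUIRES(lock). A self-contained sketch of the pattern with the attributes spelled out (names illustrative):

    #define CAPABILITY(x)  __attribute__((capability(x)))
    #define ACQUIRE(...)   __attribute__((acquire_capability(__VA_ARGS__)))
    #define RELEASE(...)   __attribute__((release_capability(__VA_ARGS__)))
    #define REQUIRES(...)  __attribute__((requires_capability(__VA_ARGS__)))

    class CAPABILITY("mutex") Mutex {
     public:
      void Lock() ACQUIRE() {}
      void Unlock() RELEASE() {}
    };

    Mutex region_lock;
    struct Region;

    // Caller already holds the lock.
    Region* RefToRegionLocked(void* ref) REQUIRES(region_lock);

    // Caller must NOT hold the lock; this wrapper takes it.
    Region* RefToRegion(void* ref) REQUIRES(!region_lock) {
      region_lock.Lock();
      Region* r = RefToRegionLocked(ref);
      region_lock.Unlock();
      return r;
    }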
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index 9dc6f31..bc14738 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -48,7 +48,7 @@
mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
- OVERRIDE LOCKS_EXCLUDED(lock_);
+ OVERRIDE REQUIRES(!lock_);
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE {
return AllocNonvirtual(self, num_bytes, bytes_allocated, usable_size,
@@ -56,7 +56,7 @@
}
mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
- OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES(Locks::mutator_lock_) {
return AllocNonvirtualThreadUnsafe(self, num_bytes, bytes_allocated, usable_size,
bytes_tl_bulk_allocated);
}
@@ -64,9 +64,9 @@
return AllocationSizeNonvirtual<true>(obj, usable_size);
}
size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
mirror::Object* AllocNonvirtual(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated) {
@@ -104,7 +104,7 @@
}
size_t Trim() OVERRIDE;
- void Walk(WalkCallback callback, void* arg) OVERRIDE LOCKS_EXCLUDED(lock_);
+ void Walk(WalkCallback callback, void* arg) OVERRIDE REQUIRES(!lock_);
size_t GetFootprint() OVERRIDE;
size_t GetFootprintLimit() OVERRIDE;
void SetFootprintLimit(size_t limit) OVERRIDE;
@@ -134,7 +134,7 @@
return this;
}
- void Verify() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void Verify() REQUIRES(Locks::mutator_lock_) {
rosalloc_->Verify();
}
@@ -166,11 +166,11 @@
void InspectAllRosAlloc(void (*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
void* arg, bool do_null_callback_at_end)
- LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_, Locks::thread_list_lock_);
+ REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_);
void InspectAllRosAllocWithSuspendAll(
void (*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
void* arg, bool do_null_callback_at_end)
- LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_, Locks::thread_list_lock_);
+ REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_);
// Underlying rosalloc.
allocator::RosAlloc* rosalloc_;
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index 871ebac..fc558cf 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -219,7 +219,7 @@
virtual mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size,
size_t* bytes_tl_bulk_allocated)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ REQUIRES(Locks::mutator_lock_) {
return Alloc(self, num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
}
@@ -420,10 +420,9 @@
return this;
}
- bool HasBoundBitmaps() const EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- void BindLiveToMarkBitmap()
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- void UnBindBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ bool HasBoundBitmaps() const REQUIRES(Locks::heap_bitmap_lock_);
+ void BindLiveToMarkBitmap() REQUIRES(Locks::heap_bitmap_lock_);
+ void UnBindBitmaps() REQUIRES(Locks::heap_bitmap_lock_);
// Swap the live and mark bitmaps of this space. This is used by the GC for concurrent sweeping.
void SwapBitmaps();
diff --git a/runtime/gc/space/space_test.h b/runtime/gc/space/space_test.h
index 6e0e0d2..4d2db11 100644
--- a/runtime/gc/space/space_test.h
+++ b/runtime/gc/space/space_test.h
@@ -49,7 +49,7 @@
heap->SetSpaceAsDefault(space);
}
- mirror::Class* GetByteArrayClass(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Class* GetByteArrayClass(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) {
StackHandleScope<1> hs(self);
auto null_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
if (byte_array_class_ == nullptr) {
@@ -65,7 +65,7 @@
mirror::Object* Alloc(space::MallocSpace* alloc_space, Thread* self, size_t bytes,
size_t* bytes_allocated, size_t* usable_size,
size_t* bytes_tl_bulk_allocated)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
StackHandleScope<1> hs(self);
Handle<mirror::Class> byte_array_class(hs.NewHandle(GetByteArrayClass(self)));
mirror::Object* obj = alloc_space->Alloc(self, bytes, bytes_allocated, usable_size,
@@ -79,7 +79,7 @@
mirror::Object* AllocWithGrowth(space::MallocSpace* alloc_space, Thread* self, size_t bytes,
size_t* bytes_allocated, size_t* usable_size,
size_t* bytes_tl_bulk_allocated)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
StackHandleScope<1> hs(self);
Handle<mirror::Class> byte_array_class(hs.NewHandle(GetByteArrayClass(self)));
mirror::Object* obj = alloc_space->AllocWithGrowth(self, bytes, bytes_allocated, usable_size,
@@ -91,7 +91,7 @@
}
void InstallClass(mirror::Object* o, mirror::Class* byte_array_class, size_t size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// Note the minimum size, which is the size of a zero-length byte array.
EXPECT_GE(size, SizeOfZeroLengthByteArray());
EXPECT_TRUE(byte_array_class != nullptr);
diff --git a/runtime/gc/space/zygote_space.h b/runtime/gc/space/zygote_space.h
index 934a234..f2889e2 100644
--- a/runtime/gc/space/zygote_space.h
+++ b/runtime/gc/space/zygote_space.h
@@ -33,7 +33,7 @@
static ZygoteSpace* Create(const std::string& name, MemMap* mem_map,
accounting::ContinuousSpaceBitmap* live_bitmap,
accounting::ContinuousSpaceBitmap* mark_bitmap)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void Dump(std::ostream& os) const;
@@ -77,7 +77,7 @@
}
void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
protected:
virtual accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() {
diff --git a/runtime/gc/task_processor.h b/runtime/gc/task_processor.h
index 5f48619..e40fa06 100644
--- a/runtime/gc/task_processor.h
+++ b/runtime/gc/task_processor.h
@@ -54,17 +54,17 @@
public:
TaskProcessor();
virtual ~TaskProcessor();
- void AddTask(Thread* self, HeapTask* task) LOCKS_EXCLUDED(lock_);
- HeapTask* GetTask(Thread* self) LOCKS_EXCLUDED(lock_);
- void Start(Thread* self) LOCKS_EXCLUDED(lock_);
+ void AddTask(Thread* self, HeapTask* task) REQUIRES(!*lock_);
+ HeapTask* GetTask(Thread* self) REQUIRES(!*lock_);
+ void Start(Thread* self) REQUIRES(!*lock_);
// Stop tells RunAllTasks to finish the remaining tasks as soon as
// possible and then return.
- void Stop(Thread* self) LOCKS_EXCLUDED(lock_);
- void RunAllTasks(Thread* self) LOCKS_EXCLUDED(lock_);
- bool IsRunning() const LOCKS_EXCLUDED(lock_);
+ void Stop(Thread* self) REQUIRES(!*lock_);
+ void RunAllTasks(Thread* self) REQUIRES(!*lock_);
+ bool IsRunning() const REQUIRES(!*lock_);
void UpdateTargetRunTime(Thread* self, HeapTask* target_time, uint64_t new_target_time)
- LOCKS_EXCLUDED(lock_);
- Thread* GetRunningThread() const LOCKS_EXCLUDED(lock_);
+ REQUIRES(!*lock_);
+ Thread* GetRunningThread() const REQUIRES(!*lock_);
private:
class CompareByTargetRunTime {
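
The TaskProcessor hunk shows the pointer form: lock_ is a Mutex*, so the negative capability must dereference it, with REQUIRES(!*lock_) meaning "the caller does not hold the pointed-to mutex". A small sketch under the same illustrative macros as above (TaskQueue and its members are made-up names):

    #define GUARDED_BY(x) __attribute__((guarded_by(x)))

    class TaskQueue {
     public:
      explicit TaskQueue(Mutex* lock) : lock_(lock), pending_(0) {}

      // Caller must not already hold *lock_; the method acquires it itself.
      void Add(int task) REQUIRES(!*lock_) {
        lock_->Lock();
        pending_ = task;
        lock_->Unlock();
      }

     private:
      Mutex* const lock_;                // owned elsewhere; note the pointer
      int pending_ GUARDED_BY(*lock_);   // guarded by the pointed-to mutex
    };
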
diff --git a/runtime/gc_root.h b/runtime/gc_root.h
index bb604f0..83471e6 100644
--- a/runtime/gc_root.h
+++ b/runtime/gc_root.h
@@ -91,24 +91,24 @@
// Single root version, not overridable.
ALWAYS_INLINE void VisitRoot(mirror::Object** roots, const RootInfo& info)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
VisitRoots(&roots, 1, info);
}
// Single root version, not overridable.
ALWAYS_INLINE void VisitRootIfNonNull(mirror::Object** roots, const RootInfo& info)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (*roots != nullptr) {
VisitRoot(roots, info);
}
}
virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+ SHARED_REQUIRES(Locks::mutator_lock_) = 0;
virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
const RootInfo& info)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+ SHARED_REQUIRES(Locks::mutator_lock_) = 0;
};
// Only visits roots one at a time, doesn't handle updating roots. Used when performance isn't
@@ -116,7 +116,7 @@
class SingleRootVisitor : public RootVisitor {
private:
void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
for (size_t i = 0; i < count; ++i) {
VisitRoot(*roots[i], info);
}
@@ -124,7 +124,7 @@
void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
const RootInfo& info) OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
for (size_t i = 0; i < count; ++i) {
VisitRoot(roots[i]->AsMirrorPtr(), info);
}
@@ -169,10 +169,10 @@
public:
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
ALWAYS_INLINE MirrorType* Read(GcRootSource* gc_root_source = nullptr) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void VisitRoot(RootVisitor* visitor, const RootInfo& info) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(!IsNull());
mirror::CompressedReference<mirror::Object>* roots[1] = { &root_ };
visitor->VisitRoots(roots, 1u, info);
@@ -180,7 +180,7 @@
}
void VisitRootIfNonNull(RootVisitor* visitor, const RootInfo& info) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (!IsNull()) {
VisitRoot(visitor, info);
}
@@ -195,7 +195,7 @@
return root_.IsNull();
}
- ALWAYS_INLINE GcRoot(MirrorType* ref = nullptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE GcRoot(MirrorType* ref = nullptr) SHARED_REQUIRES(Locks::mutator_lock_);
private:
// Root visitors take pointers to root_ and place them in CompressedReference** arrays. We use a
@@ -222,7 +222,7 @@
template <class MirrorType>
ALWAYS_INLINE void VisitRootIfNonNull(GcRoot<MirrorType>& root)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (!root.IsNull()) {
VisitRoot(root);
}
@@ -230,27 +230,27 @@
template <class MirrorType>
ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<MirrorType>* root)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (!root->IsNull()) {
VisitRoot(root);
}
}
template <class MirrorType>
- void VisitRoot(GcRoot<MirrorType>& root) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void VisitRoot(GcRoot<MirrorType>& root) SHARED_REQUIRES(Locks::mutator_lock_) {
VisitRoot(root.AddressWithoutBarrier());
}
template <class MirrorType>
void VisitRoot(mirror::CompressedReference<MirrorType>* root)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (UNLIKELY(buffer_pos_ >= kBufferSize)) {
Flush();
}
roots_[buffer_pos_++] = root;
}
- void Flush() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void Flush() SHARED_REQUIRES(Locks::mutator_lock_) {
visitor_->VisitRoots(roots_, buffer_pos_, root_info_);
buffer_pos_ = 0;
}
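
The Flush()/buffer_pos_ code above is a batching adapter: single-root visits accumulate in a fixed buffer and are handed to the bulk VisitRoots interface once the buffer fills (or on destruction). A generic, self-contained sketch of the pattern; BulkVisitor, BufferedVisitor and friends are illustrative, not the ART types:

    #include <cstddef>

    struct BulkVisitor {
      virtual void VisitAll(void** items, size_t count) = 0;
      virtual ~BulkVisitor() = default;
    };

    template <size_t kBufferSize>
    class BufferedVisitor {
     public:
      explicit BufferedVisitor(BulkVisitor* visitor) : visitor_(visitor) {}
      ~BufferedVisitor() { Flush(); }    // don't drop a partial batch

      void Visit(void* item) {
        if (pos_ >= kBufferSize) {
          Flush();                       // hand off a full batch first
        }
        buffer_[pos_++] = item;
      }

      void Flush() {
        visitor_->VisitAll(buffer_, pos_);
        pos_ = 0;
      }

     private:
      BulkVisitor* const visitor_;
      void* buffer_[kBufferSize];
      size_t pos_ = 0;
    };
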
diff --git a/runtime/handle.h b/runtime/handle.h
index d94d875..f939ec5 100644
--- a/runtime/handle.h
+++ b/runtime/handle.h
@@ -50,19 +50,19 @@
ALWAYS_INLINE explicit Handle(StackReference<T>* reference) : reference_(reference) {
}
- ALWAYS_INLINE T& operator*() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE T& operator*() const SHARED_REQUIRES(Locks::mutator_lock_) {
return *Get();
}
- ALWAYS_INLINE T* operator->() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE T* operator->() const SHARED_REQUIRES(Locks::mutator_lock_) {
return Get();
}
- ALWAYS_INLINE T* Get() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE T* Get() const SHARED_REQUIRES(Locks::mutator_lock_) {
return down_cast<T*>(reference_->AsMirrorPtr());
}
- ALWAYS_INLINE jobject ToJObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE jobject ToJObject() const SHARED_REQUIRES(Locks::mutator_lock_) {
if (UNLIKELY(reference_->AsMirrorPtr() == nullptr)) {
// Special case so that we work with NullHandles.
return nullptr;
@@ -71,12 +71,12 @@
}
ALWAYS_INLINE StackReference<mirror::Object>* GetReference()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return reference_;
}
ALWAYS_INLINE const StackReference<mirror::Object>* GetReference() const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return reference_;
}
@@ -108,22 +108,22 @@
}
ALWAYS_INLINE MutableHandle(const MutableHandle<T>& handle)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: Handle<T>(handle.reference_) {
}
ALWAYS_INLINE MutableHandle<T>& operator=(const MutableHandle<T>& handle)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
Handle<T>::operator=(handle);
return *this;
}
ALWAYS_INLINE explicit MutableHandle(StackReference<T>* reference)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: Handle<T>(reference) {
}
- ALWAYS_INLINE T* Assign(T* reference) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE T* Assign(T* reference) SHARED_REQUIRES(Locks::mutator_lock_) {
StackReference<mirror::Object>* ref = Handle<T>::GetReference();
T* old = down_cast<T*>(ref->AsMirrorPtr());
ref->Assign(reference);
@@ -131,12 +131,12 @@
}
template<typename S>
- explicit MutableHandle(const MutableHandle<S>& handle) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ explicit MutableHandle(const MutableHandle<S>& handle) SHARED_REQUIRES(Locks::mutator_lock_)
: Handle<T>(handle) {
}
template<typename S>
- explicit MutableHandle(StackReference<S>* reference) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ explicit MutableHandle(StackReference<S>* reference) SHARED_REQUIRES(Locks::mutator_lock_)
: Handle<T>(reference) {
}
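
Handle<T> above is one level of indirection: the object pointer lives in a StackReference slot that a root visitor can walk (and a moving GC can rewrite), and every read goes through the slot. A stripped-down sketch of that shape; SlotHandle and the raw T** slot are illustrative simplifications:

    template <typename T>
    class SlotHandle {
     public:
      explicit SlotHandle(T** slot) : slot_(slot) {}
      // Every access re-reads the slot, so it observes GC relocation.
      T* Get() const { return *slot_; }
      T& operator*() const { return *Get(); }
      T* operator->() const { return Get(); }

     private:
      T** slot_;  // points into a scope the GC knows how to visit
    };
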
diff --git a/runtime/handle_scope.h b/runtime/handle_scope.h
index 9a0e52e..5ed8ef0 100644
--- a/runtime/handle_scope.h
+++ b/runtime/handle_scope.h
@@ -60,16 +60,16 @@
}
ALWAYS_INLINE mirror::Object* GetReference(size_t i) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE Handle<mirror::Object> GetHandle(size_t i)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE MutableHandle<mirror::Object> GetMutableHandle(size_t i)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE void SetReference(size_t i, mirror::Object* object)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE bool Contains(StackReference<mirror::Object>* handle_scope_entry) const;
@@ -150,14 +150,14 @@
ALWAYS_INLINE ~StackHandleScope();
template<class T>
- ALWAYS_INLINE MutableHandle<T> NewHandle(T* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE MutableHandle<T> NewHandle(T* object) SHARED_REQUIRES(Locks::mutator_lock_);
template<class T>
ALWAYS_INLINE HandleWrapper<T> NewHandleWrapper(T** object)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE void SetReference(size_t i, mirror::Object* object)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
Thread* Self() const {
return self_;
@@ -165,7 +165,7 @@
private:
template<class T>
- ALWAYS_INLINE MutableHandle<T> GetHandle(size_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE MutableHandle<T> GetHandle(size_t i) SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK_LT(i, kNumReferences);
return MutableHandle<T>(&GetReferences()[i]);
}
@@ -209,7 +209,7 @@
}
template<class T>
- MutableHandle<T> NewHandle(T* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ MutableHandle<T> NewHandle(T* object) SHARED_REQUIRES(Locks::mutator_lock_) {
if (scopes_.empty() || current_scope_num_refs_ >= kNumReferencesPerScope) {
StackHandleScope<kNumReferencesPerScope>* scope =
new StackHandleScope<kNumReferencesPerScope>(self_);
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index 71a69aa..e67ea3f 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -240,7 +240,7 @@
}
void AddIdList(mirror::ObjectArray<mirror::Object>* values)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
const int32_t length = values->GetLength();
for (int32_t i = 0; i < length; ++i) {
AddObjectId(values->GetWithoutChecks(i));
@@ -429,8 +429,7 @@
}
void Dump()
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(Locks::heap_bitmap_lock_, Locks::alloc_tracker_lock_) {
+ REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !Locks::alloc_tracker_lock_) {
{
MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
if (Runtime::Current()->GetHeap()->IsAllocTrackingEnabled()) {
@@ -471,26 +470,26 @@
private:
static void VisitObjectCallback(mirror::Object* obj, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(obj != nullptr);
DCHECK(arg != nullptr);
reinterpret_cast<Hprof*>(arg)->DumpHeapObject(obj);
}
void DumpHeapObject(mirror::Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void DumpHeapClass(mirror::Class* klass)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void DumpHeapArray(mirror::Array* obj, mirror::Class* klass)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void DumpHeapInstanceObject(mirror::Object* obj, mirror::Class* klass)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void ProcessHeap(bool header_first)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ REQUIRES(Locks::mutator_lock_) {
// Reset current heap and object count.
current_heap_ = HPROF_HEAP_DEFAULT;
objects_in_segment_ = 0;
@@ -504,7 +503,7 @@
}
}
- void ProcessBody() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void ProcessBody() REQUIRES(Locks::mutator_lock_) {
Runtime* const runtime = Runtime::Current();
// Walk the roots and the heap.
output_->StartNewRecord(HPROF_TAG_HEAP_DUMP_SEGMENT, kHprofTime);
@@ -517,7 +516,7 @@
output_->EndRecord();
}
- void ProcessHeader(bool string_first) EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void ProcessHeader(bool string_first) REQUIRES(Locks::mutator_lock_) {
// Write the header.
WriteFixedHeader();
// Write the string and class tables, and any stack traces, to the header.
@@ -536,7 +535,7 @@
output_->EndRecord();
}
- void WriteClassTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void WriteClassTable() SHARED_REQUIRES(Locks::mutator_lock_) {
for (const auto& p : classes_) {
mirror::Class* c = p.first;
HprofClassSerialNumber sn = p.second;
@@ -585,11 +584,11 @@
}
void VisitRoot(mirror::Object* obj, const RootInfo& root_info)
- OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
void MarkRootObject(const mirror::Object* obj, jobject jni_obj, HprofHeapTag heap_tag,
uint32_t thread_serial);
- HprofClassObjectId LookupClassId(mirror::Class* c) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ HprofClassObjectId LookupClassId(mirror::Class* c) SHARED_REQUIRES(Locks::mutator_lock_) {
if (c != nullptr) {
auto it = classes_.find(c);
if (it == classes_.end()) {
@@ -604,7 +603,7 @@
}
HprofStackTraceSerialNumber LookupStackTraceSerialNumber(const mirror::Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
auto r = allocation_records_.find(obj);
if (r == allocation_records_.end()) {
return kHprofNullStackTrace;
@@ -616,7 +615,7 @@
}
}
- HprofStringId LookupStringId(mirror::String* string) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ HprofStringId LookupStringId(mirror::String* string) SHARED_REQUIRES(Locks::mutator_lock_) {
return LookupStringId(string->ToModifiedUtf8());
}
@@ -634,7 +633,7 @@
return id;
}
- HprofStringId LookupClassNameId(mirror::Class* c) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ HprofStringId LookupClassNameId(mirror::Class* c) SHARED_REQUIRES(Locks::mutator_lock_) {
return LookupStringId(PrettyDescriptor(c));
}
@@ -662,7 +661,7 @@
__ AddU4(static_cast<uint32_t>(nowMs & 0xFFFFFFFF));
}
- void WriteStackTraces() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void WriteStackTraces() SHARED_REQUIRES(Locks::mutator_lock_) {
// Write a dummy stack trace record so the analysis tools don't freak out.
output_->StartNewRecord(HPROF_TAG_STACK_TRACE, kHprofTime);
__ AddStackTraceSerialNumber(kHprofNullStackTrace);
@@ -725,7 +724,7 @@
}
bool DumpToDdmsBuffered(size_t overall_size ATTRIBUTE_UNUSED, size_t max_length ATTRIBUTE_UNUSED)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ REQUIRES(Locks::mutator_lock_) {
LOG(FATAL) << "Unimplemented";
UNREACHABLE();
// // Send the data off to DDMS.
@@ -738,7 +737,7 @@
}
bool DumpToFile(size_t overall_size, size_t max_length)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ REQUIRES(Locks::mutator_lock_) {
// Where exactly are we writing to?
int out_fd;
if (fd_ >= 0) {
@@ -787,7 +786,7 @@
}
bool DumpToDdmsDirect(size_t overall_size, size_t max_length, uint32_t chunk_type)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ REQUIRES(Locks::mutator_lock_) {
CHECK(direct_to_ddms_);
JDWP::JdwpState* state = Dbg::GetJdwpState();
CHECK(state != nullptr);
@@ -818,7 +817,7 @@
}
void PopulateAllocationTrackingTraces()
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::alloc_tracker_lock_) {
+ REQUIRES(Locks::mutator_lock_, Locks::alloc_tracker_lock_) {
gc::AllocRecordObjectMap* records = Runtime::Current()->GetHeap()->GetAllocationRecords();
CHECK(records != nullptr);
HprofStackTraceSerialNumber next_trace_sn = kHprofNullStackTrace + 1;
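
The rewritten Dump() annotation above also shows that one REQUIRES clause can mix held and excluded locks. A tiny sketch, using the illustrative macros and Mutex from the first sketch:

    Mutex gHeld;      // must be held on entry
    Mutex gExcluded;  // must NOT be held on entry

    void DumpSketch() REQUIRES(gHeld, !gExcluded) {
      gExcluded.Lock();   // fine: the caller was required not to hold it
      // ... emit data protected by gHeld ...
      gExcluded.Unlock();
    }
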
diff --git a/runtime/image.h b/runtime/image.h
index d856f21..cc98ba6 100644
--- a/runtime/image.h
+++ b/runtime/image.h
@@ -156,9 +156,9 @@
}
mirror::Object* GetImageRoot(ImageRoot image_root) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
mirror::ObjectArray<mirror::Object>* GetImageRoots() const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void RelocateImage(off_t delta);
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index 20e4222..c9ba6cf 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -28,14 +28,16 @@
namespace art {
+static constexpr bool kDumpStackOnNonLocalReference = false;
+
template<typename T>
class MutatorLockedDumpable {
public:
explicit MutatorLockedDumpable(T& value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : value_(value) {
+ SHARED_REQUIRES(Locks::mutator_lock_) : value_(value) {
}
- void Dump(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void Dump(std::ostream& os) const SHARED_REQUIRES(Locks::mutator_lock_) {
value_.Dump(os);
}
@@ -47,7 +49,7 @@
template<typename T>
std::ostream& operator<<(std::ostream& os, const MutatorLockedDumpable<T>& rhs)
-// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) however annotalysis
+// TODO: should be SHARED_REQUIRES(Locks::mutator_lock_) however annotalysis
// currently fails for this.
NO_THREAD_SAFETY_ANALYSIS {
rhs.Dump(os);
@@ -183,7 +185,9 @@
if (env->check_jni) {
ScopedObjectAccess soa(self);
LOG(WARNING) << "Attempt to remove non-JNI local reference, dumping thread";
- self->Dump(LOG(WARNING));
+ if (kDumpStackOnNonLocalReference) {
+ self->Dump(LOG(WARNING));
+ }
}
return true;
}
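
The new kDumpStackOnNonLocalReference constant is the usual compile-time gate for expensive diagnostics: the warning still logs, but the full thread dump is compiled out until a developer flips the constant. A generic sketch of the idiom (kVerboseDiagnostics and DumpEverything are made-up names):

    #include <iostream>

    static constexpr bool kVerboseDiagnostics = false;  // flip locally to debug

    void DumpEverything() { /* expensive diagnostics elided */ }

    void WarnBadReference() {
      std::cerr << "removing a non-local reference\n";  // always cheap
      if (kVerboseDiagnostics) {  // branch folds away when the flag is false
        DumpEverything();
      }
    }
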
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index dea5dfd..798b48c 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -199,7 +199,7 @@
static const size_t kIRTPrevCount = kIsDebugBuild ? 7 : 3;
class IrtEntry {
public:
- void Add(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void Add(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
++serial_;
if (serial_ == kIRTPrevCount) {
serial_ = 0;
@@ -228,11 +228,11 @@
class IrtIterator {
public:
explicit IrtIterator(IrtEntry* table, size_t i, size_t capacity)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: table_(table), i_(i), capacity_(capacity) {
}
- IrtIterator& operator++() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ IrtIterator& operator++() SHARED_REQUIRES(Locks::mutator_lock_) {
++i_;
return *this;
}
@@ -278,7 +278,7 @@
* failed during expansion).
*/
IndirectRef Add(uint32_t cookie, mirror::Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
/*
* Given an IndirectRef in the table, return the Object it refers to.
@@ -286,14 +286,14 @@
* Returns kInvalidIndirectRefObject if iref is invalid.
*/
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- mirror::Object* Get(IndirectRef iref) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ mirror::Object* Get(IndirectRef iref) const SHARED_REQUIRES(Locks::mutator_lock_)
ALWAYS_INLINE;
// Synchronized get which reads a reference, acquiring a lock if necessary.
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
mirror::Object* SynchronizedGet(Thread* /*self*/, ReaderWriterMutex* /*mutex*/,
IndirectRef iref) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return Get<kReadBarrierOption>(iref);
}
@@ -302,7 +302,7 @@
*
* Updates an existing indirect reference to point to a new object.
*/
- void Update(IndirectRef iref, mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void Update(IndirectRef iref, mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
/*
* Remove an existing entry.
@@ -317,7 +317,7 @@
void AssertEmpty();
- void Dump(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void Dump(std::ostream& os) const SHARED_REQUIRES(Locks::mutator_lock_);
/*
* Return the # of entries in the entire table. This includes holes, and
@@ -337,7 +337,7 @@
}
void VisitRoots(RootVisitor* visitor, const RootInfo& root_info)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
uint32_t GetSegmentState() const {
return segment_state_.all;
@@ -352,7 +352,7 @@
}
// Release pages past the end of the table that may have previously held references.
- void Trim() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void Trim() SHARED_REQUIRES(Locks::mutator_lock_);
private:
// Extract the table index from an indirect reference.
diff --git a/runtime/indirect_reference_table_test.cc b/runtime/indirect_reference_table_test.cc
index c20002b..f376ec0 100644
--- a/runtime/indirect_reference_table_test.cc
+++ b/runtime/indirect_reference_table_test.cc
@@ -26,7 +26,7 @@
class IndirectReferenceTableTest : public CommonRuntimeTest {};
static void CheckDump(IndirectReferenceTable* irt, size_t num_objects, size_t num_unique)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
std::ostringstream oss;
irt->Dump(oss);
if (num_objects == 0) {
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index abe9dc2..e28d578 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -49,12 +49,20 @@
static constexpr StackVisitor::StackWalkKind kInstrumentationStackWalk =
StackVisitor::StackWalkKind::kSkipInlinedFrames;
-static bool InstallStubsClassVisitor(mirror::Class* klass, void* arg)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Instrumentation* instrumentation = reinterpret_cast<Instrumentation*>(arg);
- instrumentation->InstallStubsForClass(klass);
- return true; // we visit all classes.
-}
+class InstallStubsClassVisitor : public ClassVisitor {
+ public:
+ explicit InstallStubsClassVisitor(Instrumentation* instrumentation)
+ : instrumentation_(instrumentation) {}
+
+ bool Visit(mirror::Class* klass) OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ instrumentation_->InstallStubsForClass(klass);
+ return true; // we visit all classes.
+ }
+
+ private:
+ Instrumentation* const instrumentation_;
+};
+
Instrumentation::Instrumentation()
: instrumentation_stubs_installed_(false), entry_exit_stubs_installed_(false),
@@ -87,7 +95,7 @@
}
static void UpdateEntrypoints(ArtMethod* method, const void* quick_code)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
Runtime* const runtime = Runtime::Current();
jit::Jit* jit = runtime->GetJit();
if (jit != nullptr) {
@@ -151,7 +159,7 @@
// Since we may already have done this previously, we need to push a new instrumentation frame before
// existing instrumentation frames.
static void InstrumentationInstallStack(Thread* thread, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
struct InstallStackVisitor FINAL : public StackVisitor {
InstallStackVisitor(Thread* thread_in, Context* context, uintptr_t instrumentation_exit_pc)
: StackVisitor(thread_in, context, kInstrumentationStackWalk),
@@ -161,7 +169,7 @@
last_return_pc_(0) {
}
- bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
if (m == nullptr) {
if (kVerboseInstrumentation) {
@@ -291,7 +299,7 @@
// Removes the instrumentation exit pc as the return PC for every quick frame.
static void InstrumentationRestoreStack(Thread* thread, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
struct RestoreStackVisitor FINAL : public StackVisitor {
RestoreStackVisitor(Thread* thread_in, uintptr_t instrumentation_exit_pc,
Instrumentation* instrumentation)
@@ -302,7 +310,7 @@
instrumentation_stack_(thread_in->GetInstrumentationStack()),
frames_removed_(0) {}
- bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
if (instrumentation_stack_->size() == 0) {
return false; // Stop.
}
@@ -563,14 +571,16 @@
entry_exit_stubs_installed_ = true;
interpreter_stubs_installed_ = false;
}
- runtime->GetClassLinker()->VisitClasses(InstallStubsClassVisitor, this);
+ InstallStubsClassVisitor visitor(this);
+ runtime->GetClassLinker()->VisitClasses(&visitor);
instrumentation_stubs_installed_ = true;
MutexLock mu(self, *Locks::thread_list_lock_);
runtime->GetThreadList()->ForEach(InstrumentationInstallStack, this);
} else {
interpreter_stubs_installed_ = false;
entry_exit_stubs_installed_ = false;
- runtime->GetClassLinker()->VisitClasses(InstallStubsClassVisitor, this);
+ InstallStubsClassVisitor visitor(this);
+ runtime->GetClassLinker()->VisitClasses(&visitor);
// Restore stack only if there is no method currently deoptimized.
bool empty;
{
@@ -931,7 +941,7 @@
static void CheckStackDepth(Thread* self, const InstrumentationStackFrame& instrumentation_frame,
int delta)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
size_t frame_id = StackVisitor::ComputeNumFrames(self, kInstrumentationStackWalk) + delta;
if (frame_id != instrumentation_frame.frame_id_) {
LOG(ERROR) << "Expected frame_id=" << frame_id << " but found "
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index db8e9c2..93ff567 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -63,24 +63,24 @@
// Call-back for when a method is entered.
virtual void MethodEntered(Thread* thread, mirror::Object* this_object,
ArtMethod* method,
- uint32_t dex_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+ uint32_t dex_pc) SHARED_REQUIRES(Locks::mutator_lock_) = 0;
// Call-back for when a method is exited.
virtual void MethodExited(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc,
const JValue& return_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+ SHARED_REQUIRES(Locks::mutator_lock_) = 0;
// Call-back for when a method is popped due to an exception throw. A method will either cause a
// MethodExited call-back or a MethodUnwind call-back when its activation is removed.
virtual void MethodUnwind(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+ SHARED_REQUIRES(Locks::mutator_lock_) = 0;
// Call-back for when the dex pc moves in a method.
virtual void DexPcMoved(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t new_dex_pc)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+ SHARED_REQUIRES(Locks::mutator_lock_) = 0;
// Call-back for when we read from a field.
virtual void FieldRead(Thread* thread, mirror::Object* this_object, ArtMethod* method,
@@ -92,11 +92,11 @@
// Call-back when an exception is caught.
virtual void ExceptionCaught(Thread* thread, mirror::Throwable* exception_object)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+ SHARED_REQUIRES(Locks::mutator_lock_) = 0;
// Call-back for when we get a backward branch.
virtual void BackwardBranch(Thread* thread, ArtMethod* method, int32_t dex_pc_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+ SHARED_REQUIRES(Locks::mutator_lock_) = 0;
};
// Instrumentation is a catch-all for when extra information is required from the runtime. The
@@ -129,90 +129,83 @@
// for saying you should have suspended all threads (installing stubs while threads are running
// will break).
void AddListener(InstrumentationListener* listener, uint32_t events)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::classlinker_classes_lock_);
+ REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::classlinker_classes_lock_);
// Removes a listener possibly removing instrumentation stubs.
void RemoveListener(InstrumentationListener* listener, uint32_t events)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::classlinker_classes_lock_);
+ REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::classlinker_classes_lock_);
// Deoptimization.
void EnableDeoptimization()
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(deoptimized_methods_lock_);
+ REQUIRES(Locks::mutator_lock_, !deoptimized_methods_lock_);
void DisableDeoptimization(const char* key)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(deoptimized_methods_lock_);
+ REQUIRES(Locks::mutator_lock_, !deoptimized_methods_lock_);
bool AreAllMethodsDeoptimized() const {
return interpreter_stubs_installed_;
}
- bool ShouldNotifyMethodEnterExitEvents() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool ShouldNotifyMethodEnterExitEvents() const SHARED_REQUIRES(Locks::mutator_lock_);
// Executes everything with interpreter.
void DeoptimizeEverything(const char* key)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::classlinker_classes_lock_);
+ REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::classlinker_classes_lock_,
+ !deoptimized_methods_lock_);
// Executes everything with compiled code (or interpreter if there is no code).
void UndeoptimizeEverything(const char* key)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::classlinker_classes_lock_);
+ REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::classlinker_classes_lock_,
+ !deoptimized_methods_lock_);
// Deoptimize a method by forcing its execution with the interpreter. Nevertheless, a static
// method (except a class initializer) set to the resolution trampoline will be deoptimized only
// once its declaring class is initialized.
void Deoptimize(ArtMethod* method)
- LOCKS_EXCLUDED(Locks::thread_list_lock_, deoptimized_methods_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !deoptimized_methods_lock_);
// Undeoptimize the method by restoring its entrypoints. Nevertheless, a static method
// (except a class initializer) set to the resolution trampoline will be updated only once its
// declaring class is initialized.
void Undeoptimize(ArtMethod* method)
- LOCKS_EXCLUDED(Locks::thread_list_lock_, deoptimized_methods_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !deoptimized_methods_lock_);
// Indicates whether the method has been deoptimized so it is executed with the interpreter.
bool IsDeoptimized(ArtMethod* method)
- LOCKS_EXCLUDED(deoptimized_methods_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!deoptimized_methods_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
// Enable method tracing by installing instrumentation entry/exit stubs or interpreter.
void EnableMethodTracing(const char* key,
bool needs_interpreter = kDeoptimizeForAccurateMethodEntryExitListeners)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::classlinker_classes_lock_);
+ REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::classlinker_classes_lock_,
+ !deoptimized_methods_lock_);
// Disable method tracing by uninstalling instrumentation entry/exit stubs or interpreter.
void DisableMethodTracing(const char* key)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::classlinker_classes_lock_);
+ REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::classlinker_classes_lock_,
+ !deoptimized_methods_lock_);
InterpreterHandlerTable GetInterpreterHandlerTable() const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return interpreter_handler_table_;
}
- void InstrumentQuickAllocEntryPoints() LOCKS_EXCLUDED(Locks::instrument_entrypoints_lock_);
- void UninstrumentQuickAllocEntryPoints() LOCKS_EXCLUDED(Locks::instrument_entrypoints_lock_);
+ void InstrumentQuickAllocEntryPoints() REQUIRES(!Locks::instrument_entrypoints_lock_);
+ void UninstrumentQuickAllocEntryPoints() REQUIRES(!Locks::instrument_entrypoints_lock_);
void InstrumentQuickAllocEntryPointsLocked()
- EXCLUSIVE_LOCKS_REQUIRED(Locks::instrument_entrypoints_lock_)
- LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::runtime_shutdown_lock_);
+ REQUIRES(Locks::instrument_entrypoints_lock_, !Locks::thread_list_lock_,
+ !Locks::runtime_shutdown_lock_);
void UninstrumentQuickAllocEntryPointsLocked()
- EXCLUSIVE_LOCKS_REQUIRED(Locks::instrument_entrypoints_lock_)
- LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::runtime_shutdown_lock_);
- void ResetQuickAllocEntryPoints() EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_);
+ REQUIRES(Locks::instrument_entrypoints_lock_, !Locks::thread_list_lock_,
+ !Locks::runtime_shutdown_lock_);
+ void ResetQuickAllocEntryPoints() REQUIRES(Locks::runtime_shutdown_lock_);
// Update the code of a method respecting any installed stubs.
void UpdateMethodsCode(ArtMethod* method, const void* quick_code)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
// Get the quick code for the given method. More efficient than asking the class linker as it
// will short-cut to GetCode if instrumentation and static method resolution stubs aren't
// installed.
const void* GetQuickCodeFor(ArtMethod* method, size_t pointer_size) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void ForceInterpretOnly() {
interpret_only_ = true;
@@ -232,39 +225,39 @@
return instrumentation_stubs_installed_;
}
- bool HasMethodEntryListeners() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool HasMethodEntryListeners() const SHARED_REQUIRES(Locks::mutator_lock_) {
return have_method_entry_listeners_;
}
- bool HasMethodExitListeners() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool HasMethodExitListeners() const SHARED_REQUIRES(Locks::mutator_lock_) {
return have_method_exit_listeners_;
}
- bool HasMethodUnwindListeners() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool HasMethodUnwindListeners() const SHARED_REQUIRES(Locks::mutator_lock_) {
return have_method_unwind_listeners_;
}
- bool HasDexPcListeners() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool HasDexPcListeners() const SHARED_REQUIRES(Locks::mutator_lock_) {
return have_dex_pc_listeners_;
}
- bool HasFieldReadListeners() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool HasFieldReadListeners() const SHARED_REQUIRES(Locks::mutator_lock_) {
return have_field_read_listeners_;
}
- bool HasFieldWriteListeners() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool HasFieldWriteListeners() const SHARED_REQUIRES(Locks::mutator_lock_) {
return have_field_write_listeners_;
}
- bool HasExceptionCaughtListeners() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool HasExceptionCaughtListeners() const SHARED_REQUIRES(Locks::mutator_lock_) {
return have_exception_caught_listeners_;
}
- bool HasBackwardBranchListeners() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool HasBackwardBranchListeners() const SHARED_REQUIRES(Locks::mutator_lock_) {
return have_backward_branch_listeners_;
}
- bool IsActive() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsActive() const SHARED_REQUIRES(Locks::mutator_lock_) {
return have_dex_pc_listeners_ || have_method_entry_listeners_ || have_method_exit_listeners_ ||
have_field_read_listeners_ || have_field_write_listeners_ ||
have_exception_caught_listeners_ || have_method_unwind_listeners_;
@@ -274,7 +267,7 @@
// listeners into executing code and get method enter events for methods already on the stack.
void MethodEnterEvent(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (UNLIKELY(HasMethodEntryListeners())) {
MethodEnterEventImpl(thread, this_object, method, dex_pc);
}
@@ -284,7 +277,7 @@
void MethodExitEvent(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc,
const JValue& return_value) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (UNLIKELY(HasMethodExitListeners())) {
MethodExitEventImpl(thread, this_object, method, dex_pc, return_value);
}
@@ -293,12 +286,12 @@
// Inform listeners that a method has been exited due to an exception.
void MethodUnwindEvent(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Inform listeners that the dex pc has moved (only supported by the interpreter).
void DexPcMovedEvent(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (UNLIKELY(HasDexPcListeners())) {
DexPcMovedEventImpl(thread, this_object, method, dex_pc);
}
@@ -306,7 +299,7 @@
// Inform listeners that a backward branch has been taken (only supported by the interpreter).
void BackwardBranch(Thread* thread, ArtMethod* method, int32_t offset) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (UNLIKELY(HasBackwardBranchListeners())) {
BackwardBranchImpl(thread, method, offset);
}
@@ -316,7 +309,7 @@
void FieldReadEvent(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc,
ArtField* field) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (UNLIKELY(HasFieldReadListeners())) {
FieldReadEventImpl(thread, this_object, method, dex_pc, field);
}
@@ -326,7 +319,7 @@
void FieldWriteEvent(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc,
ArtField* field, const JValue& field_value) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (UNLIKELY(HasFieldWriteListeners())) {
FieldWriteEventImpl(thread, this_object, method, dex_pc, field, field_value);
}
@@ -334,30 +327,31 @@
// Inform listeners that an exception was caught.
void ExceptionCaughtEvent(Thread* thread, mirror::Throwable* exception_object) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Called when an instrumented method is entered. The intended link register (lr) is saved so
// that returning causes a branch to the method exit stub. Generates method enter events.
void PushInstrumentationStackFrame(Thread* self, mirror::Object* this_object,
ArtMethod* method, uintptr_t lr,
bool interpreter_entry)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Called when an instrumented method is exited. Removes the pushed instrumentation frame,
// returning the intended link register. Generates method exit events.
TwoWordReturn PopInstrumentationStackFrame(Thread* self, uintptr_t* return_pc,
uint64_t gpr_result, uint64_t fpr_result)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
// Pops an instrumentation frame from the current thread and generates an unwind event.
void PopMethodForUnwind(Thread* self, bool is_deoptimization) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Call-back used when configuring stubs.
- void InstallStubsForClass(mirror::Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void InstallStubsForClass(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!deoptimized_methods_lock_);
void InstallStubsForMethod(ArtMethod* method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
private:
InstrumentationLevel GetCurrentInstrumentationLevel() const;
@@ -368,11 +362,10 @@
// instrumentation level it needs. Therefore the current instrumentation level
// becomes the highest instrumentation level required by a client.
void ConfigureStubs(const char* key, InstrumentationLevel desired_instrumentation_level)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::classlinker_classes_lock_,
- deoptimized_methods_lock_);
+ REQUIRES(Locks::mutator_lock_, !deoptimized_methods_lock_, !Locks::thread_list_lock_,
+ !Locks::classlinker_classes_lock_);
- void UpdateInterpreterHandlerTable() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void UpdateInterpreterHandlerTable() REQUIRES(Locks::mutator_lock_) {
interpreter_handler_table_ = IsActive() ? kAlternativeHandlerTable : kMainHandlerTable;
}
@@ -382,38 +375,36 @@
void MethodEnterEventImpl(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void MethodExitEventImpl(Thread* thread, mirror::Object* this_object,
ArtMethod* method,
uint32_t dex_pc, const JValue& return_value) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void DexPcMovedEventImpl(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void BackwardBranchImpl(Thread* thread, ArtMethod* method, int32_t offset) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void FieldReadEventImpl(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc,
ArtField* field) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void FieldWriteEventImpl(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc,
ArtField* field, const JValue& field_value) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Read barrier-aware utility functions for accessing deoptimized_methods_
bool AddDeoptimizedMethod(ArtMethod* method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(deoptimized_methods_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(deoptimized_methods_lock_);
bool IsDeoptimizedMethod(ArtMethod* method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, deoptimized_methods_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_, deoptimized_methods_lock_);
bool RemoveDeoptimizedMethod(ArtMethod* method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(deoptimized_methods_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(deoptimized_methods_lock_);
ArtMethod* BeginDeoptimizedMethod()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, deoptimized_methods_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_, deoptimized_methods_lock_);
bool IsDeoptimizedMethodsEmpty() const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, deoptimized_methods_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_, deoptimized_methods_lock_);
// Have we hijacked ArtMethod::code_ so that it calls instrumentation/interpreter code?
bool instrumentation_stubs_installed_;
@@ -508,7 +499,7 @@
interpreter_entry_(interpreter_entry) {
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const SHARED_REQUIRES(Locks::mutator_lock_);
mirror::Object* this_object_;
ArtMethod* method_;
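
A pattern worth noting in the instrumentation.h hunks: public entry points carry REQUIRES(!deoptimized_methods_lock_), acquire the lock themselves, and delegate to private helpers annotated REQUIRES(deoptimized_methods_lock_). Sketch with made-up names, reusing Mutex and the macros from the earlier sketches:

    class Tracker {
     public:
      // Public API: the caller must not hold the lock; we take it here.
      bool IsTracked(int id) REQUIRES(!lock_) {
        lock_.Lock();
        bool result = IsTrackedLocked(id);
        lock_.Unlock();
        return result;
      }

     private:
      // Private helper: only callable with the lock held.
      bool IsTrackedLocked(int id) REQUIRES(lock_) { return id == tracked_; }

      Mutex lock_;
      int tracked_ GUARDED_BY(lock_) = -1;
    };
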
diff --git a/runtime/instrumentation_test.cc b/runtime/instrumentation_test.cc
index 85bb8c4..b49f7e1 100644
--- a/runtime/instrumentation_test.cc
+++ b/runtime/instrumentation_test.cc
@@ -44,7 +44,7 @@
mirror::Object* this_object ATTRIBUTE_UNUSED,
ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
received_method_enter_event = true;
}
@@ -53,7 +53,7 @@
ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED,
const JValue& return_value ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
received_method_exit_event = true;
}
@@ -61,7 +61,7 @@
mirror::Object* this_object ATTRIBUTE_UNUSED,
ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
received_method_unwind_event = true;
}
@@ -69,7 +69,7 @@
mirror::Object* this_object ATTRIBUTE_UNUSED,
ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t new_dex_pc ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
received_dex_pc_moved_event = true;
}
@@ -78,7 +78,7 @@
ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED,
ArtField* field ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
received_field_read_event = true;
}
@@ -88,20 +88,20 @@
uint32_t dex_pc ATTRIBUTE_UNUSED,
ArtField* field ATTRIBUTE_UNUSED,
const JValue& field_value ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
received_field_written_event = true;
}
void ExceptionCaught(Thread* thread ATTRIBUTE_UNUSED,
mirror::Throwable* exception_object ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
received_exception_caught_event = true;
}
void BackwardBranch(Thread* thread ATTRIBUTE_UNUSED,
ArtMethod* method ATTRIBUTE_UNUSED,
int32_t dex_pc_offset ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
received_backward_branch_event = true;
}
@@ -198,7 +198,7 @@
}
void DeoptimizeMethod(Thread* self, ArtMethod* method, bool enable_deoptimization)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
Runtime* runtime = Runtime::Current();
instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
self->TransitionFromRunnableToSuspended(kSuspended);
@@ -213,7 +213,7 @@
void UndeoptimizeMethod(Thread* self, ArtMethod* method,
const char* key, bool disable_deoptimization)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
Runtime* runtime = Runtime::Current();
instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
self->TransitionFromRunnableToSuspended(kSuspended);
@@ -227,7 +227,7 @@
}
void DeoptimizeEverything(Thread* self, const char* key, bool enable_deoptimization)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
Runtime* runtime = Runtime::Current();
instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
self->TransitionFromRunnableToSuspended(kSuspended);
@@ -241,7 +241,7 @@
}
void UndeoptimizeEverything(Thread* self, const char* key, bool disable_deoptimization)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
Runtime* runtime = Runtime::Current();
instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
self->TransitionFromRunnableToSuspended(kSuspended);
@@ -255,7 +255,7 @@
}
void EnableMethodTracing(Thread* self, const char* key, bool needs_interpreter)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
Runtime* runtime = Runtime::Current();
instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
self->TransitionFromRunnableToSuspended(kSuspended);
@@ -266,7 +266,7 @@
}
void DisableMethodTracing(Thread* self, const char* key)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
Runtime* runtime = Runtime::Current();
instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
self->TransitionFromRunnableToSuspended(kSuspended);
@@ -278,7 +278,7 @@
private:
static bool HasEventListener(const instrumentation::Instrumentation* instr, uint32_t event_type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
switch (event_type) {
case instrumentation::Instrumentation::kMethodEntered:
return instr->HasMethodEntryListeners();
@@ -305,7 +305,7 @@
static void ReportEvent(const instrumentation::Instrumentation* instr, uint32_t event_type,
Thread* self, ArtMethod* method, mirror::Object* obj,
uint32_t dex_pc)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
switch (event_type) {
case instrumentation::Instrumentation::kMethodEntered:
instr->MethodEnterEvent(self, obj, method, dex_pc);
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index ef08d74..0be6675 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -58,70 +58,74 @@
// Interns a potentially new string in the 'strong' table. May cause thread suspension.
mirror::String* InternStrong(int32_t utf16_length, const char* utf8_data)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
- // Only used by image writer.
- mirror::String* InternImageString(mirror::String* s)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Only used by image writer. Special version that may not cause thread suspension since the GC
+ // cannot be running while we are doing image writing.
+ mirror::String* InternImageString(mirror::String* s) SHARED_REQUIRES(Locks::mutator_lock_);
// Interns a potentially new string in the 'strong' table. May cause thread suspension.
- mirror::String* InternStrong(const char* utf8_data)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::String* InternStrong(const char* utf8_data) SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Roles::uninterruptible_);
// Interns a potentially new string in the 'strong' table. May cause thread suspension.
- mirror::String* InternStrong(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::String* InternStrong(mirror::String* s) SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Roles::uninterruptible_);
// Interns a potentially new string in the 'weak' table. May cause thread suspension.
- mirror::String* InternWeak(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::String* InternWeak(mirror::String* s) SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Roles::uninterruptible_);
- void SweepInternTableWeaks(IsMarkedVisitor* visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SweepInternTableWeaks(IsMarkedVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Locks::intern_table_lock_);
- bool ContainsWeak(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool ContainsWeak(mirror::String* s) SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Locks::intern_table_lock_);
// Total number of interned strings.
- size_t Size() const LOCKS_EXCLUDED(Locks::intern_table_lock_);
+ size_t Size() const REQUIRES(!Locks::intern_table_lock_);
// Total number of weakly live interned strings.
- size_t StrongSize() const LOCKS_EXCLUDED(Locks::intern_table_lock_);
+ size_t StrongSize() const REQUIRES(!Locks::intern_table_lock_);
// Total number of strongly live interned strings.
- size_t WeakSize() const LOCKS_EXCLUDED(Locks::intern_table_lock_);
+ size_t WeakSize() const REQUIRES(!Locks::intern_table_lock_);
void VisitRoots(RootVisitor* visitor, VisitRootFlags flags)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::intern_table_lock_);
- void DumpForSigQuit(std::ostream& os) const;
+ void DumpForSigQuit(std::ostream& os) const REQUIRES(!Locks::intern_table_lock_);
- void DisallowNewInterns() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void AllowNewInterns() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void EnsureNewInternsDisallowed() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void BroadcastForNewInterns() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void EnsureNewWeakInternsDisallowed() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void DisallowNewInterns() SHARED_REQUIRES(Locks::mutator_lock_);
+ void AllowNewInterns() SHARED_REQUIRES(Locks::mutator_lock_);
+ void EnsureNewInternsDisallowed() SHARED_REQUIRES(Locks::mutator_lock_);
+ void BroadcastForNewInterns() SHARED_REQUIRES(Locks::mutator_lock_);
+ void EnsureNewWeakInternsDisallowed() SHARED_REQUIRES(Locks::mutator_lock_);
// Adds all of the resolved image strings from the image space into the intern table. The
// advantage of doing this is preventing expensive DexFile::FindStringId calls.
void AddImageStringsToTable(gc::space::ImageSpace* image_space)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::intern_table_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::intern_table_lock_);
+
// Copy the post zygote tables to pre zygote to save memory by preventing dirty pages.
void SwapPostZygoteWithPreZygote()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::intern_table_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::intern_table_lock_);
// Add an intern table which was serialized to the image.
void AddImageInternTable(gc::space::ImageSpace* image_space)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::intern_table_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::intern_table_lock_);
// Read the intern table from memory. The elements aren't copied; the intern hash set data will
// point to somewhere within ptr. Only reads the strong interns.
- size_t ReadFromMemory(const uint8_t* ptr) LOCKS_EXCLUDED(Locks::intern_table_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ size_t ReadFromMemory(const uint8_t* ptr) REQUIRES(!Locks::intern_table_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Write the post zygote intern table to a pointer. Only writes the strong interns, since it is
// expected that there are no weak interns when this is called from the image writer.
- size_t WriteToMemory(uint8_t* ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(Locks::intern_table_lock_);
+ size_t WriteToMemory(uint8_t* ptr) SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Locks::intern_table_lock_);
// Change the weak root state. May broadcast to waiters.
void ChangeWeakRootState(gc::WeakRootState new_state)
- LOCKS_EXCLUDED(Locks::intern_table_lock_);
+ REQUIRES(!Locks::intern_table_lock_);
private:
class StringHashEquals {
@@ -144,39 +148,33 @@
// weak interns and strong interns.
class Table {
public:
- mirror::String* Find(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
- void Insert(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+ mirror::String* Find(mirror::String* s) SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(Locks::intern_table_lock_);
+ void Insert(mirror::String* s) SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(Locks::intern_table_lock_);
void Remove(mirror::String* s)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
void VisitRoots(RootVisitor* visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
void SweepWeaks(IsMarkedVisitor* visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
- void SwapPostZygoteWithPreZygote() EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
- size_t Size() const EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
+ void SwapPostZygoteWithPreZygote() REQUIRES(Locks::intern_table_lock_);
+ size_t Size() const REQUIRES(Locks::intern_table_lock_);
// ReadIntoPreZygoteTable is called from ReadFromMemory, which happens during runtime creation
// when we load the image intern table. Returns how many bytes were read.
size_t ReadIntoPreZygoteTable(const uint8_t* ptr)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
// The image writer calls WriteFromPostZygoteTable through WriteToMemory; it writes the interns
// in the post zygote table. Returns how many bytes were written.
size_t WriteFromPostZygoteTable(uint8_t* ptr)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
private:
typedef HashSet<GcRoot<mirror::String>, GcRootEmptyFn, StringHashEquals, StringHashEquals,
TrackingAllocator<GcRoot<mirror::String>, kAllocatorTagInternTable>> UnorderedSet;
void SweepWeaks(UnorderedSet* set, IsMarkedVisitor* visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
// We call SwapPostZygoteWithPreZygote when we create the zygote to reduce private dirty pages
// caused by modifying the zygote intern table's hash table. The pre zygote tables are the
@@ -188,57 +186,43 @@
// Insert if non-null, otherwise return null.
mirror::String* Insert(mirror::String* s, bool is_strong, bool holding_locks)
- LOCKS_EXCLUDED(Locks::intern_table_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
mirror::String* LookupStrong(mirror::String* s)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
mirror::String* LookupWeak(mirror::String* s)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
mirror::String* InsertStrong(mirror::String* s)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
mirror::String* InsertWeak(mirror::String* s)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
void RemoveStrong(mirror::String* s)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
void RemoveWeak(mirror::String* s)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
// Transaction rollback access.
mirror::String* LookupStringFromImage(mirror::String* s)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
mirror::String* InsertStrongFromTransaction(mirror::String* s)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
mirror::String* InsertWeakFromTransaction(mirror::String* s)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
void RemoveStrongFromTransaction(mirror::String* s)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
void RemoveWeakFromTransaction(mirror::String* s)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
- friend class Transaction;
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
size_t ReadFromMemoryLocked(const uint8_t* ptr)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
// Change the weak root state. May broadcast to waiters.
void ChangeWeakRootStateLocked(gc::WeakRootState new_state)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+ REQUIRES(Locks::intern_table_lock_);
// Wait until we can read weak roots.
- void WaitUntilAccessible(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void WaitUntilAccessible(Thread* self)
+ REQUIRES(Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
bool image_added_to_intern_table_ GUARDED_BY(Locks::intern_table_lock_);
bool log_new_roots_ GUARDED_BY(Locks::intern_table_lock_);
@@ -256,6 +240,9 @@
Table weak_interns_ GUARDED_BY(Locks::intern_table_lock_);
// Weak root state, used for concurrent system weak processing and more.
gc::WeakRootState weak_root_state_ GUARDED_BY(Locks::intern_table_lock_);
+
+ friend class Transaction;
+ DISALLOW_COPY_AND_ASSIGN(InternTable);
};
} // namespace art
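
Note on the annotation change that runs through this whole patch: SHARED_LOCKS_REQUIRED becomes
SHARED_REQUIRES, EXCLUSIVE_LOCKS_REQUIRED becomes REQUIRES, and LOCKS_EXCLUDED(x) becomes the
negative capability REQUIRES(!x). The negative form is stronger: under Clang's
-Wthread-safety-negative analysis the compiler proves at each call site that the caller does not
already hold the lock, where LOCKS_EXCLUDED was purely advisory. A minimal self-contained sketch
of the pattern, using the raw Clang attributes rather than ART's wrapper macros:

    // Compile with: clang++ -std=c++11 -c -Wthread-safety -Wthread-safety-negative sketch.cc
    // Names mirror ART's macros, but everything here is defined locally.
    #define CAPABILITY(x) __attribute__((capability(x)))
    #define ACQUIRE(...) __attribute__((acquire_capability(__VA_ARGS__)))
    #define RELEASE(...) __attribute__((release_capability(__VA_ARGS__)))
    #define REQUIRES(...) __attribute__((requires_capability(__VA_ARGS__)))
    #define SHARED_REQUIRES(...) __attribute__((requires_shared_capability(__VA_ARGS__)))

    struct CAPABILITY("mutex") Mutex {
      void Lock() ACQUIRE(this) {}
      void Unlock() RELEASE(this) {}
    };

    Mutex intern_table_lock;

    // Caller must already hold the lock (old spelling: EXCLUSIVE_LOCKS_REQUIRED).
    void InsertLocked() REQUIRES(intern_table_lock) {}

    // Caller must provably NOT hold the lock (old spelling: LOCKS_EXCLUDED);
    // the function takes and releases the lock itself.
    void Insert() REQUIRES(!intern_table_lock) {
      intern_table_lock.Lock();
      InsertLocked();
      intern_table_lock.Unlock();
    }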
diff --git a/runtime/intern_table_test.cc b/runtime/intern_table_test.cc
index c987180..b60b32d 100644
--- a/runtime/intern_table_test.cc
+++ b/runtime/intern_table_test.cc
@@ -62,7 +62,7 @@
class TestPredicate : public IsMarkedVisitor {
public:
- mirror::Object* IsMarked(mirror::Object* s) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Object* IsMarked(mirror::Object* s) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
bool erased = false;
for (auto it = expected_.begin(), end = expected_.end(); it != end; ++it) {
if (*it == s) {
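
Context for the TestPredicate above: an IsMarkedVisitor is the GC's liveness oracle. By ART
convention, IsMarked returns null for a dead object and the current (possibly relocated) address
for a live one, which is how the intern table's SweepWeaks decides whether to drop a weak intern.
A toy sketch of that contract, with local stand-ins for the mirror::Object types:

    #include <unordered_map>

    // Toy stand-ins; the real types are art::mirror::Object and art::IsMarkedVisitor.
    struct Object {};

    struct IsMarkedVisitor {
      // Returns nullptr if obj is dead, otherwise the current (possibly
      // relocated) address of the object.
      virtual Object* IsMarked(Object* obj) = 0;
      virtual ~IsMarkedVisitor() {}
    };

    // A visitor backed by a mark table, as a moving collector might keep.
    class TableVisitor : public IsMarkedVisitor {
     public:
      Object* IsMarked(Object* obj) override {
        auto it = mark_table_.find(obj);
        return it == mark_table_.end() ? nullptr : it->second;
      }
      void Mark(Object* from, Object* to) { mark_table_[from] = to; }
     private:
      std::unordered_map<Object*, Object*> mark_table_;  // old -> new address
    };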
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 26860e7..6c6232c 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -28,7 +28,7 @@
static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& shorty,
Object* receiver, uint32_t* args, JValue* result)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// TODO: The following enters JNI code using a typedef-ed function rather than the JNI compiler;
// it should be removed and JNI compiled stubs used instead.
ScopedObjectAccessUnchecked soa(self);
@@ -240,23 +240,23 @@
UNREACHABLE();
}
// Explicit definitions of ExecuteGotoImpl.
-template<> SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+template<> SHARED_REQUIRES(Locks::mutator_lock_)
JValue ExecuteGotoImpl<true, false>(Thread* self, const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame, JValue result_register);
-template<> SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+template<> SHARED_REQUIRES(Locks::mutator_lock_)
JValue ExecuteGotoImpl<false, false>(Thread* self, const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame, JValue result_register);
-template<> SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+template<> SHARED_REQUIRES(Locks::mutator_lock_)
JValue ExecuteGotoImpl<true, true>(Thread* self, const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame, JValue result_register);
-template<> SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+template<> SHARED_REQUIRES(Locks::mutator_lock_)
JValue ExecuteGotoImpl<false, true>(Thread* self, const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame, JValue result_register);
#endif
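
The repeated attribute on each explicit instantiation above is deliberate: the thread-safety
annotation is part of the declaration, so every template<> declaration restates SHARED_REQUIRES
for the analysis to see it at call sites. A compilable sketch of the shape, with a local lock
standing in for Locks::mutator_lock_:

    #define CAPABILITY(x) __attribute__((capability(x)))
    #define SHARED_REQUIRES(...) __attribute__((requires_shared_capability(__VA_ARGS__)))

    struct CAPABILITY("mutex") Mutex {};
    Mutex mutator_lock;

    // Primary template, annotated on its declaration.
    template <bool kDoAccessCheck>
    int Execute(int dex_pc) SHARED_REQUIRES(mutator_lock);

    // Explicit specialization declarations restate the attribute so callers
    // compiled against them still see the lock requirement.
    template <> SHARED_REQUIRES(mutator_lock)
    int Execute<true>(int dex_pc);
    template <> SHARED_REQUIRES(mutator_lock)
    int Execute<false>(int dex_pc);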
static JValue Execute(Thread* self, const DexFile::CodeItem* code_item, ShadowFrame& shadow_frame,
JValue result_register)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static inline JValue Execute(Thread* self, const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame, JValue result_register) {
@@ -395,7 +395,7 @@
}
void EnterInterpreterFromDeoptimize(Thread* self, ShadowFrame* shadow_frame, JValue* ret_val)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
JValue value;
// Set value to last known result in case the shadow frame chain is empty.
value.SetJ(ret_val->GetJ());
diff --git a/runtime/interpreter/interpreter.h b/runtime/interpreter/interpreter.h
index 446c5bb..61140a2 100644
--- a/runtime/interpreter/interpreter.h
+++ b/runtime/interpreter/interpreter.h
@@ -35,26 +35,26 @@
// Called by ArtMethod::Invoke; shadow frame arguments are taken from the args array.
extern void EnterInterpreterFromInvoke(Thread* self, ArtMethod* method,
mirror::Object* receiver, uint32_t* args, JValue* result)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
extern void EnterInterpreterFromDeoptimize(Thread* self, ShadowFrame* shadow_frame,
JValue* ret_val)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
extern JValue EnterInterpreterFromEntryPoint(Thread* self, const DexFile::CodeItem* code_item,
ShadowFrame* shadow_frame)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
} // namespace interpreter
extern "C" void artInterpreterToInterpreterBridge(Thread* self, const DexFile::CodeItem* code_item,
ShadowFrame* shadow_frame, JValue* result)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
extern "C" void artInterpreterToCompiledCodeBridge(Thread* self, const DexFile::CodeItem* code_item,
ShadowFrame* shadow_frame, JValue* result)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
} // namespace art
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 0980ea1..9de9e8a 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -192,7 +192,7 @@
template<Primitive::Type field_type>
static JValue GetFieldValue(const ShadowFrame& shadow_frame, uint32_t vreg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
JValue field_value;
switch (field_type) {
case Primitive::kPrimBoolean:
@@ -450,7 +450,7 @@
// Copy register 'src_reg' of shadow_frame into register 'dest_reg' of new_shadow_frame.
static inline void AssignRegister(ShadowFrame* new_shadow_frame, const ShadowFrame& shadow_frame,
size_t dest_reg, size_t src_reg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// Unsigned type required so that sign extension does not make this wrong on 64-bit systems.
uint32_t src_value = shadow_frame.GetVReg(src_reg);
mirror::Object* o = shadow_frame.GetVRegReference<kVerifyNone>(src_reg);
@@ -482,7 +482,7 @@
}
// Separate declaration is required solely for the attributes.
-template<bool is_range, bool do_assignability_check> SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+template<bool is_range, bool do_assignability_check> SHARED_REQUIRES(Locks::mutator_lock_)
static inline bool DoCallCommon(ArtMethod* called_method,
Thread* self,
ShadowFrame& shadow_frame,
@@ -491,7 +491,7 @@
uint32_t arg[Instruction::kMaxVarArgRegs],
uint32_t vregC) ALWAYS_INLINE;
-SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+SHARED_REQUIRES(Locks::mutator_lock_)
static inline bool NeedsInterpreter(Thread* self, ShadowFrame* new_shadow_frame) ALWAYS_INLINE;
static inline bool NeedsInterpreter(Thread* self, ShadowFrame* new_shadow_frame) {
@@ -834,7 +834,7 @@
return true;
}
-// TODO fix thread analysis: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
+// TODO fix thread analysis: should be SHARED_REQUIRES(Locks::mutator_lock_).
template<typename T>
static void RecordArrayElementsInTransactionImpl(mirror::PrimitiveArray<T>* array, int32_t count)
NO_THREAD_SAFETY_ANALYSIS {
@@ -845,7 +845,7 @@
}
void RecordArrayElementsInTransaction(mirror::Array* array, int32_t count)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(Runtime::Current()->IsActiveTransaction());
DCHECK(array != nullptr);
DCHECK_LE(count, array->GetLength());
@@ -884,7 +884,7 @@
// Explicit DoCall template function declarations.
#define EXPLICIT_DO_CALL_TEMPLATE_DECL(_is_range, _do_assignability_check) \
- template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) \
+ template SHARED_REQUIRES(Locks::mutator_lock_) \
bool DoCall<_is_range, _do_assignability_check>(ArtMethod* method, Thread* self, \
ShadowFrame& shadow_frame, \
const Instruction* inst, uint16_t inst_data, \
@@ -897,7 +897,7 @@
// Explicit DoLambdaCall template function declarations.
#define EXPLICIT_DO_LAMBDA_CALL_TEMPLATE_DECL(_is_range, _do_assignability_check) \
- template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) \
+ template SHARED_REQUIRES(Locks::mutator_lock_) \
bool DoLambdaCall<_is_range, _do_assignability_check>(ArtMethod* method, Thread* self, \
ShadowFrame& shadow_frame, \
const Instruction* inst, \
@@ -911,7 +911,7 @@
// Explicit DoFilledNewArray template function declarations.
#define EXPLICIT_DO_FILLED_NEW_ARRAY_TEMPLATE_DECL(_is_range_, _check, _transaction_active) \
- template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) \
+ template SHARED_REQUIRES(Locks::mutator_lock_) \
bool DoFilledNewArray<_is_range_, _check, _transaction_active>(const Instruction* inst, \
const ShadowFrame& shadow_frame, \
Thread* self, JValue* result)
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 9babb18..a6cccef 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -72,7 +72,7 @@
ShadowFrame& shadow_frame, JValue result_register);
void ThrowNullPointerExceptionFromInterpreter()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static inline void DoMonitorEnter(Thread* self, Object* ref) NO_THREAD_SAFETY_ANALYSIS {
ref->MonitorEnter(self);
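
DoMonitorEnter above is the standard escape hatch: the lock it acquires is chosen by the runtime
value of ref, and the matching release happens in a different function (the monitor-exit handler),
so the acquire/release pairing cannot be expressed function-locally. NO_THREAD_SAFETY_ANALYSIS
turns the checker off for just this function. A minimal sketch of the idiom:

    #define CAPABILITY(x) __attribute__((capability(x)))
    #define ACQUIRE(...) __attribute__((acquire_capability(__VA_ARGS__)))
    #define NO_THREAD_SAFETY_ANALYSIS __attribute__((no_thread_safety_analysis))

    struct CAPABILITY("mutex") Monitor {
      void Enter() ACQUIRE(this) {}
    };

    struct Object { Monitor monitor; };

    // Which monitor gets locked depends on the object we are handed, and the
    // matching exit happens elsewhere, so the pairing cannot be expressed
    // function-locally; opt this one function out of the analysis.
    static inline void DoMonitorEnter(Object* ref) NO_THREAD_SAFETY_ANALYSIS {
      ref->monitor.Enter();
    }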
@@ -84,13 +84,13 @@
void AbortTransactionF(Thread* self, const char* fmt, ...)
__attribute__((__format__(__printf__, 2, 3)))
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void AbortTransactionV(Thread* self, const char* fmt, va_list args)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void RecordArrayElementsInTransaction(mirror::Array* array, int32_t count)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Invokes the given method. This is part of the invocation support and is used by DoInvoke and
// DoInvokeVirtualQuick functions.
@@ -114,7 +114,7 @@
//
// If the validation fails, return false and raise an exception.
static inline bool IsValidLambdaTargetOrThrow(ArtMethod* called_method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
bool success = false;
if (UNLIKELY(called_method == nullptr)) {
@@ -191,7 +191,7 @@
// (Exceptions are thrown by creating a new exception and storing it in the thread's TLS.)
static inline ArtMethod* ReadLambdaClosureFromVRegsOrThrow(ShadowFrame& shadow_frame,
uint32_t vreg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// TODO(iam): Introduce a closure abstraction that will contain the captured variables
// instead of just an ArtMethod.
// This is temporarily using 2 vregs because a native ArtMethod can be up to 64-bit,
@@ -306,32 +306,32 @@
// Returns true on success, otherwise throws an exception and returns false.
template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check>
bool DoFieldGet(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst,
- uint16_t inst_data) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ uint16_t inst_data) SHARED_REQUIRES(Locks::mutator_lock_);
// Handles iget-quick, iget-wide-quick and iget-object-quick instructions.
// Returns true on success, otherwise throws an exception and returns false.
template<Primitive::Type field_type>
bool DoIGetQuick(ShadowFrame& shadow_frame, const Instruction* inst, uint16_t inst_data)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Handles iput-XXX and sput-XXX instructions.
// Returns true on success, otherwise throws an exception and returns false.
template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check,
bool transaction_active>
bool DoFieldPut(Thread* self, const ShadowFrame& shadow_frame, const Instruction* inst,
- uint16_t inst_data) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ uint16_t inst_data) SHARED_REQUIRES(Locks::mutator_lock_);
// Handles iput-quick, iput-wide-quick and iput-object-quick instructions.
// Returns true on success, otherwise throws an exception and returns false.
template<Primitive::Type field_type, bool transaction_active>
bool DoIPutQuick(const ShadowFrame& shadow_frame, const Instruction* inst, uint16_t inst_data)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Handles string resolution for const-string and const-string-jumbo instructions. Also ensures the
// java.lang.String class is initialized.
static inline String* ResolveString(Thread* self, ShadowFrame& shadow_frame, uint32_t string_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
Class* java_lang_string_class = String::GetJavaLangString();
if (UNLIKELY(!java_lang_string_class->IsInitialized())) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
@@ -358,7 +358,7 @@
// Returns true on success, otherwise throws a java.lang.ArithmeticException and returns false.
static inline bool DoIntDivide(ShadowFrame& shadow_frame, size_t result_reg,
int32_t dividend, int32_t divisor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
constexpr int32_t kMinInt = std::numeric_limits<int32_t>::min();
if (UNLIKELY(divisor == 0)) {
ThrowArithmeticExceptionDivideByZero();
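
DoIntDivide guards the two inputs a plain hardware divide cannot handle under Java semantics:
division by zero throws java.lang.ArithmeticException, and kMinInt / -1 (whose true quotient 2^31
is unrepresentable in int32) must yield kMinInt rather than trap. A standalone sketch of the
complete check, outside the interpreter plumbing:

    #include <cassert>
    #include <cstdint>
    #include <limits>

    // Returns false for the division-by-zero case (where the interpreter would
    // throw java.lang.ArithmeticException); otherwise writes the Java-semantics
    // quotient and returns true.
    static bool JavaIntDivide(int32_t dividend, int32_t divisor, int32_t* result) {
      constexpr int32_t kMinInt = std::numeric_limits<int32_t>::min();
      if (divisor == 0) {
        return false;  // ART calls ThrowArithmeticExceptionDivideByZero() here.
      }
      if (dividend == kMinInt && divisor == -1) {
        *result = kMinInt;  // -2147483648 / -1 wraps to -2147483648 in Java.
      } else {
        *result = dividend / divisor;
      }
      return true;
    }

    int main() {
      int32_t r;
      assert(JavaIntDivide(7, 2, &r) && r == 3);
      assert(!JavaIntDivide(7, 0, &r));
      assert(JavaIntDivide(INT32_MIN, -1, &r) && r == INT32_MIN);
      return 0;
    }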
@@ -376,7 +376,7 @@
// Returns true on success, otherwise throws a java.lang.ArithmeticException and returns false.
static inline bool DoIntRemainder(ShadowFrame& shadow_frame, size_t result_reg,
int32_t dividend, int32_t divisor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
constexpr int32_t kMinInt = std::numeric_limits<int32_t>::min();
if (UNLIKELY(divisor == 0)) {
ThrowArithmeticExceptionDivideByZero();
@@ -394,7 +394,7 @@
// Returns true on success, otherwise throws a java.lang.ArithmeticException and returns false.
static inline bool DoLongDivide(ShadowFrame& shadow_frame, size_t result_reg,
int64_t dividend, int64_t divisor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
const int64_t kMinLong = std::numeric_limits<int64_t>::min();
if (UNLIKELY(divisor == 0)) {
ThrowArithmeticExceptionDivideByZero();
@@ -412,7 +412,7 @@
// Returns true on success, otherwise throws a java.lang.ArithmeticException and returns false.
static inline bool DoLongRemainder(ShadowFrame& shadow_frame, size_t result_reg,
int64_t dividend, int64_t divisor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
const int64_t kMinLong = std::numeric_limits<int64_t>::min();
if (UNLIKELY(divisor == 0)) {
ThrowArithmeticExceptionDivideByZero();
@@ -436,7 +436,7 @@
// Returns the branch offset to the next instruction to execute.
static inline int32_t DoPackedSwitch(const Instruction* inst, const ShadowFrame& shadow_frame,
uint16_t inst_data)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(inst->Opcode() == Instruction::PACKED_SWITCH);
const uint16_t* switch_data = reinterpret_cast<const uint16_t*>(inst) + inst->VRegB_31t();
int32_t test_val = shadow_frame.GetVReg(inst->VRegA_31t(inst_data));
@@ -464,7 +464,7 @@
// Returns the branch offset to the next instruction to execute.
static inline int32_t DoSparseSwitch(const Instruction* inst, const ShadowFrame& shadow_frame,
uint16_t inst_data)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(inst->Opcode() == Instruction::SPARSE_SWITCH);
const uint16_t* switch_data = reinterpret_cast<const uint16_t*>(inst) + inst->VRegB_31t();
int32_t test_val = shadow_frame.GetVReg(inst->VRegA_31t(inst_data));
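
Both switch helpers walk a payload table embedded in the code stream at inst + VRegB_31t(). Per
the Dex bytecode format, a packed-switch payload is (ushort ident = 0x0100, ushort size, int
first_key, int targets[size]) and test_val indexes it directly; a sparse-switch payload is (ident
0x0200, size, sorted keys[size], targets[size]) and test_val is found by search. A sketch of the
packed case under that layout (a valid Dex file keeps the payload 4-byte aligned; the toy
encoding in main assumes a little-endian host):

    #include <cstdint>

    // Packed-switch payload, viewed as 16-bit code units:
    //   ushort ident = 0x0100; ushort size; int32 first_key; int32 targets[size];
    // Returns the branch offset for test_val, or 3 (the width in code units of
    // the packed-switch instruction itself) to fall through.
    static int32_t DoPackedSwitchSketch(const uint16_t* switch_data, int32_t test_val) {
      uint16_t size = switch_data[1];
      const int32_t* keys = reinterpret_cast<const int32_t*>(&switch_data[2]);
      int32_t first_key = keys[0];
      const int32_t* targets = &keys[1];
      if (test_val < first_key || test_val >= first_key + size) {
        return 3;  // Key outside [first_key, first_key + size): fall through.
      }
      return targets[test_val - first_key];
    }

    int main() {
      const uint16_t payload[] = {0x0100, 2, /* first_key = 10 */ 10, 0,
                                  /* targets 100, 200 */ 100, 0, 200, 0};
      return DoPackedSwitchSketch(payload, 11) == 200 ? 0 : 1;
    }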
@@ -497,7 +497,7 @@
template <bool _do_check>
static inline bool DoBoxLambda(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst,
- uint16_t inst_data) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uint16_t inst_data) SHARED_REQUIRES(Locks::mutator_lock_) {
/*
* box-lambda vA, vB /// opcode 0xf8, format 22x
* - vA is the target register where the Object representation of the closure will be stored
@@ -529,7 +529,7 @@
return true;
}
-template <bool _do_check> SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+template <bool _do_check> SHARED_REQUIRES(Locks::mutator_lock_)
static inline bool DoUnboxLambda(Thread* self,
ShadowFrame& shadow_frame,
const Instruction* inst,
@@ -553,7 +553,7 @@
ArtMethod* unboxed_closure = nullptr;
// Raise an exception if unboxing fails.
if (!Runtime::Current()->GetLambdaBoxTable()->UnboxLambda(boxed_closure_object,
- &unboxed_closure)) {
+ outof(unboxed_closure))) {
CHECK(self->IsExceptionPending());
return false;
}
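
The &unboxed_closure to outof(unboxed_closure) change here introduces an explicit out-parameter
wrapper: outof() wraps the destination in an out<T> so that "this argument is written, not read"
is visible at the call site, and the destination is guaranteed non-null. A hedged sketch of the
idea (illustrative only, not ART's actual runtime/base/out.h implementation):

    #include <cassert>

    // Illustrative out-parameter wrapper.
    template <typename T>
    class out {
     public:
      explicit out(T& param) : param_(&param) {}
      T& operator*() { return *param_; }
      T* operator->() { return param_; }
     private:
      T* const param_;
    };

    template <typename T>
    out<T> outof(T& param) {
      return out<T>(param);
    }

    // A callee that fills its result through the wrapper.
    static bool UnboxLambdaSketch(int boxed, out<int> result) {
      *result = boxed - 1;
      return true;
    }

    int main() {
      int unboxed_closure = 0;
      assert(UnboxLambdaSketch(42, outof(unboxed_closure)));
      assert(unboxed_closure == 41);
      return 0;
    }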
@@ -565,15 +565,15 @@
uint32_t FindNextInstructionFollowingException(Thread* self, ShadowFrame& shadow_frame,
uint32_t dex_pc, const instrumentation::Instrumentation* instrumentation)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
NO_RETURN void UnexpectedOpcode(const Instruction* inst, const ShadowFrame& shadow_frame)
__attribute__((cold))
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static inline void TraceExecution(const ShadowFrame& shadow_frame, const Instruction* inst,
const uint32_t dex_pc)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
constexpr bool kTracing = false;
if (kTracing) {
#define TRACE_LOG std::cerr
@@ -605,7 +605,7 @@
// Explicitly instantiate all DoInvoke functions.
#define EXPLICIT_DO_INVOKE_TEMPLATE_DECL(_type, _is_range, _do_check) \
- template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) \
+ template SHARED_REQUIRES(Locks::mutator_lock_) \
bool DoInvoke<_type, _is_range, _do_check>(Thread* self, ShadowFrame& shadow_frame, \
const Instruction* inst, uint16_t inst_data, \
JValue* result)
@@ -626,7 +626,7 @@
// Explicitly instantiate all DoInvokeVirtualQuick functions.
#define EXPLICIT_DO_INVOKE_VIRTUAL_QUICK_TEMPLATE_DECL(_is_range) \
- template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) \
+ template SHARED_REQUIRES(Locks::mutator_lock_) \
bool DoInvokeVirtualQuick<_is_range>(Thread* self, ShadowFrame& shadow_frame, \
const Instruction* inst, uint16_t inst_data, \
JValue* result)
@@ -637,7 +637,7 @@
// Explicitly instantiate all DoCreateLambda functions.
#define EXPLICIT_DO_CREATE_LAMBDA_DECL(_do_check) \
-template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) \
+template SHARED_REQUIRES(Locks::mutator_lock_) \
bool DoCreateLambda<_do_check>(Thread* self, ShadowFrame& shadow_frame, \
const Instruction* inst)
@@ -647,7 +647,7 @@
// Explicitly instantiate all DoInvokeLambda functions.
#define EXPLICIT_DO_INVOKE_LAMBDA_DECL(_do_check) \
-template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) \
+template SHARED_REQUIRES(Locks::mutator_lock_) \
bool DoInvokeLambda<_do_check>(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst, \
uint16_t inst_data, JValue* result);
@@ -657,7 +657,7 @@
// Explicitly instantiate all DoBoxLambda functions.
#define EXPLICIT_DO_BOX_LAMBDA_DECL(_do_check) \
-template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) \
+template SHARED_REQUIRES(Locks::mutator_lock_) \
bool DoBoxLambda<_do_check>(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst, \
uint16_t inst_data);
@@ -667,7 +667,7 @@
// Explicitly instantiate all DoUnboxLambda functions.
#define EXPLICIT_DO_UNBOX_LAMBDA_DECL(_do_check) \
-template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) \
+template SHARED_REQUIRES(Locks::mutator_lock_) \
bool DoUnboxLambda<_do_check>(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst, \
uint16_t inst_data);
diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc
index ec923b6..7027cbf 100644
--- a/runtime/interpreter/interpreter_goto_table_impl.cc
+++ b/runtime/interpreter/interpreter_goto_table_impl.cc
@@ -2536,16 +2536,16 @@
} // NOLINT(readability/fn_size)
// Explicit definitions of ExecuteGotoImpl.
-template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) HOT_ATTR
+template SHARED_REQUIRES(Locks::mutator_lock_) HOT_ATTR
JValue ExecuteGotoImpl<true, false>(Thread* self, const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame, JValue result_register);
-template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) HOT_ATTR
+template SHARED_REQUIRES(Locks::mutator_lock_) HOT_ATTR
JValue ExecuteGotoImpl<false, false>(Thread* self, const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame, JValue result_register);
-template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+template SHARED_REQUIRES(Locks::mutator_lock_)
JValue ExecuteGotoImpl<true, true>(Thread* self, const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame, JValue result_register);
-template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+template SHARED_REQUIRES(Locks::mutator_lock_)
JValue ExecuteGotoImpl<false, true>(Thread* self, const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame, JValue result_register);
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index 78090bb..544f788 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -2283,16 +2283,16 @@
} // NOLINT(readability/fn_size)
// Explicit definitions of ExecuteSwitchImpl.
-template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) HOT_ATTR
+template SHARED_REQUIRES(Locks::mutator_lock_) HOT_ATTR
JValue ExecuteSwitchImpl<true, false>(Thread* self, const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame, JValue result_register);
-template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) HOT_ATTR
+template SHARED_REQUIRES(Locks::mutator_lock_) HOT_ATTR
JValue ExecuteSwitchImpl<false, false>(Thread* self, const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame, JValue result_register);
-template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+template SHARED_REQUIRES(Locks::mutator_lock_)
JValue ExecuteSwitchImpl<true, true>(Thread* self, const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame, JValue result_register);
-template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+template SHARED_REQUIRES(Locks::mutator_lock_)
JValue ExecuteSwitchImpl<false, true>(Thread* self, const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame, JValue result_register);
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 43e24fa..22701ac 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -46,7 +46,7 @@
static void AbortTransactionOrFail(Thread* self, const char* fmt, ...)
__attribute__((__format__(__printf__, 2, 3)))
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static void AbortTransactionOrFail(Thread* self, const char* fmt, ...) {
va_list args;
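
The __format__(__printf__, 2, 3) attribute above tells the compiler that argument 2 of
AbortTransactionOrFail is a printf-style format string and that the variadic arguments begin at
position 3, so mismatched format specifiers are caught at compile time. A minimal sketch:

    #include <cstdarg>
    #include <cstdio>

    // format(printf, 2, 3): argument 2 is the format string, variadic arguments
    // begin at position 3. Call sites are then checked like printf's.
    static void AbortOrFail(int code, const char* fmt, ...)
        __attribute__((__format__(__printf__, 2, 3)));

    static void AbortOrFail(int code, const char* fmt, ...) {
      va_list args;
      va_start(args, fmt);
      std::fprintf(stderr, "abort(%d): ", code);
      std::vfprintf(stderr, fmt, args);
      va_end(args);
    }

    int main() {
      AbortOrFail(1, "bad index %d in %s\n", 7, "table");
      // AbortOrFail(1, "bad index %d\n", "oops");  // would warn: %d vs const char*
      return 0;
    }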
@@ -69,7 +69,7 @@
Handle<mirror::ClassLoader> class_loader, JValue* result,
const std::string& method_name, bool initialize_class,
bool abort_if_not_found)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
CHECK(className.Get() != nullptr);
std::string descriptor(DotToDescriptor(className->ToModifiedUtf8().c_str()));
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
@@ -99,7 +99,7 @@
// actually the transaction abort exception. This must not be wrapped, as it signals an
// initialization abort.
static void CheckExceptionGenerateClassNotFound(Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (self->IsExceptionPending()) {
// If it is not the transaction abort exception, wrap it.
std::string type(PrettyTypeOf(self->GetException()));
@@ -111,7 +111,7 @@
}
static mirror::String* GetClassName(Thread* self, ShadowFrame* shadow_frame, size_t arg_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
mirror::Object* param = shadow_frame->GetVRegReference(arg_offset);
if (param == nullptr) {
AbortTransactionOrFail(self, "Null-pointer in Class.forName.");
@@ -294,7 +294,7 @@
mirror::Array* src_array, int32_t src_pos,
mirror::Array* dst_array, int32_t dst_pos,
int32_t length)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (src_array->GetClass()->GetComponentType() != dst_array->GetClass()->GetComponentType()) {
AbortTransactionOrFail(self, "Types mismatched in arraycopy: %s vs %s.",
PrettyDescriptor(src_array->GetClass()->GetComponentType()).c_str(),
@@ -490,7 +490,7 @@
}
static mirror::Object* GetDexFromDexCache(Thread* self, mirror::DexCache* dex_cache)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
const DexFile* dex_file = dex_cache->GetDexFile();
if (dex_file == nullptr) {
return nullptr;
@@ -601,7 +601,7 @@
static void UnstartedMemoryPeekArray(
Primitive::Type type, Thread* self, ShadowFrame* shadow_frame, size_t arg_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
int64_t address_long = shadow_frame->GetVRegLong(arg_offset);
mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset + 2);
if (obj == nullptr) {
@@ -840,7 +840,7 @@
// This allows getting the char array for the new style of String objects during compilation.
void UnstartedRuntime::UnstartedStringToCharArray(
Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
mirror::String* string = shadow_frame->GetVRegReference(arg_offset)->AsString();
if (string == nullptr) {
AbortTransactionOrFail(self, "String.toCharArray with null object");
diff --git a/runtime/interpreter/unstarted_runtime.h b/runtime/interpreter/unstarted_runtime.h
index a357d5f..03d7026 100644
--- a/runtime/interpreter/unstarted_runtime.h
+++ b/runtime/interpreter/unstarted_runtime.h
@@ -52,14 +52,14 @@
ShadowFrame* shadow_frame,
JValue* result,
size_t arg_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static void Jni(Thread* self,
ArtMethod* method,
mirror::Object* receiver,
uint32_t* args,
JValue* result)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
private:
// Methods that intercept available libcore implementations.
@@ -68,7 +68,7 @@
ShadowFrame* shadow_frame, \
JValue* result, \
size_t arg_offset) \
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
#include "unstarted_runtime_list.h"
UNSTARTED_RUNTIME_DIRECT_LIST(UNSTARTED_DIRECT)
#undef UNSTARTED_RUNTIME_DIRECT_LIST
@@ -82,7 +82,7 @@
mirror::Object* receiver, \
uint32_t* args, \
JValue* result) \
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
#include "unstarted_runtime_list.h"
UNSTARTED_RUNTIME_JNI_LIST(UNSTARTED_JNI)
#undef UNSTARTED_RUNTIME_DIRECT_LIST
diff --git a/runtime/interpreter/unstarted_runtime_test.cc b/runtime/interpreter/unstarted_runtime_test.cc
index 4b672e0..a1ae2aa 100644
--- a/runtime/interpreter/unstarted_runtime_test.cc
+++ b/runtime/interpreter/unstarted_runtime_test.cc
@@ -42,7 +42,7 @@
ShadowFrame* shadow_frame, \
JValue* result, \
size_t arg_offset) \
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+ SHARED_REQUIRES(Locks::mutator_lock_) { \
interpreter::UnstartedRuntime::Unstarted ## Name(self, shadow_frame, result, arg_offset); \
}
#include "unstarted_runtime_list.h"
@@ -58,7 +58,7 @@
mirror::Object* receiver, \
uint32_t* args, \
JValue* result) \
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+ SHARED_REQUIRES(Locks::mutator_lock_) { \
interpreter::UnstartedRuntime::UnstartedJNI ## Name(self, method, receiver, args, result); \
}
#include "unstarted_runtime_list.h"
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index 36e3aa3..9d41018 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -87,7 +87,7 @@
* If the call has not yet finished in another thread, wait for it.
*/
bool CheckOnLoadResult()
- LOCKS_EXCLUDED(jni_on_load_lock_) {
+ REQUIRES(!jni_on_load_lock_) {
Thread* self = Thread::Current();
bool okay;
{
@@ -112,7 +112,7 @@
return okay;
}
- void SetResult(bool result) LOCKS_EXCLUDED(jni_on_load_lock_) {
+ void SetResult(bool result) REQUIRES(!jni_on_load_lock_) {
Thread* self = Thread::Current();
MutexLock mu(self, jni_on_load_lock_);
@@ -210,8 +210,8 @@
// See section 11.3 "Linking Native Methods" of the JNI spec.
void* FindNativeMethod(ArtMethod* m, std::string& detail)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::jni_libraries_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ REQUIRES(Locks::jni_libraries_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
std::string jni_short_name(JniShortName(m));
std::string jni_long_name(JniLongName(m));
const mirror::ClassLoader* declaring_class_loader = m->GetDeclaringClass()->GetClassLoader();
diff --git a/runtime/java_vm_ext.h b/runtime/java_vm_ext.h
index 97fbbc5..d70fc47 100644
--- a/runtime/java_vm_ext.h
+++ b/runtime/java_vm_ext.h
@@ -77,7 +77,7 @@
// such as NewByteArray.
// If -verbose:third-party-jni is on, we want to log any JNI function calls
// made by a third-party native method.
- bool ShouldTrace(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool ShouldTrace(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
/**
* Loads the given shared library. 'path' is an absolute pathname.
@@ -93,56 +93,57 @@
* using dlsym(3) on every native library that's been loaded so far.
*/
void* FindCodeForNativeMethod(ArtMethod* m)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void DumpForSigQuit(std::ostream& os)
- LOCKS_EXCLUDED(Locks::jni_libraries_lock_, globals_lock_, weak_globals_lock_);
+ REQUIRES(!Locks::jni_libraries_lock_, !globals_lock_, !weak_globals_lock_);
void DumpReferenceTables(std::ostream& os)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!globals_lock_, !weak_globals_lock_);
bool SetCheckJniEnabled(bool enabled);
- void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!globals_lock_);
- void DisallowNewWeakGlobals() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void AllowNewWeakGlobals() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void EnsureNewWeakGlobalsDisallowed() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void BroadcastForNewWeakGlobals() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void DisallowNewWeakGlobals() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
+ void AllowNewWeakGlobals() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
+ void EnsureNewWeakGlobalsDisallowed() SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!weak_globals_lock_);
+ void BroadcastForNewWeakGlobals() SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!weak_globals_lock_);
jobject AddGlobalRef(Thread* self, mirror::Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!globals_lock_);
jweak AddWeakGlobalRef(Thread* self, mirror::Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
- void DeleteGlobalRef(Thread* self, jobject obj);
+ void DeleteGlobalRef(Thread* self, jobject obj) REQUIRES(!globals_lock_);
- void DeleteWeakGlobalRef(Thread* self, jweak obj);
+ void DeleteWeakGlobalRef(Thread* self, jweak obj) REQUIRES(!weak_globals_lock_);
void SweepJniWeakGlobals(IsMarkedVisitor* visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
mirror::Object* DecodeGlobal(Thread* self, IndirectRef ref)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void UpdateGlobal(Thread* self, IndirectRef ref, mirror::Object* result)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(globals_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!globals_lock_);
mirror::Object* DecodeWeakGlobal(Thread* self, IndirectRef ref)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
void UpdateWeakGlobal(Thread* self, IndirectRef ref, mirror::Object* result)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(weak_globals_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
const JNIInvokeInterface* GetUncheckedFunctions() const {
return unchecked_functions_;
}
- void TrimGlobals() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(globals_lock_);
+ void TrimGlobals() SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!globals_lock_);
private:
Runtime* const runtime_;
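
The new !weak_globals_lock_ requirements on the Disallow/Allow/Broadcast/DecodeWeakGlobal group
document a GC handshake: while the collector sweeps weak globals, readers block on a condition
variable until access is re-allowed and a broadcast wakes them. A hedged sketch of the shape
using standard-library primitives (ART uses its own Mutex and ConditionVariable, but the pattern
is the same):

    #include <condition_variable>
    #include <mutex>

    class WeakGlobals {
     public:
      // Called by the GC before sweeping weak globals.
      void DisallowNewWeakGlobals() {
        std::lock_guard<std::mutex> lk(lock_);
        allowed_ = false;
      }
      // Called by the GC once sweeping is done; wakes all blocked readers.
      void AllowNewWeakGlobals() {
        {
          std::lock_guard<std::mutex> lk(lock_);
          allowed_ = true;
        }
        cond_.notify_all();
      }
      // Mutator path: blocks while the GC has weak-global access disallowed.
      int DecodeWeakGlobal(int ref) {
        std::unique_lock<std::mutex> lk(lock_);
        cond_.wait(lk, [this] { return allowed_; });
        return ref;  // Real code resolves the IndirectRef under the lock here.
      }
     private:
      std::mutex lock_;
      std::condition_variable cond_;
      bool allowed_ = true;
    };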
diff --git a/runtime/jdwp/jdwp.h b/runtime/jdwp/jdwp.h
index 7c48985..f5ac9d0 100644
--- a/runtime/jdwp/jdwp.h
+++ b/runtime/jdwp/jdwp.h
@@ -88,7 +88,7 @@
uint64_t dex_pc;
};
std::ostream& operator<<(std::ostream& os, const JdwpLocation& rhs)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool operator==(const JdwpLocation& lhs, const JdwpLocation& rhs);
bool operator!=(const JdwpLocation& lhs, const JdwpLocation& rhs);
@@ -130,7 +130,7 @@
* Returns a newly-allocated JdwpState struct on success, or nullptr on failure.
*/
static JdwpState* Create(const JdwpOptions* options)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
+ REQUIRES(!Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS;
~JdwpState();
@@ -155,15 +155,15 @@
// thread (command handler) so no event thread posts an event while
// it processes a command. This must be called only from the debugger
// thread.
- void AcquireJdwpTokenForCommand() LOCKS_EXCLUDED(jdwp_token_lock_);
- void ReleaseJdwpTokenForCommand() LOCKS_EXCLUDED(jdwp_token_lock_);
+ void AcquireJdwpTokenForCommand() REQUIRES(!jdwp_token_lock_);
+ void ReleaseJdwpTokenForCommand() REQUIRES(!jdwp_token_lock_);
// Acquires/releases the JDWP synchronization token for the event thread
// so no other thread (debugger thread or event thread) interleaves with
// it when posting an event. This must NOT be called from the debugger
// thread, only the event thread.
- void AcquireJdwpTokenForEvent(ObjectId threadId) LOCKS_EXCLUDED(jdwp_token_lock_);
- void ReleaseJdwpTokenForEvent() LOCKS_EXCLUDED(jdwp_token_lock_);
+ void AcquireJdwpTokenForEvent(ObjectId threadId) REQUIRES(!jdwp_token_lock_);
+ void ReleaseJdwpTokenForEvent() REQUIRES(!jdwp_token_lock_);
/*
* These notify the debug code that something interesting has happened. This
@@ -183,7 +183,7 @@
* The VM has finished initializing. Only called when the debugger is
* connected at the time initialization completes.
*/
- void PostVMStart() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void PostVMStart() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!jdwp_token_lock_);
/*
* A location of interest has been reached. This is used for breakpoints,
@@ -199,8 +199,7 @@
*/
void PostLocationEvent(const EventLocation* pLoc, mirror::Object* thisPtr, int eventFlags,
const JValue* returnValue)
- LOCKS_EXCLUDED(event_list_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!event_list_lock_, !jdwp_token_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
/*
* A field of interest has been accessed or modified. This is used for field access and field
@@ -211,8 +210,7 @@
*/
void PostFieldEvent(const EventLocation* pLoc, ArtField* field, mirror::Object* thisPtr,
const JValue* fieldValue, bool is_modification)
- LOCKS_EXCLUDED(event_list_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!event_list_lock_, !jdwp_token_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
/*
* An exception has been thrown.
@@ -221,22 +219,19 @@
*/
void PostException(const EventLocation* pThrowLoc, mirror::Throwable* exception_object,
const EventLocation* pCatchLoc, mirror::Object* thisPtr)
- LOCKS_EXCLUDED(event_list_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!event_list_lock_, !jdwp_token_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
/*
* A thread has started or stopped.
*/
void PostThreadChange(Thread* thread, bool start)
- LOCKS_EXCLUDED(event_list_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!event_list_lock_, !jdwp_token_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
/*
* Class has been prepared.
*/
void PostClassPrepare(mirror::Class* klass)
- LOCKS_EXCLUDED(event_list_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!event_list_lock_, !jdwp_token_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
/*
* The VM is about to stop.
@@ -244,7 +239,7 @@
bool PostVMDeath();
// Called if/when we realize we're talking to DDMS.
- void NotifyDdmsActive() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void NotifyDdmsActive() SHARED_REQUIRES(Locks::mutator_lock_);
void SetupChunkHeader(uint32_t type, size_t data_len, size_t header_size, uint8_t* out_header);
@@ -253,23 +248,23 @@
* Send up a chunk of DDM data.
*/
void DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- bool HandlePacket();
+ bool HandlePacket() REQUIRES(!shutdown_lock_, !jdwp_token_lock_);
void SendRequest(ExpandBuf* pReq);
void ResetState()
- LOCKS_EXCLUDED(event_list_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!event_list_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
/* atomic ops to get next serial number */
uint32_t NextRequestSerial();
uint32_t NextEventSerial();
void Run()
- LOCKS_EXCLUDED(Locks::mutator_lock_,
- Locks::thread_suspend_count_lock_);
+ REQUIRES(!Locks::mutator_lock_, !Locks::thread_suspend_count_lock_, !thread_start_lock_,
+ !attach_lock_, !event_list_lock_);
/*
* Register an event by adding it to the event list.
@@ -278,48 +273,45 @@
* may discard its pointer after calling this.
*/
JdwpError RegisterEvent(JdwpEvent* pEvent)
- LOCKS_EXCLUDED(event_list_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!event_list_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
/*
* Unregister an event, given the requestId.
*/
void UnregisterEventById(uint32_t requestId)
- LOCKS_EXCLUDED(event_list_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!event_list_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
/*
* Unregister all events.
*/
void UnregisterAll()
- LOCKS_EXCLUDED(event_list_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!event_list_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
private:
explicit JdwpState(const JdwpOptions* options);
- size_t ProcessRequest(Request* request, ExpandBuf* pReply, bool* skip_reply);
+ size_t ProcessRequest(Request* request, ExpandBuf* pReply, bool* skip_reply)
+ REQUIRES(!jdwp_token_lock_);
bool InvokeInProgress();
bool IsConnected();
void SuspendByPolicy(JdwpSuspendPolicy suspend_policy, JDWP::ObjectId thread_self_id)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
+ REQUIRES(!Locks::mutator_lock_);
void SendRequestAndPossiblySuspend(ExpandBuf* pReq, JdwpSuspendPolicy suspend_policy,
ObjectId threadId)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!jdwp_token_lock_);
void CleanupMatchList(const std::vector<JdwpEvent*>& match_list)
- EXCLUSIVE_LOCKS_REQUIRED(event_list_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(event_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
void EventFinish(ExpandBuf* pReq);
bool FindMatchingEvents(JdwpEventKind eventKind, const ModBasket& basket,
std::vector<JdwpEvent*>* match_list)
- LOCKS_EXCLUDED(event_list_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!event_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
void FindMatchingEventsLocked(JdwpEventKind eventKind, const ModBasket& basket,
std::vector<JdwpEvent*>* match_list)
- EXCLUSIVE_LOCKS_REQUIRED(event_list_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(event_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
void UnregisterEvent(JdwpEvent* pEvent)
- EXCLUSIVE_LOCKS_REQUIRED(event_list_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(event_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
void SendBufferedRequest(uint32_t type, const std::vector<iovec>& iov);
/*
@@ -351,8 +343,8 @@
* events at the same time, so we grab a mutex in the SetWaitForJdwpToken
* call, and release it in the ClearWaitForJdwpToken call.
*/
- void SetWaitForJdwpToken(ObjectId threadId) LOCKS_EXCLUDED(jdwp_token_lock_);
- void ClearWaitForJdwpToken() LOCKS_EXCLUDED(jdwp_token_lock_);
+ void SetWaitForJdwpToken(ObjectId threadId) REQUIRES(!jdwp_token_lock_);
+ void ClearWaitForJdwpToken() REQUIRES(!jdwp_token_lock_);
public: // TODO: fix privacy
const JdwpOptions* options_;
@@ -415,9 +407,9 @@
bool processing_request_ GUARDED_BY(shutdown_lock_);
};
-std::string DescribeField(const FieldId& field_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-std::string DescribeMethod(const MethodId& method_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-std::string DescribeRefTypeId(const RefTypeId& ref_type_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+std::string DescribeField(const FieldId& field_id) SHARED_REQUIRES(Locks::mutator_lock_);
+std::string DescribeMethod(const MethodId& method_id) SHARED_REQUIRES(Locks::mutator_lock_);
+std::string DescribeRefTypeId(const RefTypeId& ref_type_id) SHARED_REQUIRES(Locks::mutator_lock_);
class Request {
public:
@@ -433,9 +425,9 @@
uint32_t ReadUnsigned32(const char* what);
- FieldId ReadFieldId() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ FieldId ReadFieldId() SHARED_REQUIRES(Locks::mutator_lock_);
- MethodId ReadMethodId() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ MethodId ReadMethodId() SHARED_REQUIRES(Locks::mutator_lock_);
ObjectId ReadObjectId(const char* specific_kind);
@@ -447,7 +439,7 @@
ObjectId ReadThreadGroupId();
- RefTypeId ReadRefTypeId() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ RefTypeId ReadRefTypeId() SHARED_REQUIRES(Locks::mutator_lock_);
FrameId ReadFrameId();
@@ -461,7 +453,7 @@
JdwpTypeTag ReadTypeTag();
- JdwpLocation ReadLocation() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ JdwpLocation ReadLocation() SHARED_REQUIRES(Locks::mutator_lock_);
JdwpModKind ReadModKind();
diff --git a/runtime/jdwp/jdwp_adb.cc b/runtime/jdwp/jdwp_adb.cc
index adc2912..51952c4 100644
--- a/runtime/jdwp/jdwp_adb.cc
+++ b/runtime/jdwp/jdwp_adb.cc
@@ -24,7 +24,7 @@
#include "base/stringprintf.h"
#include "jdwp/jdwp_priv.h"
-#ifdef HAVE_ANDROID_OS
+#ifdef __ANDROID__
#include "cutils/sockets.h"
#endif
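
HAVE_ANDROID_OS was a define the build system had to pass in; __ANDROID__ is predefined by the
compiler for any Android target, so the check can no longer silently rot when makefiles change.
The whole migration is the ifdef spelling, sketched here with a hypothetical PeerIsTrusted
helper:

    // __ANDROID__ comes from the toolchain for any Android target; no -D flag
    // is needed. PeerIsTrusted is a made-up wrapper, named only to give the
    // #ifdef something to select.
    #ifdef __ANDROID__
    #include <cutils/sockets.h>  // device-only dependency
    static bool PeerIsTrusted(int fd) { return socket_peer_is_trusted(fd); }
    #else
    static bool PeerIsTrusted(int fd) { (void)fd; return true; }  // host build
    #endif

    int main() { return PeerIsTrusted(0) ? 0 : 1; }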
@@ -224,7 +224,7 @@
*/
int ret = connect(control_sock_, &control_addr_.controlAddrPlain, control_addr_len_);
if (!ret) {
-#ifdef HAVE_ANDROID_OS
+#ifdef __ANDROID__
if (!socket_peer_is_trusted(control_sock_)) {
if (shutdown(control_sock_, SHUT_RDWR)) {
PLOG(ERROR) << "trouble shutting down socket";
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
index 14f097f..5d21f17 100644
--- a/runtime/jdwp/jdwp_event.cc
+++ b/runtime/jdwp/jdwp_event.cc
@@ -447,7 +447,7 @@
* need to do this even if later mods cause us to ignore the event.
*/
static bool ModsMatch(JdwpEvent* pEvent, const ModBasket& basket)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
JdwpEventMod* pMod = pEvent->mods;
for (int i = pEvent->modCount; i > 0; i--, pMod++) {
@@ -784,7 +784,7 @@
static void LogMatchingEventsAndThread(const std::vector<JdwpEvent*> match_list,
ObjectId thread_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
for (size_t i = 0, e = match_list.size(); i < e; ++i) {
JdwpEvent* pEvent = match_list[i];
VLOG(jdwp) << "EVENT #" << i << ": " << pEvent->eventKind
@@ -800,7 +800,7 @@
static void SetJdwpLocationFromEventLocation(const JDWP::EventLocation* event_location,
JDWP::JdwpLocation* jdwp_location)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(event_location != nullptr);
DCHECK(jdwp_location != nullptr);
Dbg::SetJdwpLocation(jdwp_location, event_location->method, event_location->dex_pc);
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index d4e2656..f449406 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -53,7 +53,7 @@
}
static JdwpError WriteTaggedObject(ExpandBuf* reply, ObjectId object_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
uint8_t tag;
JdwpError rc = Dbg::GetObjectTag(object_id, &tag);
if (rc == ERR_NONE) {
@@ -64,7 +64,7 @@
}
static JdwpError WriteTaggedObjectList(ExpandBuf* reply, const std::vector<ObjectId>& objects)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
expandBufAdd4BE(reply, objects.size());
for (size_t i = 0; i < objects.size(); ++i) {
JdwpError rc = WriteTaggedObject(reply, objects[i]);
@@ -84,7 +84,7 @@
static JdwpError RequestInvoke(JdwpState*, Request* request,
ObjectId thread_id, ObjectId object_id,
RefTypeId class_id, MethodId method_id, bool is_constructor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
CHECK(!is_constructor || object_id != 0);
int32_t arg_count = request->ReadSigned32("argument count");
@@ -123,7 +123,7 @@
}
static JdwpError VM_Version(JdwpState*, Request*, ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// Text information on runtime version.
std::string version(StringPrintf("Android Runtime %s", Runtime::Current()->GetVersion()));
expandBufAddUtf8String(pReply, version);
@@ -147,7 +147,7 @@
* been loaded by multiple class loaders.
*/
static JdwpError VM_ClassesBySignature(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
std::string classDescriptor(request->ReadUtf8String());
std::vector<RefTypeId> ids;
@@ -179,7 +179,7 @@
* to be suspended, and that violates some JDWP expectations.
*/
static JdwpError VM_AllThreads(JdwpState*, Request*, ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
std::vector<ObjectId> thread_ids;
Dbg::GetThreads(nullptr /* all thread groups */, &thread_ids);
@@ -195,7 +195,7 @@
* List all thread groups that do not have a parent.
*/
static JdwpError VM_TopLevelThreadGroups(JdwpState*, Request*, ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
/*
* TODO: maintain a list of parentless thread groups in the VM.
*
@@ -214,7 +214,7 @@
* Respond with the sizes of the basic debugger types.
*/
static JdwpError VM_IDSizes(JdwpState*, Request*, ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
expandBufAdd4BE(pReply, sizeof(FieldId));
expandBufAdd4BE(pReply, sizeof(MethodId));
expandBufAdd4BE(pReply, sizeof(ObjectId));
@@ -224,7 +224,7 @@
}
static JdwpError VM_Dispose(JdwpState*, Request*, ExpandBuf*)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
Dbg::Dispose();
return ERR_NONE;
}
@@ -236,7 +236,7 @@
* This needs to increment the "suspend count" on all threads.
*/
static JdwpError VM_Suspend(JdwpState*, Request*, ExpandBuf*)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
Thread* self = Thread::Current();
self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension);
Dbg::SuspendVM();
@@ -248,13 +248,13 @@
* Resume execution. Decrements the "suspend count" of all threads.
*/
static JdwpError VM_Resume(JdwpState*, Request*, ExpandBuf*)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
Dbg::ResumeVM();
return ERR_NONE;
}
static JdwpError VM_Exit(JdwpState* state, Request* request, ExpandBuf*)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
uint32_t exit_status = request->ReadUnsigned32("exit_status");
state->ExitAfterReplying(exit_status);
return ERR_NONE;
@@ -267,7 +267,7 @@
* string "java.util.Arrays".)
*/
static JdwpError VM_CreateString(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
std::string str(request->ReadUtf8String());
ObjectId string_id;
JdwpError status = Dbg::CreateString(str, &string_id);
@@ -279,7 +279,7 @@
}
static JdwpError VM_ClassPaths(JdwpState*, Request*, ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
expandBufAddUtf8String(pReply, "/");
std::vector<std::string> class_path;
@@ -300,7 +300,7 @@
}
static JdwpError VM_DisposeObjects(JdwpState*, Request* request, ExpandBuf*)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
size_t object_count = request->ReadUnsigned32("object_count");
for (size_t i = 0; i < object_count; ++i) {
ObjectId object_id = request->ReadObjectId();
@@ -311,7 +311,7 @@
}
static JdwpError VM_Capabilities(JdwpState*, Request*, ExpandBuf* reply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
expandBufAdd1(reply, true); // canWatchFieldModification
expandBufAdd1(reply, true); // canWatchFieldAccess
expandBufAdd1(reply, true); // canGetBytecodes
@@ -323,7 +323,7 @@
}
static JdwpError VM_CapabilitiesNew(JdwpState*, Request* request, ExpandBuf* reply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// The first few capabilities are the same as those reported by the older call.
VM_Capabilities(nullptr, request, reply);
@@ -350,7 +350,7 @@
}
static JdwpError VM_AllClassesImpl(ExpandBuf* pReply, bool descriptor_and_status, bool generic)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
std::vector<JDWP::RefTypeId> classes;
Dbg::GetClassList(&classes);
@@ -381,17 +381,17 @@
}
static JdwpError VM_AllClasses(JdwpState*, Request*, ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return VM_AllClassesImpl(pReply, true, false);
}
static JdwpError VM_AllClassesWithGeneric(JdwpState*, Request*, ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return VM_AllClassesImpl(pReply, true, true);
}
static JdwpError VM_InstanceCounts(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
int32_t class_count = request->ReadSigned32("class count");
if (class_count < 0) {
return ERR_ILLEGAL_ARGUMENT;
@@ -415,7 +415,7 @@
}
static JdwpError RT_Modifiers(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
RefTypeId refTypeId = request->ReadRefTypeId();
return Dbg::GetModifiers(refTypeId, pReply);
}
@@ -424,7 +424,7 @@
* Get values from static fields in a reference type.
*/
static JdwpError RT_GetValues(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
RefTypeId refTypeId = request->ReadRefTypeId();
int32_t field_count = request->ReadSigned32("field count");
expandBufAdd4BE(pReply, field_count);
@@ -442,7 +442,7 @@
* Get the name of the source file in which a reference type was declared.
*/
static JdwpError RT_SourceFile(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
RefTypeId refTypeId = request->ReadRefTypeId();
std::string source_file;
JdwpError status = Dbg::GetSourceFile(refTypeId, &source_file);
@@ -457,7 +457,7 @@
* Return the current status of the reference type.
*/
static JdwpError RT_Status(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
RefTypeId refTypeId = request->ReadRefTypeId();
JDWP::JdwpTypeTag type_tag;
uint32_t class_status;
@@ -473,7 +473,7 @@
* Return interfaces implemented directly by this class.
*/
static JdwpError RT_Interfaces(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
RefTypeId refTypeId = request->ReadRefTypeId();
return Dbg::OutputDeclaredInterfaces(refTypeId, pReply);
}
@@ -482,7 +482,7 @@
* Return the class object corresponding to this type.
*/
static JdwpError RT_ClassObject(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
RefTypeId refTypeId = request->ReadRefTypeId();
ObjectId class_object_id;
JdwpError status = Dbg::GetClassObject(refTypeId, &class_object_id);
@@ -500,13 +500,13 @@
* JDB seems interested, but DEX files don't currently support this.
*/
static JdwpError RT_SourceDebugExtension(JdwpState*, Request*, ExpandBuf*)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
/* referenceTypeId in, string out */
return ERR_ABSENT_INFORMATION;
}
static JdwpError RT_Signature(JdwpState*, Request* request, ExpandBuf* pReply, bool with_generic)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
RefTypeId refTypeId = request->ReadRefTypeId();
std::string signature;
@@ -522,12 +522,12 @@
}
static JdwpError RT_Signature(JdwpState* state, Request* request, ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return RT_Signature(state, request, pReply, false);
}
static JdwpError RT_SignatureWithGeneric(JdwpState* state, Request* request, ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return RT_Signature(state, request, pReply, true);
}
@@ -536,7 +536,7 @@
* reference type, or null if it was loaded by the system loader.
*/
static JdwpError RT_ClassLoader(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
RefTypeId refTypeId = request->ReadRefTypeId();
return Dbg::GetClassLoader(refTypeId, pReply);
}
@@ -546,14 +546,14 @@
* fields declared by a class.
*/
static JdwpError RT_FieldsWithGeneric(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
RefTypeId refTypeId = request->ReadRefTypeId();
return Dbg::OutputDeclaredFields(refTypeId, true, pReply);
}
// Obsolete equivalent of FieldsWithGeneric, without the generic type information.
static JdwpError RT_Fields(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
RefTypeId refTypeId = request->ReadRefTypeId();
return Dbg::OutputDeclaredFields(refTypeId, false, pReply);
}
@@ -563,20 +563,20 @@
* methods declared by a class.
*/
static JdwpError RT_MethodsWithGeneric(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
RefTypeId refTypeId = request->ReadRefTypeId();
return Dbg::OutputDeclaredMethods(refTypeId, true, pReply);
}
// Obsolete equivalent of MethodsWithGeneric, without the generic type information.
static JdwpError RT_Methods(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
RefTypeId refTypeId = request->ReadRefTypeId();
return Dbg::OutputDeclaredMethods(refTypeId, false, pReply);
}
static JdwpError RT_Instances(JdwpState*, Request* request, ExpandBuf* reply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
RefTypeId class_id = request->ReadRefTypeId();
int32_t max_count = request->ReadSigned32("max count");
if (max_count < 0) {
@@ -596,7 +596,7 @@
* Return the immediate superclass of a class.
*/
static JdwpError CT_Superclass(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
RefTypeId class_id = request->ReadRefTypeId();
RefTypeId superClassId;
JdwpError status = Dbg::GetSuperclass(class_id, &superClassId);
@@ -611,7 +611,7 @@
* Set static class values.
*/
static JdwpError CT_SetValues(JdwpState* , Request* request, ExpandBuf*)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
RefTypeId class_id = request->ReadRefTypeId();
int32_t values_count = request->ReadSigned32("values count");
@@ -641,7 +641,7 @@
*/
static JdwpError CT_InvokeMethod(JdwpState* state, Request* request,
ExpandBuf* pReply ATTRIBUTE_UNUSED)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
RefTypeId class_id = request->ReadRefTypeId();
ObjectId thread_id = request->ReadThreadId();
MethodId method_id = request->ReadMethodId();
@@ -658,7 +658,7 @@
*/
static JdwpError CT_NewInstance(JdwpState* state, Request* request,
ExpandBuf* pReply ATTRIBUTE_UNUSED)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
RefTypeId class_id = request->ReadRefTypeId();
ObjectId thread_id = request->ReadThreadId();
MethodId method_id = request->ReadMethodId();
@@ -675,7 +675,7 @@
* Create a new array object of the requested type and length.
*/
static JdwpError AT_newInstance(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
RefTypeId arrayTypeId = request->ReadRefTypeId();
int32_t length = request->ReadSigned32("length");
@@ -693,7 +693,7 @@
* Return line number information for the method, if present.
*/
static JdwpError M_LineTable(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
RefTypeId refTypeId = request->ReadRefTypeId();
MethodId method_id = request->ReadMethodId();
@@ -704,7 +704,7 @@
static JdwpError M_VariableTable(JdwpState*, Request* request, ExpandBuf* pReply,
bool generic)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
RefTypeId class_id = request->ReadRefTypeId();
MethodId method_id = request->ReadMethodId();
@@ -717,17 +717,17 @@
}
static JdwpError M_VariableTable(JdwpState* state, Request* request, ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return M_VariableTable(state, request, pReply, false);
}
static JdwpError M_VariableTableWithGeneric(JdwpState* state, Request* request, ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return M_VariableTable(state, request, pReply, true);
}
static JdwpError M_Bytecodes(JdwpState*, Request* request, ExpandBuf* reply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
RefTypeId class_id = request->ReadRefTypeId();
MethodId method_id = request->ReadMethodId();
@@ -753,7 +753,7 @@
* passed in here.
*/
static JdwpError OR_ReferenceType(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ObjectId object_id = request->ReadObjectId();
return Dbg::GetReferenceType(object_id, pReply);
}
@@ -762,7 +762,7 @@
* Get values from the fields of an object.
*/
static JdwpError OR_GetValues(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ObjectId object_id = request->ReadObjectId();
int32_t field_count = request->ReadSigned32("field count");
@@ -782,7 +782,7 @@
* Set values in the fields of an object.
*/
static JdwpError OR_SetValues(JdwpState*, Request* request, ExpandBuf*)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ObjectId object_id = request->ReadObjectId();
int32_t field_count = request->ReadSigned32("field count");
@@ -804,7 +804,7 @@
}
static JdwpError OR_MonitorInfo(JdwpState*, Request* request, ExpandBuf* reply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ObjectId object_id = request->ReadObjectId();
return Dbg::GetMonitorInfo(object_id, reply);
}
@@ -822,7 +822,7 @@
*/
static JdwpError OR_InvokeMethod(JdwpState* state, Request* request,
ExpandBuf* pReply ATTRIBUTE_UNUSED)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ObjectId object_id = request->ReadObjectId();
ObjectId thread_id = request->ReadThreadId();
RefTypeId class_id = request->ReadRefTypeId();
@@ -832,19 +832,19 @@
}
static JdwpError OR_DisableCollection(JdwpState*, Request* request, ExpandBuf*)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ObjectId object_id = request->ReadObjectId();
return Dbg::DisableCollection(object_id);
}
static JdwpError OR_EnableCollection(JdwpState*, Request* request, ExpandBuf*)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ObjectId object_id = request->ReadObjectId();
return Dbg::EnableCollection(object_id);
}
static JdwpError OR_IsCollected(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ObjectId object_id = request->ReadObjectId();
bool is_collected;
JdwpError rc = Dbg::IsCollected(object_id, &is_collected);
@@ -853,7 +853,7 @@
}
static JdwpError OR_ReferringObjects(JdwpState*, Request* request, ExpandBuf* reply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ObjectId object_id = request->ReadObjectId();
int32_t max_count = request->ReadSigned32("max count");
if (max_count < 0) {
@@ -873,7 +873,7 @@
* Return the string value in a string object.
*/
static JdwpError SR_Value(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ObjectId stringObject = request->ReadObjectId();
std::string str;
JDWP::JdwpError error = Dbg::StringToUtf8(stringObject, &str);
@@ -892,7 +892,7 @@
* Return a thread's name.
*/
static JdwpError TR_Name(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ObjectId thread_id = request->ReadThreadId();
std::string name;
@@ -913,7 +913,7 @@
* resume it; only the JDI is allowed to resume it.
*/
static JdwpError TR_Suspend(JdwpState*, Request* request, ExpandBuf*)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ObjectId thread_id = request->ReadThreadId();
if (thread_id == Dbg::GetThreadSelfId()) {
@@ -932,7 +932,7 @@
* Resume the specified thread.
*/
static JdwpError TR_Resume(JdwpState*, Request* request, ExpandBuf*)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ObjectId thread_id = request->ReadThreadId();
if (thread_id == Dbg::GetThreadSelfId()) {
@@ -948,7 +948,7 @@
* Return status of specified thread.
*/
static JdwpError TR_Status(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ObjectId thread_id = request->ReadThreadId();
JDWP::JdwpThreadStatus threadStatus;
@@ -970,7 +970,7 @@
* Return the thread group that the specified thread is a member of.
*/
static JdwpError TR_ThreadGroup(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ObjectId thread_id = request->ReadThreadId();
return Dbg::GetThreadGroup(thread_id, pReply);
}
@@ -982,7 +982,7 @@
* be THREAD_NOT_SUSPENDED.
*/
static JdwpError TR_Frames(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ObjectId thread_id = request->ReadThreadId();
uint32_t start_frame = request->ReadUnsigned32("start frame");
uint32_t length = request->ReadUnsigned32("length");
@@ -1014,7 +1014,7 @@
* Returns the #of frames on the specified thread, which must be suspended.
*/
static JdwpError TR_FrameCount(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ObjectId thread_id = request->ReadThreadId();
size_t frame_count;
@@ -1028,7 +1028,7 @@
}
static JdwpError TR_OwnedMonitors(Request* request, ExpandBuf* reply, bool with_stack_depths)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ObjectId thread_id = request->ReadThreadId();
std::vector<ObjectId> monitors;
@@ -1052,17 +1052,17 @@
}
static JdwpError TR_OwnedMonitors(JdwpState*, Request* request, ExpandBuf* reply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return TR_OwnedMonitors(request, reply, false);
}
static JdwpError TR_OwnedMonitorsStackDepthInfo(JdwpState*, Request* request, ExpandBuf* reply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return TR_OwnedMonitors(request, reply, true);
}
static JdwpError TR_CurrentContendedMonitor(JdwpState*, Request* request, ExpandBuf* reply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ObjectId thread_id = request->ReadThreadId();
ObjectId contended_monitor;
@@ -1074,7 +1074,7 @@
}
static JdwpError TR_Interrupt(JdwpState*, Request* request, ExpandBuf* reply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
UNUSED(reply);
ObjectId thread_id = request->ReadThreadId();
return Dbg::Interrupt(thread_id);
@@ -1087,7 +1087,7 @@
* its suspend count recently.)
*/
static JdwpError TR_DebugSuspendCount(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ObjectId thread_id = request->ReadThreadId();
return Dbg::GetThreadDebugSuspendCount(thread_id, pReply);
}
@@ -1098,7 +1098,7 @@
* The Eclipse debugger recognizes "main" and "system" as special.
*/
static JdwpError TGR_Name(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ObjectId thread_group_id = request->ReadThreadGroupId();
return Dbg::GetThreadGroupName(thread_group_id, pReply);
}
@@ -1108,7 +1108,7 @@
* thread group.
*/
static JdwpError TGR_Parent(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ObjectId thread_group_id = request->ReadThreadGroupId();
return Dbg::GetThreadGroupParent(thread_group_id, pReply);
}
@@ -1118,7 +1118,7 @@
* specified thread group.
*/
static JdwpError TGR_Children(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ObjectId thread_group_id = request->ReadThreadGroupId();
return Dbg::GetThreadGroupChildren(thread_group_id, pReply);
}
@@ -1127,7 +1127,7 @@
* Return the #of components in the array.
*/
static JdwpError AR_Length(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ObjectId array_id = request->ReadArrayId();
int32_t length;
@@ -1146,7 +1146,7 @@
* Return the values from an array.
*/
static JdwpError AR_GetValues(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ObjectId array_id = request->ReadArrayId();
uint32_t offset = request->ReadUnsigned32("offset");
uint32_t length = request->ReadUnsigned32("length");
@@ -1157,7 +1157,7 @@
* Set values in an array.
*/
static JdwpError AR_SetValues(JdwpState*, Request* request, ExpandBuf*)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ObjectId array_id = request->ReadArrayId();
uint32_t offset = request->ReadUnsigned32("offset");
uint32_t count = request->ReadUnsigned32("count");
@@ -1165,7 +1165,7 @@
}
static JdwpError CLR_VisibleClasses(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
request->ReadObjectId(); // classLoaderObject
// TODO: we should only return classes which have the given class loader as a defining or
// initiating loader. The former would be easy; the latter is hard, because we don't have
@@ -1179,7 +1179,7 @@
* Reply with a requestID.
*/
static JdwpError ER_Set(JdwpState* state, Request* request, ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
JdwpEventKind event_kind = request->ReadEnum1<JdwpEventKind>("event kind");
JdwpSuspendPolicy suspend_policy = request->ReadEnum1<JdwpSuspendPolicy>("suspend policy");
int32_t modifier_count = request->ReadSigned32("modifier count");
@@ -1322,7 +1322,7 @@
}
static JdwpError ER_Clear(JdwpState* state, Request* request, ExpandBuf*)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
request->ReadEnum1<JdwpEventKind>("event kind");
uint32_t requestId = request->ReadUnsigned32("request id");
@@ -1336,7 +1336,7 @@
* Return the values of arguments and local variables.
*/
static JdwpError SF_GetValues(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return Dbg::GetLocalValues(request, pReply);
}
@@ -1344,12 +1344,12 @@
* Set the values of arguments and local variables.
*/
static JdwpError SF_SetValues(JdwpState*, Request* request, ExpandBuf*)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return Dbg::SetLocalValues(request);
}
static JdwpError SF_ThisObject(JdwpState*, Request* request, ExpandBuf* reply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ObjectId thread_id = request->ReadThreadId();
FrameId frame_id = request->ReadFrameId();
@@ -1370,7 +1370,7 @@
* that, or I have no idea what this is for.)
*/
static JdwpError COR_ReflectedType(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
RefTypeId class_object_id = request->ReadRefTypeId();
return Dbg::GetReflectedType(class_object_id, pReply);
}
@@ -1379,7 +1379,7 @@
* Handle a DDM packet with a single chunk in it.
*/
static JdwpError DDM_Chunk(JdwpState* state, Request* request, ExpandBuf* pReply)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
state->NotifyDdmsActive();
uint8_t* replyBuf = nullptr;
int replyLen = -1;
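
The rename running through this file is mechanical: SHARED_LOCKS_REQUIRED becomes SHARED_REQUIRES, matching Clang's capability-based thread-safety attribute names. As a minimal sketch, here is how macros like these are commonly defined, following the pattern in Clang's -Wthread-safety documentation (the actual ART definitions live in a header not shown in this diff):

#if defined(__clang__)
#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
#else
#define THREAD_ANNOTATION_ATTRIBUTE__(x)  // No-op on compilers without the analysis.
#endif

// Caller must hold the capability (e.g. a Mutex) exclusively.
#define REQUIRES(...) THREAD_ANNOTATION_ATTRIBUTE__(requires_capability(__VA_ARGS__))

// Caller must hold the capability at least in shared (reader) mode.
#define SHARED_REQUIRES(...) THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))

Under this scheme, "the caller must not hold the lock" is spelled REQUIRES(!lock) rather than the old LOCKS_EXCLUDED(lock), which lets the analysis treat it as a checkable negative capability instead of a mere hint.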
diff --git a/runtime/jdwp/jdwp_main.cc b/runtime/jdwp/jdwp_main.cc
index 6bc5e27..5a9a0f5 100644
--- a/runtime/jdwp/jdwp_main.cc
+++ b/runtime/jdwp/jdwp_main.cc
@@ -248,7 +248,7 @@
case kJdwpTransportSocket:
InitSocketTransport(state.get(), options);
break;
-#ifdef HAVE_ANDROID_OS
+#ifdef __ANDROID__
case kJdwpTransportAndroidAdb:
InitAdbTransport(state.get(), options);
break;
@@ -256,12 +256,12 @@
default:
LOG(FATAL) << "Unknown transport: " << options->transport;
}
-
{
/*
* Grab a mutex before starting the thread. This ensures they
* won't signal the cond var before we're waiting.
*/
+ state->thread_start_lock_.AssertNotHeld(self);
MutexLock thread_start_locker(self, state->thread_start_lock_);
/*
diff --git a/runtime/jdwp/jdwp_priv.h b/runtime/jdwp/jdwp_priv.h
index d58467d..29314f6 100644
--- a/runtime/jdwp/jdwp_priv.h
+++ b/runtime/jdwp/jdwp_priv.h
@@ -86,8 +86,8 @@
void Close();
- ssize_t WritePacket(ExpandBuf* pReply, size_t length) LOCKS_EXCLUDED(socket_lock_);
- ssize_t WriteBufferedPacket(const std::vector<iovec>& iov) LOCKS_EXCLUDED(socket_lock_);
+ ssize_t WritePacket(ExpandBuf* pReply, size_t length) REQUIRES(!socket_lock_);
+ ssize_t WriteBufferedPacket(const std::vector<iovec>& iov) REQUIRES(!socket_lock_);
Mutex* GetSocketLock() {
return &socket_lock_;
}
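
WritePacket and WriteBufferedPacket acquire socket_lock_ internally, so the new REQUIRES(!socket_lock_) forbids callers from already holding it. A small illustration of what the analysis can now flag, written against Clang's raw attributes (the Sketch* names are hypothetical, not the real JDWP net-state class):

struct __attribute__((capability("mutex"))) SketchMutex {
  void Lock() __attribute__((acquire_capability())) {}
  void Unlock() __attribute__((release_capability())) {}
};

struct SketchNetState {
  SketchMutex socket_lock_;
  // Negative capability: socket_lock_ must NOT be held on entry,
  // because this method takes it itself.
  void WritePacket() __attribute__((requires_capability(!socket_lock_))) {
    socket_lock_.Lock();
    // ... write bytes ...
    socket_lock_.Unlock();
  }
};

Unlike the old LOCKS_EXCLUDED, the negative capability is a real precondition: with -Wthread-safety -Wthread-safety-negative, callers must themselves prove (or declare) that socket_lock_ is not held, so the constraint propagates up call chains rather than being checked only at the immediate call site.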
diff --git a/runtime/jdwp/object_registry.cc b/runtime/jdwp/object_registry.cc
index 2b28f7d..3fbad36 100644
--- a/runtime/jdwp/object_registry.cc
+++ b/runtime/jdwp/object_registry.cc
@@ -63,13 +63,13 @@
// Explicit template instantiation.
template
-SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::thread_suspend_count_lock_)
+SHARED_REQUIRES(Locks::mutator_lock_)
+REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
JDWP::ObjectId ObjectRegistry::Add(Handle<mirror::Object> obj_h);
template
-SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::thread_suspend_count_lock_)
+SHARED_REQUIRES(Locks::mutator_lock_)
+REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
JDWP::ObjectId ObjectRegistry::Add(Handle<mirror::Throwable> obj_h);
template<class T>
diff --git a/runtime/jdwp/object_registry.h b/runtime/jdwp/object_registry.h
index 4c149cd..17490f4 100644
--- a/runtime/jdwp/object_registry.h
+++ b/runtime/jdwp/object_registry.h
@@ -63,28 +63,24 @@
ObjectRegistry();
JDWP::ObjectId Add(mirror::Object* o)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(Locks::thread_list_lock_,
- Locks::thread_suspend_count_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_, !lock_);
JDWP::RefTypeId AddRefType(mirror::Class* c)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(Locks::thread_list_lock_,
- Locks::thread_suspend_count_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_, !lock_);
template<class T>
JDWP::ObjectId Add(Handle<T> obj_h)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(Locks::thread_list_lock_,
- Locks::thread_suspend_count_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_, !lock_);
JDWP::RefTypeId AddRefType(Handle<mirror::Class> c_h)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(Locks::thread_list_lock_,
- Locks::thread_suspend_count_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_, !lock_);
template<typename T> T Get(JDWP::ObjectId id, JDWP::JdwpError* error)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_) {
if (id == 0) {
*error = JDWP::ERR_NONE;
return nullptr;
@@ -92,47 +88,42 @@
return down_cast<T>(InternalGet(id, error));
}
- void Clear() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void Clear() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
void DisableCollection(JDWP::ObjectId id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
void EnableCollection(JDWP::ObjectId id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
bool IsCollected(JDWP::ObjectId id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
void DisposeObject(JDWP::ObjectId id, uint32_t reference_count)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
// This is needed to get the jobject instead of the Object*.
// Avoid using this and use standard Get when possible.
- jobject GetJObject(JDWP::ObjectId id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ jobject GetJObject(JDWP::ObjectId id) SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
private:
template<class T>
JDWP::ObjectId InternalAdd(Handle<T> obj_h)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(lock_,
- Locks::thread_list_lock_,
- Locks::thread_suspend_count_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!lock_, !Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
mirror::Object* InternalGet(JDWP::ObjectId id, JDWP::JdwpError* error)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
void Demote(ObjectRegistryEntry& entry)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(lock_);
void Promote(ObjectRegistryEntry& entry)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(lock_);
bool ContainsLocked(Thread* self, mirror::Object* o, int32_t identity_hash_code,
ObjectRegistryEntry** out_entry)
- EXCLUSIVE_LOCKS_REQUIRED(lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(lock_) SHARED_REQUIRES(Locks::mutator_lock_);
Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
std::multimap<int32_t, ObjectRegistryEntry*> object_to_entry_ GUARDED_BY(lock_);
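
The registry now states its full locking contract in its types: public entry points such as DisableCollection carry REQUIRES(!lock_) because they take lock_ themselves, while internal helpers such as Demote and Promote carry REQUIRES(lock_) because they assume it is already held. A minimal sketch of that split, using std::mutex with the corresponding annotations indicated in comments:

#include <map>
#include <mutex>

class SketchRegistry {
 public:
  // Public API; annotated REQUIRES(!lock_): acquires the lock itself.
  void DisableCollection(int id) {
    std::lock_guard<std::mutex> mu(lock_);
    DemoteLocked(id);  // Legal: lock_ is held for the whole call.
  }

 private:
  // Internal helper; annotated REQUIRES(lock_): caller already holds it.
  void DemoteLocked(int id) {
    strong_refs_[id] = false;  // Downgrade the entry to weak.
  }

  std::mutex lock_;
  std::map<int, bool> strong_refs_;  // Annotated GUARDED_BY(lock_).
};

Splitting this way keeps every acquisition at the API boundary, so the analysis can prove the private helpers are never reached without the lock.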
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index dbd8977..ca6e7ea 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -48,7 +48,7 @@
virtual ~Jit();
static Jit* Create(JitOptions* options, std::string* error_msg);
bool CompileMethod(ArtMethod* method, Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void CreateInstrumentationCache(size_t compile_threshold);
void CreateThreadPool();
CompilerCallbacks* GetCompilerCallbacks() {
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index c1ea921..9707f6f 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -78,27 +78,27 @@
// Return true if the code cache contains the code pointer which is the entrypoint of the method.
bool ContainsMethod(ArtMethod* method) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Return true if the code cache contains a code ptr.
bool ContainsCodePtr(const void* ptr) const;
// Reserve a region of code of size at least "size". Returns null if there is no more room.
- uint8_t* ReserveCode(Thread* self, size_t size) LOCKS_EXCLUDED(lock_);
+ uint8_t* ReserveCode(Thread* self, size_t size) REQUIRES(!lock_);
// Add a data array of size (end - begin) with the associated contents, returns null if there
// is no more room.
uint8_t* AddDataArray(Thread* self, const uint8_t* begin, const uint8_t* end)
- LOCKS_EXCLUDED(lock_);
+ REQUIRES(!lock_);
// Get code for a method, returns null if it is not in the jit cache.
const void* GetCodeFor(ArtMethod* method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
// Save the compiled code for a method so that GetCodeFor(method) will return old_code_ptr if the
// entrypoint isn't within the cache.
void SaveCompiledCode(ArtMethod* method, const void* old_code_ptr)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
private:
// Takes ownership of code_mem_map.
diff --git a/runtime/jit/jit_instrumentation.h b/runtime/jit/jit_instrumentation.h
index 27894eb..0deaf8a 100644
--- a/runtime/jit/jit_instrumentation.h
+++ b/runtime/jit/jit_instrumentation.h
@@ -47,9 +47,9 @@
public:
explicit JitInstrumentationCache(size_t hot_method_threshold);
void AddSamples(Thread* self, ArtMethod* method, size_t samples)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
void SignalCompiled(Thread* self, ArtMethod* method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
void CreateThreadPool();
void DeleteThreadPool();
@@ -68,7 +68,7 @@
virtual void MethodEntered(Thread* thread, mirror::Object* /*this_object*/,
ArtMethod* method, uint32_t /*dex_pc*/)
- OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
instrumentation_cache_->AddSamples(thread, method, 1);
}
virtual void MethodExited(Thread* /*thread*/, mirror::Object* /*this_object*/,
@@ -92,7 +92,7 @@
// We only care about how many dex instructions were executed in the Jit.
virtual void BackwardBranch(Thread* thread, ArtMethod* method, int32_t dex_pc_offset)
- OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
CHECK_LE(dex_pc_offset, 0);
instrumentation_cache_->AddSamples(thread, method, 1);
}
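
Both listener callbacks funnel into AddSamples with a weight of one, so method entries and backward branches contribute equally to hotness. A toy version of the counting scheme implied by hot_method_threshold (simplified and hypothetical; the real cache guards its state with lock_, hence the new REQUIRES(!lock_) on AddSamples):

#include <cstddef>
#include <unordered_map>

class SketchHotness {
 public:
  explicit SketchHotness(size_t threshold) : threshold_(threshold) {}

  // Returns true exactly once per method: when its sample count crosses
  // the threshold and it should be handed to the JIT compiler.
  bool AddSamples(const void* method, size_t samples) {
    size_t& count = counts_[method];
    if (count >= threshold_) {
      return false;  // Already hot; a compile was requested earlier.
    }
    count += samples;
    return count >= threshold_;
  }

 private:
  const size_t threshold_;
  std::unordered_map<const void*, size_t> counts_;
};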
diff --git a/runtime/jni_env_ext.cc b/runtime/jni_env_ext.cc
index 84fc404..b18b430 100644
--- a/runtime/jni_env_ext.cc
+++ b/runtime/jni_env_ext.cc
@@ -63,14 +63,14 @@
JNIEnvExt::~JNIEnvExt() {
}
-jobject JNIEnvExt::NewLocalRef(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+jobject JNIEnvExt::NewLocalRef(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
if (obj == nullptr) {
return nullptr;
}
return reinterpret_cast<jobject>(locals.Add(local_ref_cookie, obj));
}
-void JNIEnvExt::DeleteLocalRef(jobject obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+void JNIEnvExt::DeleteLocalRef(jobject obj) SHARED_REQUIRES(Locks::mutator_lock_) {
if (obj != nullptr) {
locals.Remove(local_ref_cookie, reinterpret_cast<IndirectRef>(obj));
}
@@ -86,14 +86,14 @@
monitors.Dump(os);
}
-void JNIEnvExt::PushFrame(int capacity) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+void JNIEnvExt::PushFrame(int capacity) SHARED_REQUIRES(Locks::mutator_lock_) {
UNUSED(capacity); // cpplint gets confused with (int) and thinks it's a cast.
// TODO: take 'capacity' into account.
stacked_local_ref_cookies.push_back(local_ref_cookie);
local_ref_cookie = locals.GetSegmentState();
}
-void JNIEnvExt::PopFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+void JNIEnvExt::PopFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
locals.SetSegmentState(local_ref_cookie);
local_ref_cookie = stacked_local_ref_cookies.back();
stacked_local_ref_cookies.pop_back();
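
PushFrame and PopFrame treat local_ref_cookie as a save/restore mark: pushing stashes the current cookie and starts a fresh segment, popping rewinds the table to the cookie and restores the previous one. The same pattern in miniature (toy types, not the real indirect-reference table):

#include <cstdint>
#include <vector>

class SketchLocalTable {
 public:
  void PushFrame() {
    saved_cookies_.push_back(cookie_);  // Remember where this frame began.
    cookie_ = top_;                     // New segment starts at the current top.
  }
  void PopFrame() {
    top_ = cookie_;                     // Drop every ref this frame added.
    cookie_ = saved_cookies_.back();    // Re-enter the enclosing segment.
    saved_cookies_.pop_back();
  }
  uint32_t AddLocalRef() { return top_++; }  // Hand out the next local slot.

 private:
  uint32_t cookie_ = 0;  // Start of the current segment.
  uint32_t top_ = 0;     // One past the last live local ref.
  std::vector<uint32_t> saved_cookies_;
};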
diff --git a/runtime/jni_env_ext.h b/runtime/jni_env_ext.h
index 29d912c..9b55536 100644
--- a/runtime/jni_env_ext.h
+++ b/runtime/jni_env_ext.h
@@ -39,7 +39,7 @@
~JNIEnvExt();
void DumpReferenceTables(std::ostream& os)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void SetCheckJniEnabled(bool enabled);
@@ -48,7 +48,7 @@
template<typename T>
T AddLocalReference(mirror::Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static Offset SegmentStateOffset();
@@ -60,8 +60,8 @@
return Offset(OFFSETOF_MEMBER(JNIEnvExt, self));
}
- jobject NewLocalRef(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void DeleteLocalRef(jobject obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ jobject NewLocalRef(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
+ void DeleteLocalRef(jobject obj) SHARED_REQUIRES(Locks::mutator_lock_);
Thread* const self;
JavaVMExt* const vm;
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index cc176b7..6a716b5 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -89,7 +89,7 @@
static void ThrowNoSuchMethodError(ScopedObjectAccess& soa, mirror::Class* c,
const char* name, const char* sig, const char* kind)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
std::string temp;
soa.Self()->ThrowNewExceptionF("Ljava/lang/NoSuchMethodError;",
"no %s method \"%s.%s%s\"",
@@ -98,7 +98,7 @@
static void ReportInvalidJNINativeMethod(const ScopedObjectAccess& soa, mirror::Class* c,
const char* kind, jint idx, bool return_errors)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
LOG(return_errors ? ERROR : FATAL) << "Failed to register native method in "
<< PrettyDescriptor(c) << " in " << c->GetDexCache()->GetLocation()->ToModifiedUtf8()
<< ": " << kind << " is null at index " << idx;
@@ -107,7 +107,7 @@
}
static mirror::Class* EnsureInitialized(Thread* self, mirror::Class* klass)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (LIKELY(klass->IsInitialized())) {
return klass;
}
@@ -121,7 +121,7 @@
static jmethodID FindMethodID(ScopedObjectAccess& soa, jclass jni_class,
const char* name, const char* sig, bool is_static)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
mirror::Class* c = EnsureInitialized(soa.Self(), soa.Decode<mirror::Class*>(jni_class));
if (c == nullptr) {
return nullptr;
@@ -148,7 +148,7 @@
}
static mirror::ClassLoader* GetClassLoader(const ScopedObjectAccess& soa)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ArtMethod* method = soa.Self()->GetCurrentMethod(nullptr);
// If we are running Runtime.nativeLoad, use the overriding ClassLoader it set.
if (method == soa.DecodeMethod(WellKnownClasses::java_lang_Runtime_nativeLoad)) {
@@ -179,7 +179,7 @@
static jfieldID FindFieldID(const ScopedObjectAccess& soa, jclass jni_class, const char* name,
const char* sig, bool is_static)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
StackHandleScope<2> hs(soa.Self());
Handle<mirror::Class> c(
hs.NewHandle(EnsureInitialized(soa.Self(), soa.Decode<mirror::Class*>(jni_class))));
@@ -227,7 +227,7 @@
static void ThrowAIOOBE(ScopedObjectAccess& soa, mirror::Array* array, jsize start,
jsize length, const char* identifier)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
std::string type(PrettyTypeOf(array));
soa.Self()->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;",
"%s offset=%d length=%d %s.length=%d",
@@ -236,14 +236,14 @@
static void ThrowSIOOBE(ScopedObjectAccess& soa, jsize start, jsize length,
jsize array_length)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
soa.Self()->ThrowNewExceptionF("Ljava/lang/StringIndexOutOfBoundsException;",
"offset=%d length=%d string.length()=%d", start, length,
array_length);
}
int ThrowNewException(JNIEnv* env, jclass exception_class, const char* msg, jobject cause)
- LOCKS_EXCLUDED(Locks::mutator_lock_) {
+ REQUIRES(!Locks::mutator_lock_) {
// Turn the const char* into a java.lang.String.
ScopedLocalRef<jstring> s(env, env->NewStringUTF(msg));
if (msg != nullptr && s.get() == nullptr) {
@@ -314,7 +314,7 @@
template <bool kNative>
static ArtMethod* FindMethod(mirror::Class* c, const StringPiece& name, const StringPiece& sig)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
for (auto& method : c->GetDirectMethods(pointer_size)) {
if (kNative == method.IsNative() && name == method.GetName() && method.GetSignature() == sig) {
@@ -2321,7 +2321,7 @@
private:
static jint EnsureLocalCapacityInternal(ScopedObjectAccess& soa, jint desired_capacity,
const char* caller)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// TODO: we should try to expand the table if necessary.
if (desired_capacity < 0 || desired_capacity > static_cast<jint>(kLocalsMax)) {
LOG(ERROR) << "Invalid capacity given to " << caller << ": " << desired_capacity;
@@ -2350,7 +2350,7 @@
template <typename JArrayT, typename ElementT, typename ArtArrayT>
static ArtArrayT* DecodeAndCheckArrayType(ScopedObjectAccess& soa, JArrayT java_array,
const char* fn_name, const char* operation)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ArtArrayT* array = soa.Decode<ArtArrayT*>(java_array);
if (UNLIKELY(ArtArrayT::GetArrayClass() != array->GetClass())) {
soa.Vm()->JniAbortF(fn_name,
@@ -2407,7 +2407,7 @@
static void ReleasePrimitiveArray(ScopedObjectAccess& soa, mirror::Array* array,
size_t component_size, void* elements, jint mode)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
void* array_data = array->GetRawData(component_size, 0);
gc::Heap* heap = Runtime::Current()->GetHeap();
bool is_copy = array_data != elements;
diff --git a/runtime/lambda/box_table.cc b/runtime/lambda/box_table.cc
index 64a6076..22cc820 100644
--- a/runtime/lambda/box_table.cc
+++ b/runtime/lambda/box_table.cc
@@ -94,8 +94,7 @@
return method_as_object;
}
-bool BoxTable::UnboxLambda(mirror::Object* object, ClosureType* out_closure) {
- DCHECK(object != nullptr);
+bool BoxTable::UnboxLambda(mirror::Object* object, out<ClosureType> out_closure) {
*out_closure = nullptr;
// Note that we do not need to access lambda_table_lock_ here
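
UnboxLambda's signature changes from a raw ClosureType* out-parameter to out<ClosureType>, the wrapper introduced in base/out.h (whose contents are not part of this diff). A plausible minimal sketch of such a wrapper, hypothetical except for the call-site syntax visible above: it keeps the *out_closure = value spelling of a raw pointer but cannot be constructed from null, so no null-check is needed for the out-parameter:

template <typename T>
class out_sketch {
 public:
  explicit out_sketch(T& param) : param_(&param) {}  // Binds an lvalue: never null.
  T& operator*() const { return *param_; }
  T* operator->() const { return param_; }
 private:
  T* const param_;
};

// Call sites would then read UnboxLambda(obj, outof(closure)), via a helper like:
template <typename T>
out_sketch<T> outof_sketch(T& param) { return out_sketch<T>(param); }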
diff --git a/runtime/lambda/box_table.h b/runtime/lambda/box_table.h
index 12d3ff3..c6d3d0c 100644
--- a/runtime/lambda/box_table.h
+++ b/runtime/lambda/box_table.h
@@ -18,6 +18,7 @@
#include "base/allocator.h"
#include "base/hash_map.h"
+#include "base/out.h"
#include "gc_root.h"
#include "base/macros.h"
#include "base/mutex.h"
@@ -48,30 +49,28 @@
// Boxes a closure into an object. Returns null and throws an exception on failure.
mirror::Object* BoxLambda(const ClosureType& closure)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(Locks::lambda_table_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::lambda_table_lock_);
// Unboxes an object back into the lambda. Returns false and throws an exception on failure.
- bool UnboxLambda(mirror::Object* object, ClosureType* out_closure)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool UnboxLambda(mirror::Object* object, out<ClosureType> out_closure)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Sweep weak references to lambda boxes. Update the addresses if the objects have been
// moved, and delete them from the table if the objects have been cleaned up.
void SweepWeakBoxedLambdas(IsMarkedVisitor* visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(Locks::lambda_table_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::lambda_table_lock_);
// GC callback: Temporarily block anyone from touching the map.
void DisallowNewWeakBoxedLambdas()
- LOCKS_EXCLUDED(Locks::lambda_table_lock_);
+ REQUIRES(!Locks::lambda_table_lock_);
// GC callback: Unblock any readers who have been queued waiting to touch the map.
void AllowNewWeakBoxedLambdas()
- LOCKS_EXCLUDED(Locks::lambda_table_lock_);
+ REQUIRES(!Locks::lambda_table_lock_);
// GC callback: Verify that the state is now blocking anyone from touching the map.
void EnsureNewWeakBoxedLambdasDisallowed()
- LOCKS_EXCLUDED(Locks::lambda_table_lock_);
+ REQUIRES(!Locks::lambda_table_lock_);
BoxTable();
~BoxTable() = default;
@@ -93,11 +92,11 @@
// Attempt to look up the lambda in the map, or return null if it's not there yet.
ValueType FindBoxedLambda(const ClosureType& closure) const
- SHARED_LOCKS_REQUIRED(Locks::lambda_table_lock_);
+ SHARED_REQUIRES(Locks::lambda_table_lock_);
// If the GC has come in and temporarily disallowed touching weaks, block until it is allowed.
void BlockUntilWeaksAllowed()
- SHARED_LOCKS_REQUIRED(Locks::lambda_table_lock_);
+ SHARED_REQUIRES(Locks::lambda_table_lock_);
// EmptyFn implementation for art::HashMap
struct EmptyFn {
diff --git a/runtime/linear_alloc.h b/runtime/linear_alloc.h
index c10ddfd..743ee77 100644
--- a/runtime/linear_alloc.h
+++ b/runtime/linear_alloc.h
@@ -28,24 +28,24 @@
public:
explicit LinearAlloc(ArenaPool* pool);
- void* Alloc(Thread* self, size_t size) LOCKS_EXCLUDED(lock_);
+ void* Alloc(Thread* self, size_t size) REQUIRES(!lock_);
// Realloc never frees the input pointer, it is the caller's job to do this if necessary.
- void* Realloc(Thread* self, void* ptr, size_t old_size, size_t new_size) LOCKS_EXCLUDED(lock_);
+ void* Realloc(Thread* self, void* ptr, size_t old_size, size_t new_size) REQUIRES(!lock_);
// Allocate and construct an array of structs of type T.
template<class T>
- T* AllocArray(Thread* self, size_t elements) {
+ T* AllocArray(Thread* self, size_t elements) REQUIRES(!lock_) {
return reinterpret_cast<T*>(Alloc(self, elements * sizeof(T)));
}
// Return the number of bytes used in the allocator.
- size_t GetUsedMemory() const LOCKS_EXCLUDED(lock_);
+ size_t GetUsedMemory() const REQUIRES(!lock_);
- ArenaPool* GetArenaPool() LOCKS_EXCLUDED(lock_);
+ ArenaPool* GetArenaPool() REQUIRES(!lock_);
// Return true if the linear alloc contains an address.
- bool Contains(void* ptr) const;
+ bool Contains(void* ptr) const REQUIRES(!lock_);
private:
mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 8df8f96..d9ad7dc 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -280,7 +280,7 @@
ScopedFd fd(-1);
#ifdef USE_ASHMEM
-#ifdef HAVE_ANDROID_OS
+#ifdef __ANDROID__
const bool use_ashmem = true;
#else
// When not on Android ashmem is faked using files in /tmp. Ensure that such files won't
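
HAVE_ANDROID_OS was a define injected by the build system; __ANDROID__ is predefined by any compiler targeting Android, so the same check now works without makefile plumbing. The idiom in isolation:

// __ANDROID__ comes from the toolchain itself (e.g. *-linux-android-clang),
// so no -DHAVE_ANDROID_OS flag is required anywhere in the build.
#if defined(__ANDROID__)
static constexpr bool kRunningOnAndroid = true;
#else
static constexpr bool kRunningOnAndroid = false;
#endif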
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index 01e29c9..196a7f6 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -92,7 +92,7 @@
std::string* error_msg);
// Releases the memory mapping.
- ~MemMap() LOCKS_EXCLUDED(Locks::mem_maps_lock_);
+ ~MemMap() REQUIRES(!Locks::mem_maps_lock_);
const std::string& GetName() const {
return name_;
@@ -142,25 +142,25 @@
std::string* error_msg);
static bool CheckNoGaps(MemMap* begin_map, MemMap* end_map)
- LOCKS_EXCLUDED(Locks::mem_maps_lock_);
+ REQUIRES(!Locks::mem_maps_lock_);
static void DumpMaps(std::ostream& os, bool terse = false)
- LOCKS_EXCLUDED(Locks::mem_maps_lock_);
+ REQUIRES(!Locks::mem_maps_lock_);
typedef AllocationTrackingMultiMap<void*, MemMap*, kAllocatorTagMaps> Maps;
- static void Init() LOCKS_EXCLUDED(Locks::mem_maps_lock_);
- static void Shutdown() LOCKS_EXCLUDED(Locks::mem_maps_lock_);
+ static void Init() REQUIRES(!Locks::mem_maps_lock_);
+ static void Shutdown() REQUIRES(!Locks::mem_maps_lock_);
private:
MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_begin, size_t base_size,
- int prot, bool reuse, size_t redzone_size = 0) LOCKS_EXCLUDED(Locks::mem_maps_lock_);
+ int prot, bool reuse, size_t redzone_size = 0) REQUIRES(!Locks::mem_maps_lock_);
static void DumpMapsLocked(std::ostream& os, bool terse)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mem_maps_lock_);
+ REQUIRES(Locks::mem_maps_lock_);
static bool HasMemMap(MemMap* map)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mem_maps_lock_);
+ REQUIRES(Locks::mem_maps_lock_);
static MemMap* GetLargestMemMapAt(void* address)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mem_maps_lock_);
+ REQUIRES(Locks::mem_maps_lock_);
const std::string name_;
uint8_t* const begin_; // Start of data.
diff --git a/runtime/mirror/abstract_method.h b/runtime/mirror/abstract_method.h
index 6240b3b..dc084be 100644
--- a/runtime/mirror/abstract_method.h
+++ b/runtime/mirror/abstract_method.h
@@ -34,12 +34,13 @@
class MANAGED AbstractMethod : public AccessibleObject {
public:
// Called from Constructor::CreateFromArtMethod, Method::CreateFromArtMethod.
- bool CreateFromArtMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool CreateFromArtMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Roles::uninterruptible_);
- ArtMethod* GetArtMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtMethod* GetArtMethod() SHARED_REQUIRES(Locks::mutator_lock_);
// Only used by the image writer.
- void SetArtMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::Class* GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetArtMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::Class* GetDeclaringClass() SHARED_REQUIRES(Locks::mutator_lock_);
private:
static MemberOffset ArtMethodOffset() {
diff --git a/runtime/mirror/accessible_object.h b/runtime/mirror/accessible_object.h
index 6d4c0f6..dcf5118 100644
--- a/runtime/mirror/accessible_object.h
+++ b/runtime/mirror/accessible_object.h
@@ -36,12 +36,12 @@
}
template<bool kTransactionActive>
- void SetAccessible(bool value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetAccessible(bool value) SHARED_REQUIRES(Locks::mutator_lock_) {
UNUSED(padding_);
return SetFieldBoolean<kTransactionActive>(FlagOffset(), value ? 1u : 0u);
}
- bool IsAccessible() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsAccessible() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetFieldBoolean(FlagOffset());
}
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index 88d75ab..3d54029 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -101,7 +101,7 @@
}
void operator()(Object* obj, size_t usable_size) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
UNUSED(usable_size);
// Avoid AsArray as object is not yet in live bitmap or allocation stack.
Array* array = down_cast<Array*>(obj);
@@ -126,7 +126,7 @@
}
void operator()(Object* obj, size_t usable_size) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// Avoid AsArray as object is not yet in live bitmap or allocation stack.
Array* array = down_cast<Array*>(obj);
// DCHECK(array->IsArrayInstance());
diff --git a/runtime/mirror/array.cc b/runtime/mirror/array.cc
index d72c03f..4128689 100644
--- a/runtime/mirror/array.cc
+++ b/runtime/mirror/array.cc
@@ -43,7 +43,7 @@
static Array* RecursiveCreateMultiArray(Thread* self,
Handle<Class> array_class, int current_dimension,
Handle<mirror::IntArray> dimensions)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
int32_t array_length = dimensions->Get(current_dimension);
StackHandleScope<1> hs(self);
Handle<Array> new_array(
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index e65611d..b27a884 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -39,21 +39,21 @@
template <bool kIsInstrumented, bool kFillUsable = false>
ALWAYS_INLINE static Array* Alloc(Thread* self, Class* array_class, int32_t component_count,
size_t component_size_shift, gc::AllocatorType allocator_type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
static Array* CreateMultiArray(Thread* self, Handle<Class> element_class,
Handle<IntArray> dimensions)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- size_t SizeOf() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ size_t SizeOf() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE int32_t GetLength() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE int32_t GetLength() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Array, length_));
}
- void SetLength(int32_t length) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetLength(int32_t length) SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK_GE(length, 0);
// We use non transactional version since we can't undo this write. We also disable checking
// since it would fail during a transaction.
@@ -67,7 +67,7 @@
static MemberOffset DataOffset(size_t component_size);
void* GetRawData(size_t component_size, int32_t index)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
intptr_t data = reinterpret_cast<intptr_t>(this) + DataOffset(component_size).Int32Value() +
+ (index * component_size);
return reinterpret_cast<void*>(data);
@@ -82,16 +82,18 @@
// Returns true if the index is valid. If not, throws an ArrayIndexOutOfBoundsException and
// returns false.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE bool CheckIsValidIndex(int32_t index) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE bool CheckIsValidIndex(int32_t index) SHARED_REQUIRES(Locks::mutator_lock_);
- Array* CopyOf(Thread* self, int32_t new_length) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ Array* CopyOf(Thread* self, int32_t new_length) SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Roles::uninterruptible_);
protected:
- void ThrowArrayStoreException(Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void ThrowArrayStoreException(Object* object) SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Roles::uninterruptible_);
private:
void ThrowArrayIndexOutOfBoundsException(int32_t index)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// The number of array elements.
int32_t length_;
@@ -107,32 +109,32 @@
typedef T ElementType;
static PrimitiveArray<T>* Alloc(Thread* self, size_t length)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
- const T* GetData() const ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const T* GetData() const ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
return reinterpret_cast<const T*>(GetRawData(sizeof(T), 0));
}
- T* GetData() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ T* GetData() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
return reinterpret_cast<T*>(GetRawData(sizeof(T), 0));
}
- T Get(int32_t i) ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ T Get(int32_t i) ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_);
- T GetWithoutChecks(int32_t i) ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ T GetWithoutChecks(int32_t i) ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(CheckIsValidIndex(i));
return GetData()[i];
}
- void Set(int32_t i, T value) ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void Set(int32_t i, T value) ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_);
// TODO fix thread safety analysis broken by the use of template. This should be
- // SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
+ // SHARED_REQUIRES(Locks::mutator_lock_).
template<bool kTransactionActive, bool kCheckTransaction = true>
void Set(int32_t i, T value) ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS;
// TODO fix thread safety analysis broken by the use of template. This should be
- // SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
+ // SHARED_REQUIRES(Locks::mutator_lock_).
template<bool kTransactionActive,
bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
@@ -144,7 +146,7 @@
* and the arrays non-null.
*/
void Memmove(int32_t dst_pos, PrimitiveArray<T>* src, int32_t src_pos, int32_t count)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
/*
* Works like memcpy(), except we guarantee not to allow tearing of array values (ie using
@@ -152,7 +154,7 @@
* and the arrays non-null.
*/
void Memcpy(int32_t dst_pos, PrimitiveArray<T>* src, int32_t src_pos, int32_t count)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static void SetArrayClass(Class* array_class) {
CHECK(array_class_.IsNull());
@@ -160,7 +162,7 @@
array_class_ = GcRoot<Class>(array_class);
}
- static Class* GetArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ static Class* GetArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(!array_class_.IsNull());
return array_class_.Read();
}
@@ -170,7 +172,7 @@
array_class_ = GcRoot<Class>(nullptr);
}
- static void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
private:
static GcRoot<Class> array_class_;
@@ -183,11 +185,11 @@
public:
template<typename T>
T GetElementPtrSize(uint32_t idx, size_t ptr_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive = false, bool kUnchecked = false, typename T>
void SetElementPtrSize(uint32_t idx, T element, size_t ptr_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
};
} // namespace mirror
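
The TODOs above keep the templated PrimitiveArray::Set overloads on NO_THREAD_SAFETY_ANALYSIS because the analysis cannot yet see through their template parameters. That macro is the standard opt-out; a sketch of its usual definition, per Clang's documentation:

// Skips thread-safety checking for one function the analysis cannot model.
// Use sparingly: the function body is then entirely unchecked.
#define NO_THREAD_SAFETY_ANALYSIS \
  __attribute__((no_thread_safety_analysis))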
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 73a194d..6568487 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -680,6 +680,8 @@
// linked yet.
VisitStaticFieldsReferences<kVisitClass>(this, visitor);
}
+ // Since this class is reachable, we must also visit the associated roots when we scan it.
+ VisitNativeRoots(visitor, Runtime::Current()->GetClassLinker()->GetImagePointerSize());
}
template<ReadBarrierOption kReadBarrierOption>
@@ -816,20 +818,22 @@
if (sfields != nullptr) {
for (size_t i = 0, count = NumStaticFields(); i < count; ++i) {
auto* f = &sfields[i];
+ // Visit roots first in case the declaring class gets moved.
+ f->VisitRoots(visitor);
if (kIsDebugBuild && IsResolved()) {
CHECK_EQ(f->GetDeclaringClass(), this) << GetStatus();
}
- f->VisitRoots(visitor);
}
}
ArtField* const ifields = GetIFieldsUnchecked();
if (ifields != nullptr) {
for (size_t i = 0, count = NumInstanceFields(); i < count; ++i) {
auto* f = &ifields[i];
+ // Visit roots first in case the declaring class gets moved.
+ f->VisitRoots(visitor);
if (kIsDebugBuild && IsResolved()) {
CHECK_EQ(f->GetDeclaringClass(), this) << GetStatus();
}
- f->VisitRoots(visitor);
}
}
// We may see GetDirectMethodsPtr() == null with NumDirectMethods() != 0 if the root marking
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 5bd6583..701ba4a 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -835,7 +835,7 @@
}
void operator()(mirror::Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
StackHandleScope<1> hs(self_);
Handle<mirror::Class> h_new_class_obj(hs.NewHandle(obj->AsClass()));
mirror::Object::CopyObject(self_, h_new_class_obj.Get(), orig_->Get(), copy_bytes_);
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index cc63dec..d95bcd8 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -127,7 +127,7 @@
};
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- Status GetStatus() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Status GetStatus() SHARED_REQUIRES(Locks::mutator_lock_) {
static_assert(sizeof(Status) == sizeof(uint32_t), "Size of status not equal to uint32");
return static_cast<Status>(
GetField32Volatile<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, status_)));
@@ -135,7 +135,7 @@
// This is static because 'this' may be moved by GC.
static void SetStatus(Handle<Class> h_this, Status new_status, Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
static MemberOffset StatusOffset() {
return OFFSET_OF_OBJECT_MEMBER(Class, status_);
@@ -143,146 +143,146 @@
// Returns true if the class has been retired.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsRetired() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsRetired() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetStatus<kVerifyFlags>() == kStatusRetired;
}
// Returns true if the class has failed to link.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsErroneous() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsErroneous() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetStatus<kVerifyFlags>() == kStatusError;
}
// Returns true if the class has been loaded.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsIdxLoaded() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsIdxLoaded() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetStatus<kVerifyFlags>() >= kStatusIdx;
}
// Returns true if the class has been loaded.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsLoaded() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsLoaded() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetStatus<kVerifyFlags>() >= kStatusLoaded;
}
// Returns true if the class has been linked.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsResolved() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsResolved() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetStatus<kVerifyFlags>() >= kStatusResolved;
}
// Returns true if the class was compile-time verified.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsCompileTimeVerified() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsCompileTimeVerified() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetStatus<kVerifyFlags>() >= kStatusRetryVerificationAtRuntime;
}
// Returns true if the class has been verified.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsVerified() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsVerified() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetStatus<kVerifyFlags>() >= kStatusVerified;
}
// Returns true if the class is initializing.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsInitializing() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsInitializing() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetStatus<kVerifyFlags>() >= kStatusInitializing;
}
// Returns true if the class is initialized.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsInitialized() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsInitialized() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetStatus<kVerifyFlags>() == kStatusInitialized;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE uint32_t GetAccessFlags() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE uint32_t GetAccessFlags() SHARED_REQUIRES(Locks::mutator_lock_);
static MemberOffset AccessFlagsOffset() {
return OFFSET_OF_OBJECT_MEMBER(Class, access_flags_);
}
- void SetAccessFlags(uint32_t new_access_flags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetAccessFlags(uint32_t new_access_flags) SHARED_REQUIRES(Locks::mutator_lock_);
// Returns true if the class is an interface.
- ALWAYS_INLINE bool IsInterface() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE bool IsInterface() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccInterface) != 0;
}
// Returns true if the class is declared public.
- ALWAYS_INLINE bool IsPublic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE bool IsPublic() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccPublic) != 0;
}
// Returns true if the class is declared final.
- ALWAYS_INLINE bool IsFinal() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE bool IsFinal() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccFinal) != 0;
}
- ALWAYS_INLINE bool IsFinalizable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE bool IsFinalizable() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccClassIsFinalizable) != 0;
}
- ALWAYS_INLINE void SetFinalizable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE void SetFinalizable() SHARED_REQUIRES(Locks::mutator_lock_) {
uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
SetAccessFlags(flags | kAccClassIsFinalizable);
}
- ALWAYS_INLINE bool IsStringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE bool IsStringClass() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetField32(AccessFlagsOffset()) & kAccClassIsStringClass) != 0;
}
- ALWAYS_INLINE void SetStringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE void SetStringClass() SHARED_REQUIRES(Locks::mutator_lock_) {
uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
SetAccessFlags(flags | kAccClassIsStringClass);
}
// Returns true if the class is abstract.
- ALWAYS_INLINE bool IsAbstract() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE bool IsAbstract() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccAbstract) != 0;
}
// Returns true if the class is an annotation.
- ALWAYS_INLINE bool IsAnnotation() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE bool IsAnnotation() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccAnnotation) != 0;
}
// Returns true if the class is synthetic.
- ALWAYS_INLINE bool IsSynthetic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE bool IsSynthetic() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccSynthetic) != 0;
}
// Returns true if the class can avoid access checks.
- bool IsPreverified() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsPreverified() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccPreverified) != 0;
}
- void SetPreverified() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetPreverified() SHARED_REQUIRES(Locks::mutator_lock_) {
uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
SetAccessFlags(flags | kAccPreverified);
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsTypeOfReferenceClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsTypeOfReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags<kVerifyFlags>() & kAccClassIsReference) != 0;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsWeakReferenceClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsWeakReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags<kVerifyFlags>() & kAccClassIsWeakReference) != 0;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsSoftReferenceClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsSoftReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags<kVerifyFlags>() & kAccReferenceFlagsMask) == kAccClassIsReference;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsFinalizerReferenceClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsFinalizerReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags<kVerifyFlags>() & kAccClassIsFinalizerReference) != 0;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPhantomReferenceClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsPhantomReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags<kVerifyFlags>() & kAccClassIsPhantomReference) != 0;
}
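The payoff of the annotations above is static checking at each call site: a
caller that cannot prove it holds the mutator lock in shared mode draws a
compile-time warning under -Wthread-safety. A small illustration with stand-in
names (MutatorLock, mutator_lock, IsInterface); this is not ART's code:

    struct __attribute__((capability("mutex"))) MutatorLock {};

    MutatorLock mutator_lock;

    bool IsInterface() __attribute__((requires_shared_capability(mutator_lock))) {
      return false;  // Placeholder body.
    }

    void Unannotated() {
      IsInterface();  // warning: requires holding 'mutator_lock' shared
    }

    bool Annotated() __attribute__((requires_shared_capability(mutator_lock))) {
      return IsInterface();  // OK: the requirement moves to our own callers.
    }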
@@ -291,7 +291,7 @@
// For array classes, where all the classes are final due to there being no sub-classes, an
// Object[] may be assigned to by a String[] but a String[] may not be assigned to by other
// types as the component is final.
- bool CannotBeAssignedFromOtherTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool CannotBeAssignedFromOtherTypes() SHARED_REQUIRES(Locks::mutator_lock_) {
if (!IsArrayClass()) {
return IsFinal();
} else {
@@ -306,18 +306,19 @@
// Returns true if this class is the placeholder and should retire and
// be replaced with a class with the right size for embedded imt/vtable.
- bool IsTemp() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsTemp() SHARED_REQUIRES(Locks::mutator_lock_) {
Status s = GetStatus();
return s < Status::kStatusResolving && ShouldHaveEmbeddedImtAndVTable();
}
- String* GetName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Returns the cached name.
- void SetName(String* name) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Sets the cached name.
+ String* GetName() SHARED_REQUIRES(Locks::mutator_lock_); // Returns the cached name.
+ void SetName(String* name) SHARED_REQUIRES(Locks::mutator_lock_); // Sets the cached name.
// Computes the name, then sets the cached value.
- static String* ComputeName(Handle<Class> h_this) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static String* ComputeName(Handle<Class> h_this) SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Roles::uninterruptible_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsProxyClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsProxyClass() SHARED_REQUIRES(Locks::mutator_lock_) {
// Read access flags without using the getter, as whether something is a proxy can be checked in
// any loaded state.
// TODO: switch to a check if the super class is java.lang.reflect.Proxy?
@@ -326,9 +327,9 @@
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- Primitive::Type GetPrimitiveType() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ Primitive::Type GetPrimitiveType() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_);
- void SetPrimitiveType(Primitive::Type new_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetPrimitiveType(Primitive::Type new_type) SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK_EQ(sizeof(Primitive::Type), sizeof(int32_t));
int32_t v32 = static_cast<int32_t>(new_type);
DCHECK_EQ(v32 & 0xFFFF, v32) << "upper 16 bits aren't zero";
@@ -338,81 +339,81 @@
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- size_t GetPrimitiveTypeSizeShift() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ size_t GetPrimitiveTypeSizeShift() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_);
// Returns true if the class is a primitive type.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPrimitive() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsPrimitive() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetPrimitiveType<kVerifyFlags>() != Primitive::kPrimNot;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPrimitiveBoolean() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsPrimitiveBoolean() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimBoolean;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPrimitiveByte() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsPrimitiveByte() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimByte;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPrimitiveChar() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsPrimitiveChar() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimChar;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPrimitiveShort() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsPrimitiveShort() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimShort;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPrimitiveInt() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsPrimitiveInt() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetPrimitiveType() == Primitive::kPrimInt;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPrimitiveLong() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsPrimitiveLong() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimLong;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPrimitiveFloat() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsPrimitiveFloat() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimFloat;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPrimitiveDouble() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsPrimitiveDouble() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimDouble;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPrimitiveVoid() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsPrimitiveVoid() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimVoid;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPrimitiveArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsPrimitiveArray() SHARED_REQUIRES(Locks::mutator_lock_) {
return IsArrayClass<kVerifyFlags>() &&
GetComponentType<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>()->
IsPrimitive();
}
// Depth of class from java.lang.Object
- uint32_t Depth() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ uint32_t Depth() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsArrayClass() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsClassClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsClassClass() SHARED_REQUIRES(Locks::mutator_lock_);
- bool IsThrowableClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsThrowableClass() SHARED_REQUIRES(Locks::mutator_lock_);
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsReferenceClass() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsReferenceClass() const SHARED_REQUIRES(Locks::mutator_lock_);
static MemberOffset ComponentTypeOffset() {
return OFFSET_OF_OBJECT_MEMBER(Class, component_type_);
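Many of these accessors read raw fields through OFFSET_OF_OBJECT_MEMBER, i.e. a
byte offset of a member wrapped in a small value type. A hedged sketch of the
idea; this simplified version uses plain offsetof, which the real code avoids
for mirror types since they are not standard-layout, and MemberOffset here is a
stand-in:

    #include <cstddef>
    #include <cstdint>

    class MemberOffset {
     public:
      constexpr explicit MemberOffset(size_t value) : value_(value) {}
      constexpr uint32_t Uint32Value() const { return static_cast<uint32_t>(value_); }
     private:
      size_t value_;
    };

    #define OFFSET_OF_OBJECT_MEMBER(type, field) MemberOffset(offsetof(type, field))

    struct Example {
      uint32_t status_;
      uint32_t access_flags_;
    };

    static_assert(OFFSET_OF_OBJECT_MEMBER(Example, access_flags_).Uint32Value() == 4,
                  "access_flags_ follows the 4-byte status_ field");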
@@ -420,9 +422,9 @@
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- Class* GetComponentType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ Class* GetComponentType() SHARED_REQUIRES(Locks::mutator_lock_);
- void SetComponentType(Class* new_component_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetComponentType(Class* new_component_type) SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(GetComponentType() == nullptr);
DCHECK(new_component_type != nullptr);
// Component type is invariant: use non-transactional mode without check.
@@ -430,43 +432,43 @@
}
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- size_t GetComponentSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ size_t GetComponentSize() SHARED_REQUIRES(Locks::mutator_lock_) {
return 1U << GetComponentSizeShift();
}
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- size_t GetComponentSizeShift() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ size_t GetComponentSizeShift() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetComponentType<kDefaultVerifyFlags, kReadBarrierOption>()->GetPrimitiveTypeSizeShift();
}
- bool IsObjectClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsObjectClass() SHARED_REQUIRES(Locks::mutator_lock_) {
return !IsPrimitive() && GetSuperClass() == nullptr;
}
- bool IsInstantiableNonArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsInstantiableNonArray() SHARED_REQUIRES(Locks::mutator_lock_) {
return !IsPrimitive() && !IsInterface() && !IsAbstract() && !IsArrayClass();
}
- bool IsInstantiable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsInstantiable() SHARED_REQUIRES(Locks::mutator_lock_) {
return (!IsPrimitive() && !IsInterface() && !IsAbstract()) ||
(IsAbstract() && IsArrayClass());
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsObjectArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsObjectArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetComponentType<kVerifyFlags>() != nullptr &&
!GetComponentType<kVerifyFlags>()->IsPrimitive();
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsIntArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsIntArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) {
constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
auto* component_type = GetComponentType<kVerifyFlags>();
return component_type != nullptr && component_type->template IsPrimitiveInt<kNewFlags>();
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsLongArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsLongArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) {
constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
auto* component_type = GetComponentType<kVerifyFlags>();
return component_type != nullptr && component_type->template IsPrimitiveLong<kNewFlags>();
@@ -475,16 +477,16 @@
// Creates a raw object instance but does not invoke the default constructor.
template<bool kIsInstrumented, bool kCheckAddFinalizer = true>
ALWAYS_INLINE Object* Alloc(Thread* self, gc::AllocatorType allocator_type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
Object* AllocObject(Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
Object* AllocNonMovableObject(Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsVariableSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsVariableSize() SHARED_REQUIRES(Locks::mutator_lock_) {
// Classes, arrays, and strings vary in size, and so the object_size_ field cannot
// be used to Get their instance size
return IsClassClass<kVerifyFlags, kReadBarrierOption>() ||
@@ -493,17 +495,17 @@
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- uint32_t SizeOf() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uint32_t SizeOf() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, class_size_));
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- uint32_t GetClassSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uint32_t GetClassSize() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, class_size_));
}
void SetClassSize(uint32_t new_class_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Compute how many bytes would be used by a class with the given elements.
static uint32_t ComputeClassSize(bool has_embedded_tables,
@@ -529,31 +531,31 @@
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- uint32_t GetObjectSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ uint32_t GetObjectSize() SHARED_REQUIRES(Locks::mutator_lock_);
static MemberOffset ObjectSizeOffset() {
return OFFSET_OF_OBJECT_MEMBER(Class, object_size_);
}
- void SetObjectSize(uint32_t new_object_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetObjectSize(uint32_t new_object_size) SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(!IsVariableSize());
// Not called within a transaction.
return SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, object_size_), new_object_size);
}
void SetObjectSizeWithoutChecks(uint32_t new_object_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// Not called within a transaction.
return SetField32<false, false, kVerifyNone>(
OFFSET_OF_OBJECT_MEMBER(Class, object_size_), new_object_size);
}
// Returns true if this class is in the same package as that class.
- bool IsInSamePackage(Class* that) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsInSamePackage(Class* that) SHARED_REQUIRES(Locks::mutator_lock_);
static bool IsInSamePackage(const StringPiece& descriptor1, const StringPiece& descriptor2);
// Returns true if this class can access that class.
- bool CanAccess(Class* that) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool CanAccess(Class* that) SHARED_REQUIRES(Locks::mutator_lock_) {
return that->IsPublic() || this->IsInSamePackage(that);
}
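CanAccess above falls back to IsInSamePackage for non-public classes, and the
descriptor-based overload boils down to comparing the package prefixes of two
type descriptors such as "Ljava/lang/String;". A simplified sketch of that
comparison (assumption: the real check also takes class loaders into account):

    #include <cassert>
    #include <string>

    // Everything before the last '/' is the package part of a descriptor.
    static std::string PackageOf(const std::string& descriptor) {
      auto last_slash = descriptor.rfind('/');
      // Types in the unnamed package (e.g. "LFoo;") have no '/' at all.
      return last_slash == std::string::npos ? "" : descriptor.substr(0, last_slash);
    }

    static bool IsInSamePackage(const std::string& d1, const std::string& d2) {
      return PackageOf(d1) == PackageOf(d2);
    }

    int main() {
      assert(IsInSamePackage("Ljava/lang/String;", "Ljava/lang/Object;"));
      assert(!IsInSamePackage("Ljava/lang/String;", "Ljava/util/List;"));
      return 0;
    }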
@@ -561,7 +563,7 @@
// Note that access to the class isn't checked in case the declaring class is protected and the
// method has been exposed by a public sub-class
bool CanAccessMember(Class* access_to, uint32_t member_flags)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// Classes can access all of their own members
if (this == access_to) {
return true;
@@ -589,34 +591,34 @@
// referenced by the FieldId in the DexFile in case the declaring class is inaccessible.
bool CanAccessResolvedField(Class* access_to, ArtField* field,
DexCache* dex_cache, uint32_t field_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool CheckResolvedFieldAccess(Class* access_to, ArtField* field,
uint32_t field_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Can this class access a resolved method?
// Note that access to the method's class is checked, and this may require looking up the class
// referenced by the MethodId in the DexFile in case the declaring class is inaccessible.
bool CanAccessResolvedMethod(Class* access_to, ArtMethod* resolved_method,
DexCache* dex_cache, uint32_t method_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template <InvokeType throw_invoke_type>
bool CheckResolvedMethodAccess(Class* access_to, ArtMethod* resolved_method,
uint32_t method_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- bool IsSubClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsSubClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
// Can src be assigned to this class? For example, String can be assigned to Object (by an
// upcast), however, an Object cannot be assigned to a String as a potentially exception throwing
// downcast would be necessary. Similarly for interfaces, a class that implements (or an interface
// that extends) another can be assigned to its parent, but not vice-versa. All Classes may assign
// to themselves. Classes for primitive types may not assign to each other.
- ALWAYS_INLINE bool IsAssignableFrom(Class* src) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE bool IsAssignableFrom(Class* src) SHARED_REQUIRES(Locks::mutator_lock_);
- ALWAYS_INLINE Class* GetSuperClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE Class* GetSuperClass() SHARED_REQUIRES(Locks::mutator_lock_);
- void SetSuperClass(Class *new_super_class) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetSuperClass(Class *new_super_class) SHARED_REQUIRES(Locks::mutator_lock_) {
// Super class is assigned once, except during class linker initialization.
Class* old_super_class = GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(Class, super_class_));
DCHECK(old_super_class == nullptr || old_super_class == new_super_class);
@@ -624,7 +626,7 @@
SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, super_class_), new_super_class);
}
- bool HasSuperClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool HasSuperClass() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetSuperClass() != nullptr;
}
@@ -632,9 +634,9 @@
return MemberOffset(OFFSETOF_MEMBER(Class, super_class_));
}
- ClassLoader* GetClassLoader() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ClassLoader* GetClassLoader() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_);
- void SetClassLoader(ClassLoader* new_cl) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetClassLoader(ClassLoader* new_cl) SHARED_REQUIRES(Locks::mutator_lock_);
static MemberOffset DexCacheOffset() {
return MemberOffset(OFFSETOF_MEMBER(Class, dex_cache_));
@@ -646,83 +648,83 @@
kDumpClassInitialized = (1 << 2),
};
- void DumpClass(std::ostream& os, int flags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void DumpClass(std::ostream& os, int flags) SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ DexCache* GetDexCache() SHARED_REQUIRES(Locks::mutator_lock_);
// Also updates the dex_cache_strings_ variable from new_dex_cache.
- void SetDexCache(DexCache* new_dex_cache) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetDexCache(DexCache* new_dex_cache) SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE StrideIterator<ArtMethod> DirectMethodsBegin(size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE StrideIterator<ArtMethod> DirectMethodsEnd(size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetDirectMethods(size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- ArtMethod* GetDirectMethodsPtr() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtMethod* GetDirectMethodsPtr() SHARED_REQUIRES(Locks::mutator_lock_);
void SetDirectMethodsPtr(ArtMethod* new_direct_methods)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Used by image writer.
void SetDirectMethodsPtrUnchecked(ArtMethod* new_direct_methods)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE ArtMethod* GetDirectMethod(size_t i, size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Use only when we are allocating or populating the method arrays.
ALWAYS_INLINE ArtMethod* GetDirectMethodUnchecked(size_t i, size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE ArtMethod* GetVirtualMethodUnchecked(size_t i, size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Returns the number of static, private, and constructor methods.
- ALWAYS_INLINE uint32_t NumDirectMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE uint32_t NumDirectMethods() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_direct_methods_));
}
- void SetNumDirectMethods(uint32_t num) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetNumDirectMethods(uint32_t num) SHARED_REQUIRES(Locks::mutator_lock_) {
return SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, num_direct_methods_), num);
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE ArtMethod* GetVirtualMethodsPtr() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE ArtMethod* GetVirtualMethodsPtr() SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE StrideIterator<ArtMethod> VirtualMethodsBegin(size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE StrideIterator<ArtMethod> VirtualMethodsEnd(size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetVirtualMethods(size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void SetVirtualMethodsPtr(ArtMethod* new_virtual_methods)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Returns the number of non-inherited virtual methods.
- ALWAYS_INLINE uint32_t NumVirtualMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE uint32_t NumVirtualMethods() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_virtual_methods_));
}
- void SetNumVirtualMethods(uint32_t num) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetNumVirtualMethods(uint32_t num) SHARED_REQUIRES(Locks::mutator_lock_) {
return SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, num_virtual_methods_), num);
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ArtMethod* GetVirtualMethod(size_t i, size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* GetVirtualMethodDuringLinking(size_t i, size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- ALWAYS_INLINE PointerArray* GetVTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE PointerArray* GetVTable() SHARED_REQUIRES(Locks::mutator_lock_);
- ALWAYS_INLINE PointerArray* GetVTableDuringLinking() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE PointerArray* GetVTableDuringLinking() SHARED_REQUIRES(Locks::mutator_lock_);
- void SetVTable(PointerArray* new_vtable) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetVTable(PointerArray* new_vtable) SHARED_REQUIRES(Locks::mutator_lock_);
static MemberOffset VTableOffset() {
return OFFSET_OF_OBJECT_MEMBER(Class, vtable_);
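DirectMethodsBegin/End and their virtual counterparts above return
StrideIterator<ArtMethod> because ArtMethod objects live in a plain native
array whose element size depends on the target pointer size. A minimal sketch
of a stride-based iterator in that spirit; the real class carries more
operators and checks:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    template <typename T>
    class StrideIterator {
     public:
      StrideIterator(uintptr_t ptr, size_t stride) : ptr_(ptr), stride_(stride) {}
      bool operator!=(const StrideIterator& other) const { return ptr_ != other.ptr_; }
      StrideIterator& operator++() { ptr_ += stride_; return *this; }
      T& operator*() const { return *reinterpret_cast<T*>(ptr_); }
     private:
      uintptr_t ptr_;
      size_t stride_;  // True element size; may exceed sizeof(T).
    };

    int main() {
      constexpr size_t kStride = 16;  // Pretend each element occupies 16 bytes.
      alignas(16) unsigned char storage[3 * kStride] = {};
      for (size_t i = 0; i < 3; ++i) {
        *reinterpret_cast<int*>(storage + i * kStride) = static_cast<int>(i);
      }
      StrideIterator<int> it(reinterpret_cast<uintptr_t>(storage), kStride);
      StrideIterator<int> end(reinterpret_cast<uintptr_t>(storage) + sizeof(storage),
                              kStride);
      for (; it != end; ++it) {
        std::printf("%d\n", *it);  // Prints 0, 1, 2.
      }
      return 0;
    }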
@@ -732,362 +734,363 @@
return MemberOffset(sizeof(Class));
}
- bool ShouldHaveEmbeddedImtAndVTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool ShouldHaveEmbeddedImtAndVTable() SHARED_REQUIRES(Locks::mutator_lock_) {
return IsInstantiable();
}
- bool HasVTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool HasVTable() SHARED_REQUIRES(Locks::mutator_lock_);
static MemberOffset EmbeddedImTableEntryOffset(uint32_t i, size_t pointer_size);
static MemberOffset EmbeddedVTableEntryOffset(uint32_t i, size_t pointer_size);
ArtMethod* GetEmbeddedImTableEntry(uint32_t i, size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void SetEmbeddedImTableEntry(uint32_t i, ArtMethod* method, size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- int32_t GetVTableLength() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ int32_t GetVTableLength() SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* GetVTableEntry(uint32_t i, size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- int32_t GetEmbeddedVTableLength() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ int32_t GetEmbeddedVTableLength() SHARED_REQUIRES(Locks::mutator_lock_);
- void SetEmbeddedVTableLength(int32_t len) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetEmbeddedVTableLength(int32_t len) SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* GetEmbeddedVTableEntry(uint32_t i, size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void SetEmbeddedVTableEntry(uint32_t i, ArtMethod* method, size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
inline void SetEmbeddedVTableEntryUnchecked(uint32_t i, ArtMethod* method, size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void PopulateEmbeddedImtAndVTable(ArtMethod* const (&methods)[kImtSize], size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Given a method implemented by this class but potentially from a super class, return the
// specific implementation method for this class.
ArtMethod* FindVirtualMethodForVirtual(ArtMethod* method, size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Given a method implemented by this class' super class, return the specific implementation
// method for this class.
ArtMethod* FindVirtualMethodForSuper(ArtMethod* method, size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Given a method implemented by this class, but potentially from a
// super class or interface, return the specific implementation
// method for this class.
ArtMethod* FindVirtualMethodForInterface(ArtMethod* method, size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE;
+ SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE;
ArtMethod* FindVirtualMethodForVirtualOrInterface(ArtMethod* method, size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindInterfaceMethod(const StringPiece& name, const StringPiece& signature,
size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindInterfaceMethod(const StringPiece& name, const Signature& signature,
size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindInterfaceMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindDeclaredDirectMethod(const StringPiece& name, const StringPiece& signature,
size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindDeclaredDirectMethod(const StringPiece& name, const Signature& signature,
size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindDeclaredDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindDirectMethod(const StringPiece& name, const StringPiece& signature,
size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindDirectMethod(const StringPiece& name, const Signature& signature,
size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindDeclaredVirtualMethod(const StringPiece& name, const StringPiece& signature,
size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindDeclaredVirtualMethod(const StringPiece& name, const Signature& signature,
size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindDeclaredVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindVirtualMethod(const StringPiece& name, const StringPiece& signature,
size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindVirtualMethod(const StringPiece& name, const Signature& signature,
size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- ArtMethod* FindClassInitializer(size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtMethod* FindClassInitializer(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
- ALWAYS_INLINE int32_t GetIfTableCount() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE int32_t GetIfTableCount() SHARED_REQUIRES(Locks::mutator_lock_);
- ALWAYS_INLINE IfTable* GetIfTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE IfTable* GetIfTable() SHARED_REQUIRES(Locks::mutator_lock_);
- ALWAYS_INLINE void SetIfTable(IfTable* new_iftable) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE void SetIfTable(IfTable* new_iftable) SHARED_REQUIRES(Locks::mutator_lock_);
// Get instance fields of the class (See also GetSFields).
- ArtField* GetIFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtField* GetIFields() SHARED_REQUIRES(Locks::mutator_lock_);
- void SetIFields(ArtField* new_ifields) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetIFields(ArtField* new_ifields) SHARED_REQUIRES(Locks::mutator_lock_);
// Unchecked edition has no verification flags.
- void SetIFieldsUnchecked(ArtField* new_sfields) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetIFieldsUnchecked(ArtField* new_sfields) SHARED_REQUIRES(Locks::mutator_lock_);
- uint32_t NumInstanceFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uint32_t NumInstanceFields() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_instance_fields_));
}
- void SetNumInstanceFields(uint32_t num) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetNumInstanceFields(uint32_t num) SHARED_REQUIRES(Locks::mutator_lock_) {
return SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, num_instance_fields_), num);
}
- ArtField* GetInstanceField(uint32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtField* GetInstanceField(uint32_t i) SHARED_REQUIRES(Locks::mutator_lock_);
// Returns the number of instance fields containing reference types.
- uint32_t NumReferenceInstanceFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uint32_t NumReferenceInstanceFields() SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(IsResolved() || IsErroneous());
return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_instance_fields_));
}
- uint32_t NumReferenceInstanceFieldsDuringLinking() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uint32_t NumReferenceInstanceFieldsDuringLinking() SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(IsLoaded() || IsErroneous());
return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_instance_fields_));
}
- void SetNumReferenceInstanceFields(uint32_t new_num) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetNumReferenceInstanceFields(uint32_t new_num) SHARED_REQUIRES(Locks::mutator_lock_) {
// Not called within a transaction.
SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_instance_fields_), new_num);
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- uint32_t GetReferenceInstanceOffsets() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ uint32_t GetReferenceInstanceOffsets() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_);
void SetReferenceInstanceOffsets(uint32_t new_reference_offsets)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Get the offset of the first reference instance field. Other reference instance fields follow.
MemberOffset GetFirstReferenceInstanceFieldOffset()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Returns the number of static fields containing reference types.
- uint32_t NumReferenceStaticFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uint32_t NumReferenceStaticFields() SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(IsResolved() || IsErroneous());
return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_static_fields_));
}
- uint32_t NumReferenceStaticFieldsDuringLinking() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uint32_t NumReferenceStaticFieldsDuringLinking() SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(IsLoaded() || IsErroneous() || IsRetired());
return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_static_fields_));
}
- void SetNumReferenceStaticFields(uint32_t new_num) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetNumReferenceStaticFields(uint32_t new_num) SHARED_REQUIRES(Locks::mutator_lock_) {
// Not called within a transaction.
SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_static_fields_), new_num);
}
// Get the offset of the first reference static field. Other reference static fields follow.
MemberOffset GetFirstReferenceStaticFieldOffset(size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Get the offset of the first reference static field. Other reference static fields follow.
MemberOffset GetFirstReferenceStaticFieldOffsetDuringLinking(size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Gets the static fields of the class.
- ArtField* GetSFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtField* GetSFields() SHARED_REQUIRES(Locks::mutator_lock_);
- void SetSFields(ArtField* new_sfields) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetSFields(ArtField* new_sfields) SHARED_REQUIRES(Locks::mutator_lock_);
// Unchecked edition has no verification flags.
- void SetSFieldsUnchecked(ArtField* new_sfields) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetSFieldsUnchecked(ArtField* new_sfields) SHARED_REQUIRES(Locks::mutator_lock_);
- uint32_t NumStaticFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uint32_t NumStaticFields() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_static_fields_));
}
- void SetNumStaticFields(uint32_t num) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetNumStaticFields(uint32_t num) SHARED_REQUIRES(Locks::mutator_lock_) {
return SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, num_static_fields_), num);
}
// TODO: uint16_t
- ArtField* GetStaticField(uint32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtField* GetStaticField(uint32_t i) SHARED_REQUIRES(Locks::mutator_lock_);
// Find a static or instance field using the JLS resolution order
static ArtField* FindField(Thread* self, Handle<Class> klass, const StringPiece& name,
const StringPiece& type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Finds the given instance field in this class or a superclass.
ArtField* FindInstanceField(const StringPiece& name, const StringPiece& type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Finds the given instance field in this class or a superclass, only searches classes that
// have the same dex cache.
ArtField* FindInstanceField(const DexCache* dex_cache, uint32_t dex_field_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ArtField* FindDeclaredInstanceField(const StringPiece& name, const StringPiece& type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ArtField* FindDeclaredInstanceField(const DexCache* dex_cache, uint32_t dex_field_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Finds the given static field in this class or a superclass.
static ArtField* FindStaticField(Thread* self, Handle<Class> klass, const StringPiece& name,
const StringPiece& type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Finds the given static field in this class or superclass, only searches classes that
// have the same dex cache.
static ArtField* FindStaticField(Thread* self, Handle<Class> klass, const DexCache* dex_cache,
uint32_t dex_field_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ArtField* FindDeclaredStaticField(const StringPiece& name, const StringPiece& type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ArtField* FindDeclaredStaticField(const DexCache* dex_cache, uint32_t dex_field_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- pid_t GetClinitThreadId() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ pid_t GetClinitThreadId() SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(IsIdxLoaded() || IsErroneous());
return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, clinit_thread_id_));
}
- void SetClinitThreadId(pid_t new_clinit_thread_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetClinitThreadId(pid_t new_clinit_thread_id) SHARED_REQUIRES(Locks::mutator_lock_);
- Class* GetVerifyErrorClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Class* GetVerifyErrorClass() SHARED_REQUIRES(Locks::mutator_lock_) {
// DCHECK(IsErroneous());
return GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(Class, verify_error_class_));
}
- uint16_t GetDexClassDefIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uint16_t GetDexClassDefIndex() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, dex_class_def_idx_));
}
- void SetDexClassDefIndex(uint16_t class_def_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetDexClassDefIndex(uint16_t class_def_idx) SHARED_REQUIRES(Locks::mutator_lock_) {
// Not called within a transaction.
SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, dex_class_def_idx_), class_def_idx);
}
- uint16_t GetDexTypeIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uint16_t GetDexTypeIndex() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, dex_type_idx_));
}
- void SetDexTypeIndex(uint16_t type_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetDexTypeIndex(uint16_t type_idx) SHARED_REQUIRES(Locks::mutator_lock_) {
// Not called within a transaction.
SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, dex_type_idx_), type_idx);
}
- static Class* GetJavaLangClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ static Class* GetJavaLangClass() SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(HasJavaLangClass());
return java_lang_Class_.Read();
}
- static bool HasJavaLangClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ static bool HasJavaLangClass() SHARED_REQUIRES(Locks::mutator_lock_) {
return !java_lang_Class_.IsNull();
}
// Can't call this SetClass or else it gets called instead of Object::SetClass in places.
- static void SetClassClass(Class* java_lang_Class) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void SetClassClass(Class* java_lang_Class) SHARED_REQUIRES(Locks::mutator_lock_);
static void ResetClass();
static void VisitRoots(RootVisitor* visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// VisitNativeRoots visits roots which are keyed off the native pointers, such as ArtFields
// and ArtMethods.
template<class Visitor>
void VisitNativeRoots(Visitor& visitor, size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// When class is verified, set the kAccPreverified flag on each method.
void SetPreverifiedFlagOnAllMethods(size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template <bool kVisitClass, typename Visitor>
void VisitReferences(mirror::Class* klass, const Visitor& visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Get the descriptor of the class. In a few cases a std::string is required; rather than
// always creating one, the storage argument is populated and its internal c_str() returned. We
// do this to avoid memory allocation in the common case.
- const char* GetDescriptor(std::string* storage) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const char* GetDescriptor(std::string* storage) SHARED_REQUIRES(Locks::mutator_lock_);
- const char* GetArrayDescriptor(std::string* storage) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const char* GetArrayDescriptor(std::string* storage) SHARED_REQUIRES(Locks::mutator_lock_);
- bool DescriptorEquals(const char* match) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool DescriptorEquals(const char* match) SHARED_REQUIRES(Locks::mutator_lock_);
- const DexFile::ClassDef* GetClassDef() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const DexFile::ClassDef* GetClassDef() SHARED_REQUIRES(Locks::mutator_lock_);
- ALWAYS_INLINE uint32_t NumDirectInterfaces() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE uint32_t NumDirectInterfaces() SHARED_REQUIRES(Locks::mutator_lock_);
- uint16_t GetDirectInterfaceTypeIdx(uint32_t idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ uint16_t GetDirectInterfaceTypeIdx(uint32_t idx) SHARED_REQUIRES(Locks::mutator_lock_);
static mirror::Class* GetDirectInterface(Thread* self, Handle<mirror::Class> klass,
uint32_t idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- const char* GetSourceFile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const char* GetSourceFile() SHARED_REQUIRES(Locks::mutator_lock_);
- std::string GetLocation() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string GetLocation() SHARED_REQUIRES(Locks::mutator_lock_);
- const DexFile& GetDexFile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const DexFile& GetDexFile() SHARED_REQUIRES(Locks::mutator_lock_);
- const DexFile::TypeList* GetInterfaceTypeList() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const DexFile::TypeList* GetInterfaceTypeList() SHARED_REQUIRES(Locks::mutator_lock_);
// Asserts we are initialized or initializing in the given thread.
void AssertInitializedOrInitializingInThread(Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
Class* CopyOf(Thread* self, int32_t new_length, ArtMethod* const (&imt)[mirror::Class::kImtSize],
- size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ size_t pointer_size)
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
// For proxy class only.
- ObjectArray<Class>* GetInterfaces() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ObjectArray<Class>* GetInterfaces() SHARED_REQUIRES(Locks::mutator_lock_);
// For proxy class only.
- ObjectArray<ObjectArray<Class>>* GetThrows() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ObjectArray<ObjectArray<Class>>* GetThrows() SHARED_REQUIRES(Locks::mutator_lock_);
// For reference class only.
- MemberOffset GetDisableIntrinsicFlagOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- MemberOffset GetSlowPathFlagOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool GetSlowPathEnabled() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetSlowPath(bool enabled) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ MemberOffset GetDisableIntrinsicFlagOffset() SHARED_REQUIRES(Locks::mutator_lock_);
+ MemberOffset GetSlowPathFlagOffset() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool GetSlowPathEnabled() SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetSlowPath(bool enabled) SHARED_REQUIRES(Locks::mutator_lock_);
- ObjectArray<String>* GetDexCacheStrings() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ObjectArray<String>* GetDexCacheStrings() SHARED_REQUIRES(Locks::mutator_lock_);
void SetDexCacheStrings(ObjectArray<String>* new_dex_cache_strings)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static MemberOffset DexCacheStringsOffset() {
return OFFSET_OF_OBJECT_MEMBER(Class, dex_cache_strings_);
}
@@ -1095,7 +1098,7 @@
// May cause thread suspension due to EqualParameters.
ArtMethod* GetDeclaredConstructor(
Thread* self, Handle<mirror::ObjectArray<mirror::Class>> args)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Used to initialize a class in the allocation code path to ensure it is guarded by a StoreStore
// fence.
@@ -1105,7 +1108,7 @@
}
void operator()(mirror::Object* obj, size_t usable_size) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
private:
const uint32_t class_size_;
@@ -1114,7 +1117,7 @@
};
// Returns true if the class loader is null, i.e. the class loader is the bootstrap class loader.
- bool IsBootStrapClassLoaded() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsBootStrapClassLoaded() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetClassLoader() == nullptr;
}
@@ -1127,34 +1130,34 @@
}
ALWAYS_INLINE ArtMethod* GetDirectMethodsPtrUnchecked()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE ArtMethod* GetVirtualMethodsPtrUnchecked()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
private:
- void SetVerifyErrorClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetVerifyErrorClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
template <bool throw_on_failure, bool use_referrers_cache>
bool ResolvedFieldAccessTest(Class* access_to, ArtField* field,
uint32_t field_idx, DexCache* dex_cache)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template <bool throw_on_failure, bool use_referrers_cache, InvokeType throw_invoke_type>
bool ResolvedMethodAccessTest(Class* access_to, ArtMethod* resolved_method,
uint32_t method_idx, DexCache* dex_cache)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- bool Implements(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool IsArrayAssignableFromArray(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool IsAssignableFromArray(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool Implements(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsArrayAssignableFromArray(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsAssignableFromArray(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
- void CheckObjectAlloc() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void CheckObjectAlloc() SHARED_REQUIRES(Locks::mutator_lock_);
// Unchecked editions are for root visiting.
- ArtField* GetSFieldsUnchecked() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ArtField* GetIFieldsUnchecked() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtField* GetSFieldsUnchecked() SHARED_REQUIRES(Locks::mutator_lock_);
+ ArtField* GetIFieldsUnchecked() SHARED_REQUIRES(Locks::mutator_lock_);
- bool ProxyDescriptorEquals(const char* match) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool ProxyDescriptorEquals(const char* match) SHARED_REQUIRES(Locks::mutator_lock_);
// Check that the pointer size matches the one in the class linker.
ALWAYS_INLINE static void CheckPointerSize(size_t pointer_size);
diff --git a/runtime/mirror/class_loader.h b/runtime/mirror/class_loader.h
index b10a296..134f1cd 100644
--- a/runtime/mirror/class_loader.h
+++ b/runtime/mirror/class_loader.h
@@ -32,7 +32,7 @@
static constexpr uint32_t InstanceSize() {
return sizeof(ClassLoader);
}
- ClassLoader* GetParent() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ClassLoader* GetParent() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetFieldObject<ClassLoader>(OFFSET_OF_OBJECT_MEMBER(ClassLoader, parent_));
}
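GetParent() here exposes the class-loader delegation chain, where a null loader
stands for the bootstrap class loader. A small hypothetical walk over such a
chain (ClassLoader below is a stand-in with the same GetParent() shape):

    struct ClassLoader {
      ClassLoader* parent = nullptr;
      ClassLoader* GetParent() { return parent; }
    };

    // Counts loaders until the bootstrap loader (null) ends the chain.
    int DelegationDepth(ClassLoader* cl) {
      int depth = 0;
      for (; cl != nullptr; cl = cl->GetParent()) {
        ++depth;
      }
      return depth;
    }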
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index 0ce83ec..ba49a15 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -48,12 +48,12 @@
void Init(const DexFile* dex_file, String* location, ObjectArray<String>* strings,
ObjectArray<Class>* types, PointerArray* methods, PointerArray* fields,
- size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
void Fixup(ArtMethod* trampoline, size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- String* GetLocation() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ String* GetLocation() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(DexCache, location_));
}
@@ -73,76 +73,76 @@
return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_methods_);
}
- size_t NumStrings() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ size_t NumStrings() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetStrings()->GetLength();
}
- size_t NumResolvedTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ size_t NumResolvedTypes() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetResolvedTypes()->GetLength();
}
- size_t NumResolvedMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ size_t NumResolvedMethods() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetResolvedMethods()->GetLength();
}
- size_t NumResolvedFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ size_t NumResolvedFields() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetResolvedFields()->GetLength();
}
- String* GetResolvedString(uint32_t string_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ String* GetResolvedString(uint32_t string_idx) SHARED_REQUIRES(Locks::mutator_lock_) {
return GetStrings()->Get(string_idx);
}
void SetResolvedString(uint32_t string_idx, String* resolved) ALWAYS_INLINE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// TODO default transaction support.
GetStrings()->Set(string_idx, resolved);
}
Class* GetResolvedType(uint32_t type_idx) ALWAYS_INLINE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return GetResolvedTypes()->Get(type_idx);
}
void SetResolvedType(uint32_t type_idx, Class* resolved)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE ArtMethod* GetResolvedMethod(uint32_t method_idx, size_t ptr_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE void SetResolvedMethod(uint32_t method_idx, ArtMethod* resolved, size_t ptr_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Pointer sized variant, used for patching.
ALWAYS_INLINE ArtField* GetResolvedField(uint32_t idx, size_t ptr_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Pointer sized variant, used for patching.
ALWAYS_INLINE void SetResolvedField(uint32_t idx, ArtField* field, size_t ptr_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- ObjectArray<String>* GetStrings() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ObjectArray<String>* GetStrings() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
return GetFieldObject<ObjectArray<String>>(StringsOffset());
}
- ObjectArray<Class>* GetResolvedTypes() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ObjectArray<Class>* GetResolvedTypes() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
return GetFieldObject<ObjectArray<Class>>(
OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_types_));
}
- PointerArray* GetResolvedMethods() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ PointerArray* GetResolvedMethods() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
return GetFieldObject<PointerArray>(ResolvedMethodsOffset());
}
- PointerArray* GetResolvedFields() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ PointerArray* GetResolvedFields() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
return GetFieldObject<PointerArray>(ResolvedFieldsOffset());
}
- const DexFile* GetDexFile() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const DexFile* GetDexFile() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
return GetFieldPtr<const DexFile*>(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_));
}
- void SetDexFile(const DexFile* dex_file) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ void SetDexFile(const DexFile* dex_file) SHARED_REQUIRES(Locks::mutator_lock_)
ALWAYS_INLINE {
return SetFieldPtr<false>(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_), dex_file);
}
diff --git a/runtime/mirror/field.h b/runtime/mirror/field.h
index d927f0c..edaddbd 100644
--- a/runtime/mirror/field.h
+++ b/runtime/mirror/field.h
@@ -36,66 +36,66 @@
// C++ mirror of java.lang.reflect.Field.
class MANAGED Field : public AccessibleObject {
public:
- static mirror::Class* StaticClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ static mirror::Class* StaticClass() SHARED_REQUIRES(Locks::mutator_lock_) {
return static_class_.Read();
}
- static mirror::Class* ArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ static mirror::Class* ArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) {
return array_class_.Read();
}
- ALWAYS_INLINE uint32_t GetDexFieldIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE uint32_t GetDexFieldIndex() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetField32(OFFSET_OF_OBJECT_MEMBER(Field, dex_field_index_));
}
- mirror::Class* GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Class* GetDeclaringClass() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(Field, declaring_class_));
}
- uint32_t GetAccessFlags() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uint32_t GetAccessFlags() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetField32(OFFSET_OF_OBJECT_MEMBER(Field, access_flags_));
}
- bool IsStatic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsStatic() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccStatic) != 0;
}
- bool IsFinal() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsFinal() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccFinal) != 0;
}
- bool IsVolatile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsVolatile() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccVolatile) != 0;
}
ALWAYS_INLINE Primitive::Type GetTypeAsPrimitiveType()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return GetType()->GetPrimitiveType();
}
- mirror::Class* GetType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Class* GetType() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetFieldObject<mirror::Class>(OFFSET_OF_OBJECT_MEMBER(Field, type_));
}
- int32_t GetOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ int32_t GetOffset() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetField32(OFFSET_OF_OBJECT_MEMBER(Field, offset_));
}
- static void SetClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static void ResetClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void SetClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+ static void ResetClass() SHARED_REQUIRES(Locks::mutator_lock_);
- static void SetArrayClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static void ResetArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void SetArrayClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+ static void ResetArrayClass() SHARED_REQUIRES(Locks::mutator_lock_);
- static void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
// Slow, try to use only for PrettyField and such.
- ArtField* GetArtField() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtField* GetArtField() SHARED_REQUIRES(Locks::mutator_lock_);
template <bool kTransactionActive = false>
static mirror::Field* CreateFromArtField(Thread* self, ArtField* field,
bool force_resolve)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
private:
HeapReference<mirror::Class> declaring_class_;
@@ -105,27 +105,27 @@
int32_t offset_;
template<bool kTransactionActive>
- void SetDeclaringClass(mirror::Class* c) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetDeclaringClass(mirror::Class* c) SHARED_REQUIRES(Locks::mutator_lock_) {
SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, declaring_class_), c);
}
template<bool kTransactionActive>
- void SetType(mirror::Class* type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetType(mirror::Class* type) SHARED_REQUIRES(Locks::mutator_lock_) {
SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, type_), type);
}
template<bool kTransactionActive>
- void SetAccessFlags(uint32_t flags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetAccessFlags(uint32_t flags) SHARED_REQUIRES(Locks::mutator_lock_) {
SetField32<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, access_flags_), flags);
}
template<bool kTransactionActive>
- void SetDexFieldIndex(uint32_t idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetDexFieldIndex(uint32_t idx) SHARED_REQUIRES(Locks::mutator_lock_) {
SetField32<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, dex_field_index_), idx);
}
template<bool kTransactionActive>
- void SetOffset(uint32_t offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetOffset(uint32_t offset) SHARED_REQUIRES(Locks::mutator_lock_) {
SetField32<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, offset_), offset);
}
diff --git a/runtime/mirror/iftable.h b/runtime/mirror/iftable.h
index 1ea5bee..b21ecdf 100644
--- a/runtime/mirror/iftable.h
+++ b/runtime/mirror/iftable.h
@@ -25,34 +25,34 @@
class MANAGED IfTable FINAL : public ObjectArray<Object> {
public:
- ALWAYS_INLINE Class* GetInterface(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE Class* GetInterface(int32_t i) SHARED_REQUIRES(Locks::mutator_lock_) {
Class* interface = GetWithoutChecks((i * kMax) + kInterface)->AsClass();
DCHECK(interface != nullptr);
return interface;
}
ALWAYS_INLINE void SetInterface(int32_t i, Class* interface)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- PointerArray* GetMethodArray(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ PointerArray* GetMethodArray(int32_t i) SHARED_REQUIRES(Locks::mutator_lock_) {
auto* method_array = down_cast<PointerArray*>(Get((i * kMax) + kMethodArray));
DCHECK(method_array != nullptr);
return method_array;
}
- size_t GetMethodArrayCount(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ size_t GetMethodArrayCount(int32_t i) SHARED_REQUIRES(Locks::mutator_lock_) {
auto* method_array = down_cast<PointerArray*>(Get((i * kMax) + kMethodArray));
return method_array == nullptr ? 0u : method_array->GetLength();
}
- void SetMethodArray(int32_t i, PointerArray* arr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetMethodArray(int32_t i, PointerArray* arr) SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(arr != nullptr);
auto idx = i * kMax + kMethodArray;
DCHECK(Get(idx) == nullptr);
Set<false>(idx, arr);
}
- size_t Count() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ size_t Count() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetLength() / kMax;
}
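An aside on the indexing used by the IfTable accessors above: the table flattens (interface, method array) pairs into a single backing ObjectArray with stride kMax, so row i's interface sits at i * kMax + kInterface, its methods at i * kMax + kMethodArray, and Count() divides the raw length by the stride. A stand-alone sketch of that layout, using a plain std::vector instead of ART's ObjectArray:

    #include <cstddef>
    #include <vector>

    // Flattened pair layout: each logical row occupies kMax consecutive slots.
    enum : size_t { kInterface = 0, kMethodArray = 1, kMax = 2 };

    template <typename T>
    class StridedTable {
     public:
      explicit StridedTable(size_t rows) : slots_(rows * kMax) {}
      T& Interface(size_t i) { return slots_[i * kMax + kInterface]; }
      T& MethodArray(size_t i) { return slots_[i * kMax + kMethodArray]; }
      // Mirrors IfTable::Count(): raw length divided by the stride.
      size_t Count() const { return slots_.size() / kMax; }
     private:
      std::vector<T> slots_;
    };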
diff --git a/runtime/mirror/method.h b/runtime/mirror/method.h
index 42c76c0..0c28e4f 100644
--- a/runtime/mirror/method.h
+++ b/runtime/mirror/method.h
@@ -29,25 +29,25 @@
class MANAGED Method : public AbstractMethod {
public:
static Method* CreateFromArtMethod(Thread* self, ArtMethod* method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
- static mirror::Class* StaticClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ static mirror::Class* StaticClass() SHARED_REQUIRES(Locks::mutator_lock_) {
return static_class_.Read();
}
- static void SetClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void SetClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
- static void ResetClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void ResetClass() SHARED_REQUIRES(Locks::mutator_lock_);
- static mirror::Class* ArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ static mirror::Class* ArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) {
return array_class_.Read();
}
- static void SetArrayClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void SetArrayClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
- static void ResetArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void ResetArrayClass() SHARED_REQUIRES(Locks::mutator_lock_);
- static void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
private:
static GcRoot<Class> static_class_; // java.lang.reflect.Method.class.
@@ -60,25 +60,25 @@
class MANAGED Constructor: public AbstractMethod {
public:
static Constructor* CreateFromArtMethod(Thread* self, ArtMethod* method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
- static mirror::Class* StaticClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ static mirror::Class* StaticClass() SHARED_REQUIRES(Locks::mutator_lock_) {
return static_class_.Read();
}
- static void SetClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void SetClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
- static void ResetClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void ResetClass() SHARED_REQUIRES(Locks::mutator_lock_);
- static mirror::Class* ArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ static mirror::Class* ArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) {
return array_class_.Read();
}
- static void SetArrayClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void SetArrayClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
- static void ResetArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void ResetArrayClass() SHARED_REQUIRES(Locks::mutator_lock_);
- static void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
private:
static GcRoot<Class> static_class_; // java.lang.reflect.Constructor.class.
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index e019d5a..c5610b5 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -477,7 +477,7 @@
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags,
bool kIsVolatile>
inline void Object::SetFieldBoolean(MemberOffset field_offset, uint8_t new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (kCheckTransaction) {
DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
}
@@ -495,7 +495,7 @@
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags,
bool kIsVolatile>
inline void Object::SetFieldByte(MemberOffset field_offset, int8_t new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (kCheckTransaction) {
DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
}
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index b177e2f..80decaa 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -47,7 +47,7 @@
: dest_obj_(dest_obj) {}
void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
- ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
// GetFieldObject() contains a RB.
Object* ref = obj->GetFieldObject<Object>(offset);
// No WB here as a large object space does not have a card table
@@ -56,13 +56,18 @@
}
void operator()(mirror::Class* klass, mirror::Reference* ref) const
- ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
// Copy java.lang.ref.Reference.referent which isn't visited in
// Object::VisitReferences().
DCHECK(klass->IsTypeOfReferenceClass());
this->operator()(ref, mirror::Reference::ReferentOffset(), false);
}
+ // Unused since we don't copy class native roots.
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
+ const {}
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
+
private:
Object* const dest_obj_;
};
@@ -107,7 +112,7 @@
}
void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
Object::CopyObject(self_, obj, orig_->Get(), num_bytes_);
}
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index f1c96b5..eea9f37 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -84,40 +84,40 @@
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- ALWAYS_INLINE Class* GetClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE Class* GetClass() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- void SetClass(Class* new_klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetClass(Class* new_klass) SHARED_REQUIRES(Locks::mutator_lock_);
- Object* GetReadBarrierPointer() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ Object* GetReadBarrierPointer() SHARED_REQUIRES(Locks::mutator_lock_);
#ifndef USE_BAKER_OR_BROOKS_READ_BARRIER
NO_RETURN
#endif
- void SetReadBarrierPointer(Object* rb_ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetReadBarrierPointer(Object* rb_ptr) SHARED_REQUIRES(Locks::mutator_lock_);
#ifndef USE_BAKER_OR_BROOKS_READ_BARRIER
NO_RETURN
#endif
bool AtomicSetReadBarrierPointer(Object* expected_rb_ptr, Object* rb_ptr)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void AssertReadBarrierPointer() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ void AssertReadBarrierPointer() const SHARED_REQUIRES(Locks::mutator_lock_);
// The verifier treats all interfaces as java.lang.Object and relies on runtime checks in
// invoke-interface to detect incompatible interface types.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool VerifierInstanceOf(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool VerifierInstanceOf(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE bool InstanceOf(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE bool InstanceOf(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- size_t SizeOf() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ size_t SizeOf() SHARED_REQUIRES(Locks::mutator_lock_);
- Object* Clone(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ Object* Clone(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Roles::uninterruptible_);
int32_t IdentityHashCode() const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(Locks::thread_list_lock_,
- Locks::thread_suspend_count_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
static MemberOffset MonitorOffset() {
return OFFSET_OF_OBJECT_MEMBER(Object, monitor_);
@@ -126,298 +126,298 @@
// As_volatile can be false if the mutators are suspended. This is an optimization since it
// avoids the barriers.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- LockWord GetLockWord(bool as_volatile) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ LockWord GetLockWord(bool as_volatile) SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- void SetLockWord(LockWord new_val, bool as_volatile) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetLockWord(LockWord new_val, bool as_volatile) SHARED_REQUIRES(Locks::mutator_lock_);
bool CasLockWordWeakSequentiallyConsistent(LockWord old_val, LockWord new_val)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool CasLockWordWeakRelaxed(LockWord old_val, LockWord new_val)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
uint32_t GetLockOwnerThreadId();
- mirror::Object* MonitorEnter(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ mirror::Object* MonitorEnter(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_)
EXCLUSIVE_LOCK_FUNCTION();
- bool MonitorExit(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ bool MonitorExit(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_)
UNLOCK_FUNCTION();
- void Notify(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void NotifyAll(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void Wait(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void Wait(Thread* self, int64_t timeout, int32_t nanos) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void Notify(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
+ void NotifyAll(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
+ void Wait(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
+ void Wait(Thread* self, int64_t timeout, int32_t nanos) SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsClass() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- Class* AsClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ Class* AsClass() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsObjectArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsObjectArray() SHARED_REQUIRES(Locks::mutator_lock_);
template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ObjectArray<T>* AsObjectArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ObjectArray<T>* AsObjectArray() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsArrayInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsArrayInstance() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- Array* AsArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ Array* AsArray() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- BooleanArray* AsBooleanArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ BooleanArray* AsBooleanArray() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ByteArray* AsByteArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ByteArray* AsByteArray() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ByteArray* AsByteSizedArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ByteArray* AsByteSizedArray() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- CharArray* AsCharArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ CharArray* AsCharArray() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ShortArray* AsShortArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ShortArray* AsShortArray() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ShortArray* AsShortSizedArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ShortArray* AsShortSizedArray() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsIntArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsIntArray() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- IntArray* AsIntArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ IntArray* AsIntArray() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsLongArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsLongArray() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- LongArray* AsLongArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ LongArray* AsLongArray() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsFloatArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsFloatArray() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- FloatArray* AsFloatArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ FloatArray* AsFloatArray() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsDoubleArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsDoubleArray() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- DoubleArray* AsDoubleArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ DoubleArray* AsDoubleArray() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsString() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- String* AsString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ String* AsString() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- Throwable* AsThrowable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ Throwable* AsThrowable() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsReferenceInstance() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- Reference* AsReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ Reference* AsReference() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsWeakReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsWeakReferenceInstance() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsSoftReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsSoftReferenceInstance() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsFinalizerReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsFinalizerReferenceInstance() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- FinalizerReference* AsFinalizerReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ FinalizerReference* AsFinalizerReference() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPhantomReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsPhantomReferenceInstance() SHARED_REQUIRES(Locks::mutator_lock_);
// Accessor for Java type fields.
template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier, bool kIsVolatile = false>
ALWAYS_INLINE T* GetFieldObject(MemberOffset field_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
ALWAYS_INLINE T* GetFieldObjectVolatile(MemberOffset field_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE void SetFieldObjectWithoutWriteBarrier(MemberOffset field_offset, Object* new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE void SetFieldObject(MemberOffset field_offset, Object* new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE void SetFieldObjectVolatile(MemberOffset field_offset, Object* new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldWeakSequentiallyConsistentObject(MemberOffset field_offset, Object* old_value,
Object* new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier(MemberOffset field_offset,
Object* old_value,
Object* new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldStrongSequentiallyConsistentObject(MemberOffset field_offset, Object* old_value,
Object* new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldStrongSequentiallyConsistentObjectWithoutWriteBarrier(MemberOffset field_offset,
Object* old_value,
Object* new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
HeapReference<Object>* GetFieldObjectReferenceAddr(MemberOffset field_offset);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE uint8_t GetFieldBoolean(MemberOffset field_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE int8_t GetFieldByte(MemberOffset field_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE uint8_t GetFieldBooleanVolatile(MemberOffset field_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE int8_t GetFieldByteVolatile(MemberOffset field_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE void SetFieldBoolean(MemberOffset field_offset, uint8_t new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE void SetFieldByte(MemberOffset field_offset, int8_t new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE void SetFieldBooleanVolatile(MemberOffset field_offset, uint8_t new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE void SetFieldByteVolatile(MemberOffset field_offset, int8_t new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE uint16_t GetFieldChar(MemberOffset field_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE int16_t GetFieldShort(MemberOffset field_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE uint16_t GetFieldCharVolatile(MemberOffset field_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE int16_t GetFieldShortVolatile(MemberOffset field_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE void SetFieldChar(MemberOffset field_offset, uint16_t new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE void SetFieldShort(MemberOffset field_offset, int16_t new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE void SetFieldCharVolatile(MemberOffset field_offset, uint16_t new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE void SetFieldShortVolatile(MemberOffset field_offset, int16_t new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE int32_t GetField32(MemberOffset field_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE int32_t GetField32Volatile(MemberOffset field_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE void SetField32(MemberOffset field_offset, int32_t new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE void SetField32Volatile(MemberOffset field_offset, int32_t new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE bool CasFieldWeakSequentiallyConsistent32(MemberOffset field_offset,
int32_t old_value, int32_t new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldWeakRelaxed32(MemberOffset field_offset, int32_t old_value,
int32_t new_value) ALWAYS_INLINE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldStrongSequentiallyConsistent32(MemberOffset field_offset, int32_t old_value,
int32_t new_value) ALWAYS_INLINE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE int64_t GetField64(MemberOffset field_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE int64_t GetField64Volatile(MemberOffset field_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE void SetField64(MemberOffset field_offset, int64_t new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE void SetField64Volatile(MemberOffset field_offset, int64_t new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldWeakSequentiallyConsistent64(MemberOffset field_offset, int64_t old_value,
int64_t new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldStrongSequentiallyConsistent64(MemberOffset field_offset, int64_t old_value,
int64_t new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, typename T>
void SetFieldPtr(MemberOffset field_offset, T new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
SetFieldPtrWithSize<kTransactionActive, kCheckTransaction, kVerifyFlags>(
field_offset, new_value, sizeof(void*));
}
@@ -426,7 +426,7 @@
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, typename T>
ALWAYS_INLINE void SetFieldPtrWithSize(MemberOffset field_offset, T new_value,
size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(pointer_size == 4 || pointer_size == 8) << pointer_size;
if (pointer_size == 4) {
intptr_t ptr = reinterpret_cast<intptr_t>(new_value);
@@ -439,13 +439,13 @@
}
}
// TODO fix thread safety analysis broken by the use of template. This should be
- // SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
+ // SHARED_REQUIRES(Locks::mutator_lock_).
template <const bool kVisitClass, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
typename Visitor, typename JavaLangRefVisitor = VoidFunctor>
void VisitReferences(const Visitor& visitor, const JavaLangRefVisitor& ref_visitor)
NO_THREAD_SAFETY_ANALYSIS;
- ArtField* FindFieldByOffset(MemberOffset offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtField* FindFieldByOffset(MemberOffset offset) SHARED_REQUIRES(Locks::mutator_lock_);
// Used by object_test.
static void SetHashCodeSeed(uint32_t new_seed);
@@ -456,13 +456,13 @@
// Accessors for non-Java type fields
template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
T GetFieldPtr(MemberOffset field_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return GetFieldPtrWithSize<T, kVerifyFlags, kIsVolatile>(field_offset, sizeof(void*));
}
template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE T GetFieldPtrWithSize(MemberOffset field_offset, size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(pointer_size == 4 || pointer_size == 8) << pointer_size;
if (pointer_size == 4) {
return reinterpret_cast<T>(GetField32<kVerifyFlags, kIsVolatile>(field_offset));
@@ -480,25 +480,25 @@
NO_THREAD_SAFETY_ANALYSIS;
template<bool kVisitClass, typename Visitor>
void VisitInstanceFieldsReferences(mirror::Class* klass, const Visitor& visitor) HOT_ATTR
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kVisitClass, typename Visitor>
void VisitStaticFieldsReferences(mirror::Class* klass, const Visitor& visitor) HOT_ATTR
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
private:
template<typename kSize, bool kIsVolatile>
ALWAYS_INLINE void SetField(MemberOffset field_offset, kSize new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<typename kSize, bool kIsVolatile>
ALWAYS_INLINE kSize GetField(MemberOffset field_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Verify the type correctness of stores to fields.
// TODO: This can cause thread suspension and isn't moving GC safe.
void CheckFieldAssignmentImpl(MemberOffset field_offset, Object* new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void CheckFieldAssignment(MemberOffset field_offset, Object* new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (kCheckFieldAssignments) {
CheckFieldAssignmentImpl(field_offset, new_value);
}
@@ -509,7 +509,7 @@
// Class::CopyOf().
static Object* CopyObject(Thread* self, mirror::Object* dest, mirror::Object* src,
size_t num_bytes)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static Atomic<uint32_t> hash_code_seed;
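The IdentityHashCode hunk above shows the second half of this migration: LOCKS_EXCLUDED(x) becomes the negative requirement REQUIRES(!x), which -Wthread-safety-negative can enforce transitively at call sites. A toy example of what the analysis then checks, with a hypothetical capability-annotated mutex (names are illustrative, taken from the Clang thread-safety docs, not ART's Mutex):

    class __attribute__((capability("mutex"))) ToyMutex {
     public:
      void Lock() __attribute__((acquire_capability()));
      void Unlock() __attribute__((release_capability()));
    };

    ToyMutex gLock;

    // Declares that callers must NOT hold gLock, since we acquire it here.
    void Inflate() __attribute__((requires_capability(!gLock))) {
      gLock.Lock();
      gLock.Unlock();
    }

    void Caller() {
      // Built with -Wthread-safety -Wthread-safety-negative, Clang warns here
      // unless Caller() also declares requires_capability(!gLock): negative
      // capabilities must be propagated up the call chain.
      Inflate();
    }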
diff --git a/runtime/mirror/object_array.h b/runtime/mirror/object_array.h
index 5eddc18..607b000 100644
--- a/runtime/mirror/object_array.h
+++ b/runtime/mirror/object_array.h
@@ -32,21 +32,21 @@
static ObjectArray<T>* Alloc(Thread* self, Class* object_array_class, int32_t length,
gc::AllocatorType allocator_type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
static ObjectArray<T>* Alloc(Thread* self, Class* object_array_class, int32_t length)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
- T* Get(int32_t i) ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ T* Get(int32_t i) ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_);
// Returns true if the object can be stored into the array. If not, throws
// an ArrayStoreException and returns false.
- // TODO fix thread safety analysis: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
+ // TODO fix thread safety analysis: should be SHARED_REQUIRES(Locks::mutator_lock_).
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CheckAssignable(T* object) NO_THREAD_SAFETY_ANALYSIS;
- ALWAYS_INLINE void Set(int32_t i, T* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // TODO fix thread safety analysis: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
+ ALWAYS_INLINE void Set(int32_t i, T* object) SHARED_REQUIRES(Locks::mutator_lock_);
+ // TODO fix thread safety analysis: should be SHARED_REQUIRES(Locks::mutator_lock_).
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE void Set(int32_t i, T* object) NO_THREAD_SAFETY_ANALYSIS;
@@ -54,37 +54,37 @@
// Set element without bound and element type checks, to be used in limited
// circumstances, such as during boot image writing.
// TODO fix thread safety analysis broken by the use of template. This should be
- // SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
+ // SHARED_REQUIRES(Locks::mutator_lock_).
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE void SetWithoutChecks(int32_t i, T* object) NO_THREAD_SAFETY_ANALYSIS;
// TODO fix thread safety analysis broken by the use of template. This should be
- // SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
+ // SHARED_REQUIRES(Locks::mutator_lock_).
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE void SetWithoutChecksAndWriteBarrier(int32_t i, T* object)
NO_THREAD_SAFETY_ANALYSIS;
- ALWAYS_INLINE T* GetWithoutChecks(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE T* GetWithoutChecks(int32_t i) SHARED_REQUIRES(Locks::mutator_lock_);
// Copy src into this array (dealing with overlaps as memmove does) without assignability checks.
void AssignableMemmove(int32_t dst_pos, ObjectArray<T>* src, int32_t src_pos,
- int32_t count) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ int32_t count) SHARED_REQUIRES(Locks::mutator_lock_);
// Copy src into this array assuming no overlap and without assignability checks.
void AssignableMemcpy(int32_t dst_pos, ObjectArray<T>* src, int32_t src_pos,
- int32_t count) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ int32_t count) SHARED_REQUIRES(Locks::mutator_lock_);
// Copy src into this array with assignability checks.
void AssignableCheckingMemcpy(int32_t dst_pos, ObjectArray<T>* src, int32_t src_pos,
int32_t count, bool throw_exception)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ObjectArray<T>* CopyOf(Thread* self, int32_t new_length)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
// TODO fix thread safety analysis broken by the use of template. This should be
- // SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
+ // SHARED_REQUIRES(Locks::mutator_lock_).
template<const bool kVisitClass, typename Visitor>
void VisitReferences(const Visitor& visitor) NO_THREAD_SAFETY_ANALYSIS;
diff --git a/runtime/mirror/object_reference.h b/runtime/mirror/object_reference.h
index 055be85..2a5c88e 100644
--- a/runtime/mirror/object_reference.h
+++ b/runtime/mirror/object_reference.h
@@ -33,11 +33,11 @@
template<bool kPoisonReferences, class MirrorType>
class MANAGED ObjectReference {
public:
- MirrorType* AsMirrorPtr() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ MirrorType* AsMirrorPtr() const SHARED_REQUIRES(Locks::mutator_lock_) {
return UnCompress();
}
- void Assign(MirrorType* other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void Assign(MirrorType* other) SHARED_REQUIRES(Locks::mutator_lock_) {
reference_ = Compress(other);
}
@@ -56,18 +56,18 @@
protected:
ObjectReference<kPoisonReferences, MirrorType>(MirrorType* mirror_ptr)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: reference_(Compress(mirror_ptr)) {
}
// Compress reference to its bit representation.
- static uint32_t Compress(MirrorType* mirror_ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ static uint32_t Compress(MirrorType* mirror_ptr) SHARED_REQUIRES(Locks::mutator_lock_) {
uintptr_t as_bits = reinterpret_cast<uintptr_t>(mirror_ptr);
return static_cast<uint32_t>(kPoisonReferences ? -as_bits : as_bits);
}
// Uncompress an encoded reference from its bit representation.
- MirrorType* UnCompress() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ MirrorType* UnCompress() const SHARED_REQUIRES(Locks::mutator_lock_) {
uintptr_t as_bits = kPoisonReferences ? -reference_ : reference_;
return reinterpret_cast<MirrorType*>(as_bits);
}
@@ -83,11 +83,11 @@
class MANAGED HeapReference : public ObjectReference<kPoisonHeapReferences, MirrorType> {
public:
static HeapReference<MirrorType> FromMirrorPtr(MirrorType* mirror_ptr)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return HeapReference<MirrorType>(mirror_ptr);
}
private:
- HeapReference<MirrorType>(MirrorType* mirror_ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ HeapReference<MirrorType>(MirrorType* mirror_ptr) SHARED_REQUIRES(Locks::mutator_lock_)
: ObjectReference<kPoisonHeapReferences, MirrorType>(mirror_ptr) {}
};
@@ -95,16 +95,16 @@
template<class MirrorType>
class MANAGED CompressedReference : public mirror::ObjectReference<false, MirrorType> {
public:
- CompressedReference<MirrorType>() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ CompressedReference<MirrorType>() SHARED_REQUIRES(Locks::mutator_lock_)
: mirror::ObjectReference<false, MirrorType>(nullptr) {}
static CompressedReference<MirrorType> FromMirrorPtr(MirrorType* p)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return CompressedReference<MirrorType>(p);
}
private:
- CompressedReference<MirrorType>(MirrorType* p) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ CompressedReference<MirrorType>(MirrorType* p) SHARED_REQUIRES(Locks::mutator_lock_)
: mirror::ObjectReference<false, MirrorType>(p) {}
};
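One more aside, on the ObjectReference code just above: with kPoisonReferences set, Compress() stores the unsigned negation of the pointer bits, so a stray use of the raw 32-bit value points at unmapped memory rather than a live object, and UnCompress() negates again to recover the pointer. A stand-alone sketch of that round trip (free functions here, not ART's templates; like ART, it assumes heap addresses fit in 32 bits):

    #include <cassert>
    #include <cstdint>

    // Encode: unsigned negation (mod 2^32) makes the stored bits unusable
    // as a direct address.
    uint32_t Compress(void* ptr) {
      uint32_t as_bits = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(ptr));
      return -as_bits;
    }

    // Decode: unsigned negation is an involution, so this round-trips.
    void* UnCompress(uint32_t poisoned) {
      return reinterpret_cast<void*>(static_cast<uintptr_t>(-poisoned));
    }

    int main() {
      void* p = reinterpret_cast<void*>(0x1000u);  // A 32-bit-range address.
      assert(UnCompress(Compress(p)) == p);
    }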
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index 85ea28f..f5a0445 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -48,7 +48,7 @@
const char* utf8_in,
const char* utf16_expected_le,
int32_t expected_hash)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
std::unique_ptr<uint16_t[]> utf16_expected(new uint16_t[expected_utf16_length]);
for (int32_t i = 0; i < expected_utf16_length; i++) {
uint16_t ch = (((utf16_expected_le[i*2 + 0] & 0xff) << 8) |
diff --git a/runtime/mirror/reference.h b/runtime/mirror/reference.h
index 4bbdb99..51ae760 100644
--- a/runtime/mirror/reference.h
+++ b/runtime/mirror/reference.h
@@ -62,49 +62,49 @@
return OFFSET_OF_OBJECT_MEMBER(Reference, referent_);
}
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- Object* GetReferent() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Object* GetReferent() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetFieldObjectVolatile<Object, kDefaultVerifyFlags, kReadBarrierOption>(
ReferentOffset());
}
template<bool kTransactionActive>
- void SetReferent(Object* referent) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetReferent(Object* referent) SHARED_REQUIRES(Locks::mutator_lock_) {
SetFieldObjectVolatile<kTransactionActive>(ReferentOffset(), referent);
}
template<bool kTransactionActive>
- void ClearReferent() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void ClearReferent() SHARED_REQUIRES(Locks::mutator_lock_) {
SetFieldObjectVolatile<kTransactionActive>(ReferentOffset(), nullptr);
}
// Volatile read/write is not necessary since the java pending next is only accessed from
// the java threads for cleared references. Once these cleared references have a null referent,
// we never end up reading their pending next from the GC again.
- Reference* GetPendingNext() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Reference* GetPendingNext() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetFieldObject<Reference>(PendingNextOffset());
}
template<bool kTransactionActive>
- void SetPendingNext(Reference* pending_next) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetPendingNext(Reference* pending_next) SHARED_REQUIRES(Locks::mutator_lock_) {
SetFieldObject<kTransactionActive>(PendingNextOffset(), pending_next);
}
- bool IsEnqueued() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsEnqueued() SHARED_REQUIRES(Locks::mutator_lock_) {
// Since the references are stored as cyclic lists, once enqueued the pending
// next is always non-null.
return GetPendingNext() != nullptr;
}
- bool IsEnqueuable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsEnqueuable() SHARED_REQUIRES(Locks::mutator_lock_);
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- static Class* GetJavaLangRefReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ static Class* GetJavaLangRefReference() SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(!java_lang_ref_Reference_.IsNull());
return java_lang_ref_Reference_.Read<kReadBarrierOption>();
}
static void SetClass(Class* klass);
static void ResetClass();
- static void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
private:
// Note: This avoids a read barrier, it should only be used by the GC.
- HeapReference<Object>* GetReferentReferenceAddr() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ HeapReference<Object>* GetReferentReferenceAddr() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetFieldObjectReferenceAddr<kDefaultVerifyFlags>(ReferentOffset());
}
@@ -130,10 +130,10 @@
}
template<bool kTransactionActive>
- void SetZombie(Object* zombie) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetZombie(Object* zombie) SHARED_REQUIRES(Locks::mutator_lock_) {
return SetFieldObjectVolatile<kTransactionActive>(ZombieOffset(), zombie);
}
- Object* GetZombie() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Object* GetZombie() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetFieldObjectVolatile<Object>(ZombieOffset());
}
diff --git a/runtime/mirror/stack_trace_element.h b/runtime/mirror/stack_trace_element.h
index dc7131e..1167391 100644
--- a/runtime/mirror/stack_trace_element.h
+++ b/runtime/mirror/stack_trace_element.h
@@ -31,32 +31,32 @@
// C++ mirror of java.lang.StackTraceElement
class MANAGED StackTraceElement FINAL : public Object {
public:
- String* GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ String* GetDeclaringClass() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, declaring_class_));
}
- String* GetMethodName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ String* GetMethodName() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, method_name_));
}
- String* GetFileName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ String* GetFileName() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, file_name_));
}
- int32_t GetLineNumber() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ int32_t GetLineNumber() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetField32(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, line_number_));
}
static StackTraceElement* Alloc(Thread* self, Handle<String> declaring_class,
Handle<String> method_name, Handle<String> file_name,
int32_t line_number)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
static void SetClass(Class* java_lang_StackTraceElement);
static void ResetClass();
static void VisitRoots(RootVisitor* visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static Class* GetStackTraceElement() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ static Class* GetStackTraceElement() SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(!java_lang_StackTraceElement_.IsNull());
return java_lang_StackTraceElement_.Read();
}
@@ -71,7 +71,7 @@
template<bool kTransactionActive>
void Init(Handle<String> declaring_class, Handle<String> method_name, Handle<String> file_name,
int32_t line_number)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static GcRoot<Class> java_lang_StackTraceElement_;
diff --git a/runtime/mirror/string-inl.h b/runtime/mirror/string-inl.h
index b689057..3a39f58 100644
--- a/runtime/mirror/string-inl.h
+++ b/runtime/mirror/string-inl.h
@@ -42,7 +42,7 @@
}
void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// Avoid AsString as object is not yet in live bitmap or allocation stack.
String* string = down_cast<String*>(obj);
string->SetCount(count_);
@@ -61,7 +61,7 @@
}
void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// Avoid AsString as object is not yet in live bitmap or allocation stack.
String* string = down_cast<String*>(obj);
string->SetCount(count_);
@@ -88,7 +88,7 @@
}
void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// Avoid AsString as object is not yet in live bitmap or allocation stack.
String* string = down_cast<String*>(obj);
string->SetCount(count_);
@@ -111,7 +111,7 @@
}
void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// Avoid AsString as object is not yet in live bitmap or allocation stack.
String* string = down_cast<String*>(obj);
string->SetCount(count_);
diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h
index af06385..eb2e1f6 100644
--- a/runtime/mirror/string.h
+++ b/runtime/mirror/string.h
@@ -49,87 +49,87 @@
return OFFSET_OF_OBJECT_MEMBER(String, value_);
}
- uint16_t* GetValue() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uint16_t* GetValue() SHARED_REQUIRES(Locks::mutator_lock_) {
return &value_[0];
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- size_t SizeOf() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ size_t SizeOf() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- int32_t GetLength() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ int32_t GetLength() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(String, count_));
}
- void SetCount(int32_t new_count) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetCount(int32_t new_count) SHARED_REQUIRES(Locks::mutator_lock_) {
// Count is invariant so use non-transactional mode. Also disable check as we may run inside
// a transaction.
DCHECK_LE(0, new_count);
SetField32<false, false>(OFFSET_OF_OBJECT_MEMBER(String, count_), new_count);
}
- int32_t GetHashCode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ int32_t GetHashCode() SHARED_REQUIRES(Locks::mutator_lock_);
// Computes, stores, and returns the hash code.
- int32_t ComputeHashCode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ int32_t ComputeHashCode() SHARED_REQUIRES(Locks::mutator_lock_);
- int32_t GetUtfLength() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ int32_t GetUtfLength() SHARED_REQUIRES(Locks::mutator_lock_);
- uint16_t CharAt(int32_t index) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ uint16_t CharAt(int32_t index) SHARED_REQUIRES(Locks::mutator_lock_);
- void SetCharAt(int32_t index, uint16_t c) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetCharAt(int32_t index, uint16_t c) SHARED_REQUIRES(Locks::mutator_lock_);
- String* Intern() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ String* Intern() SHARED_REQUIRES(Locks::mutator_lock_);
template <bool kIsInstrumented, typename PreFenceVisitor>
ALWAYS_INLINE static String* Alloc(Thread* self, int32_t utf16_length,
gc::AllocatorType allocator_type,
const PreFenceVisitor& pre_fence_visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
template <bool kIsInstrumented>
ALWAYS_INLINE static String* AllocFromByteArray(Thread* self, int32_t byte_length,
Handle<ByteArray> array, int32_t offset,
int32_t high_byte,
gc::AllocatorType allocator_type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
template <bool kIsInstrumented>
ALWAYS_INLINE static String* AllocFromCharArray(Thread* self, int32_t count,
Handle<CharArray> array, int32_t offset,
gc::AllocatorType allocator_type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
template <bool kIsInstrumented>
ALWAYS_INLINE static String* AllocFromString(Thread* self, int32_t string_length,
Handle<String> string, int32_t offset,
gc::AllocatorType allocator_type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
static String* AllocFromStrings(Thread* self, Handle<String> string, Handle<String> string2)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
static String* AllocFromUtf16(Thread* self, int32_t utf16_length, const uint16_t* utf16_data_in)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
static String* AllocFromModifiedUtf8(Thread* self, const char* utf)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
static String* AllocFromModifiedUtf8(Thread* self, int32_t utf16_length, const char* utf8_data_in)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
// TODO: This is only used in the interpreter to compare against
// entries from a dex files constant pool (ArtField names). Should
// we unify this with Equals(const StringPiece&); ?
- bool Equals(const char* modified_utf8) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool Equals(const char* modified_utf8) SHARED_REQUIRES(Locks::mutator_lock_);
// TODO: This is only used to compare DexCache.location with
// a dex_file's location (which is an std::string). Do we really
// need this in mirror::String just for that one usage ?
bool Equals(const StringPiece& modified_utf8)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- bool Equals(String* that) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool Equals(String* that) SHARED_REQUIRES(Locks::mutator_lock_);
// Compare UTF-16 code point values not in a locale-sensitive manner
int Compare(int32_t utf16_length, const char* utf8_data_in);
@@ -137,21 +137,22 @@
// TODO: do we need this overload? give it a more intention-revealing name.
bool Equals(const uint16_t* that_chars, int32_t that_offset,
int32_t that_length)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Create a modified UTF-8 encoded std::string from a java/lang/String object.
- std::string ToModifiedUtf8() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string ToModifiedUtf8() SHARED_REQUIRES(Locks::mutator_lock_);
- int32_t FastIndexOf(int32_t ch, int32_t start) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ int32_t FastIndexOf(int32_t ch, int32_t start) SHARED_REQUIRES(Locks::mutator_lock_);
- int32_t CompareTo(String* other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ int32_t CompareTo(String* other) SHARED_REQUIRES(Locks::mutator_lock_);
- CharArray* ToCharArray(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ CharArray* ToCharArray(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Roles::uninterruptible_);
void GetChars(int32_t start, int32_t end, Handle<CharArray> array, int32_t index)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- static Class* GetJavaLangString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ static Class* GetJavaLangString() SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(!java_lang_String_.IsNull());
return java_lang_String_.Read();
}
@@ -159,10 +160,10 @@
static void SetClass(Class* java_lang_String);
static void ResetClass();
static void VisitRoots(RootVisitor* visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
private:
- void SetHashCode(int32_t new_hash_code) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetHashCode(int32_t new_hash_code) SHARED_REQUIRES(Locks::mutator_lock_) {
// Hash code is invariant so use non-transactional mode. Also disable check as we may run inside
// a transaction.
DCHECK_EQ(0, GetField32(OFFSET_OF_OBJECT_MEMBER(String, hash_code_)));
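
Every allocating entry point in string.h gains REQUIRES(!Roles::uninterruptible_): allocation can trigger a GC and suspend the calling thread, so it must not run inside a region that has asserted it cannot be interrupted. A hedged sketch of how a role can be modeled so the analysis treats it like a lock; the real Roles declaration is not part of this diff:

    // Illustrative only, not ART's actual declarations.
    struct __attribute__((capability("role"))) UninterruptibleRole {};

    struct Roles {
      static UninterruptibleRole uninterruptible_;
    };

    // A function that may allocate, and hence may suspend for GC:
    void MayAllocate()
        __attribute__((requires_capability(!Roles::uninterruptible_)));
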
diff --git a/runtime/mirror/throwable.cc b/runtime/mirror/throwable.cc
index 1c21edb..e8633de 100644
--- a/runtime/mirror/throwable.cc
+++ b/runtime/mirror/throwable.cc
@@ -53,7 +53,7 @@
}
}
-void Throwable::SetStackState(Object* state) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+void Throwable::SetStackState(Object* state) SHARED_REQUIRES(Locks::mutator_lock_) {
CHECK(state != nullptr);
if (Runtime::Current()->IsActiveTransaction()) {
SetFieldObjectVolatile<true>(OFFSET_OF_OBJECT_MEMBER(Throwable, stack_state_), state);
diff --git a/runtime/mirror/throwable.h b/runtime/mirror/throwable.h
index 9cc0b6f..0f488dc 100644
--- a/runtime/mirror/throwable.h
+++ b/runtime/mirror/throwable.h
@@ -31,38 +31,38 @@
// C++ mirror of java.lang.Throwable
class MANAGED Throwable : public Object {
public:
- void SetDetailMessage(String* new_detail_message) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetDetailMessage(String* new_detail_message) SHARED_REQUIRES(Locks::mutator_lock_);
- String* GetDetailMessage() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ String* GetDetailMessage() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(Throwable, detail_message_));
}
- std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_REQUIRES(Locks::mutator_lock_);
// This is a runtime version of initCause, you shouldn't use it if initCause may have been
// overridden. Also it asserts rather than throwing exceptions. Currently this is only used
// in cases like the verifier where the checks cannot fail and initCause isn't overridden.
- void SetCause(Throwable* cause) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetStackState(Object* state) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool IsCheckedException() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetCause(Throwable* cause) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetStackState(Object* state) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsCheckedException() SHARED_REQUIRES(Locks::mutator_lock_);
- static Class* GetJavaLangThrowable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ static Class* GetJavaLangThrowable() SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(!java_lang_Throwable_.IsNull());
return java_lang_Throwable_.Read();
}
- int32_t GetStackDepth() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ int32_t GetStackDepth() SHARED_REQUIRES(Locks::mutator_lock_);
static void SetClass(Class* java_lang_Throwable);
static void ResetClass();
static void VisitRoots(RootVisitor* visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
private:
- Object* GetStackState() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Object* GetStackState() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetFieldObjectVolatile<Object>(OFFSET_OF_OBJECT_MEMBER(Throwable, stack_state_));
}
- Object* GetStackTrace() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Object* GetStackTrace() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetFieldObjectVolatile<Object>(OFFSET_OF_OBJECT_MEMBER(Throwable, stack_trace_));
}
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index fd9c1b1..da6ee25 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -298,7 +298,7 @@
__attribute__((format(printf, 1, 2)));
static void ThrowIllegalMonitorStateExceptionF(const char* fmt, ...)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
va_list args;
va_start(args, fmt);
Thread* self = Thread::Current();
@@ -667,11 +667,9 @@
// Suspend the owner, inflate. First change to blocked and give up mutator_lock_.
self->SetMonitorEnterObject(obj.Get());
bool timed_out;
- Thread* owner;
- {
- ScopedThreadStateChange tsc(self, kBlocked);
- owner = thread_list->SuspendThreadByThreadId(owner_thread_id, false, &timed_out);
- }
+ self->TransitionFromRunnableToSuspended(kBlocked);
+ Thread* owner = thread_list->SuspendThreadByThreadId(owner_thread_id, false, &timed_out);
+ self->TransitionFromSuspendedToRunnable();
if (owner != nullptr) {
// We succeeded in suspending the thread, check the lock's status didn't change.
lock_word = obj->GetLockWord(true);
@@ -1083,7 +1081,7 @@
}
}
-bool Monitor::IsLocked() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+bool Monitor::IsLocked() SHARED_REQUIRES(Locks::mutator_lock_) {
MutexLock mu(Thread::Current(), monitor_lock_);
return owner_ != nullptr;
}
@@ -1189,7 +1187,7 @@
MonitorDeflateVisitor() : self_(Thread::Current()), deflate_count_(0) {}
virtual mirror::Object* IsMarked(mirror::Object* object) OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (Monitor::Deflate(self_, object)) {
DCHECK_NE(object->GetLockWord(true).GetState(), LockWord::kFatLocked);
++deflate_count_;
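
The inflation path in monitor.cc swaps the ScopedThreadStateChange RAII helper for explicit TransitionFromRunnableToSuspended / TransitionFromSuspendedToRunnable calls. The behavior is unchanged (drop to kBlocked, suspend the owner, become runnable again), but the explicit entry points are presumably annotated as releasing and re-acquiring the shared mutator lock, which the scoped helper hid from the analysis. The two equivalent shapes:

    // Before: transitions buried in an RAII helper, invisible to the checker.
    //   { ScopedThreadStateChange tsc(self, kBlocked); owner = ...; }
    // After: explicit calls whose lock effects the analysis can see
    // (the annotation claims below are assumptions about Thread's declarations).
    self->TransitionFromRunnableToSuspended(kBlocked);   // releases shared mutator_lock_
    Thread* owner = thread_list->SuspendThreadByThreadId(owner_thread_id, false, &timed_out);
    self->TransitionFromSuspendedToRunnable();           // re-acquires it
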
diff --git a/runtime/monitor.h b/runtime/monitor.h
index 09a6cb6..3ca8954 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -62,34 +62,36 @@
static uint32_t GetLockOwnerThreadId(mirror::Object* obj)
NO_THREAD_SAFETY_ANALYSIS; // TODO: Reading lock owner without holding lock is racy.
+ // NO_THREAD_SAFETY_ANALYSIS for mon->Lock.
static mirror::Object* MonitorEnter(Thread* thread, mirror::Object* obj)
EXCLUSIVE_LOCK_FUNCTION(obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static bool MonitorExit(Thread* thread, mirror::Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- UNLOCK_FUNCTION(obj);
+ SHARED_REQUIRES(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS;
- static void Notify(Thread* self, mirror::Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ // NO_THREAD_SAFETY_ANALYSIS for mon->Unlock.
+ static bool MonitorExit(Thread* thread, mirror::Object* obj)
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ UNLOCK_FUNCTION(obj) NO_THREAD_SAFETY_ANALYSIS;
+
+ static void Notify(Thread* self, mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
DoNotify(self, obj, false);
}
- static void NotifyAll(Thread* self, mirror::Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ static void NotifyAll(Thread* self, mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
DoNotify(self, obj, true);
}
// Object.wait(). Also called for class init.
+ // NO_THREAD_SAFETY_ANALYSIS for mon->Wait.
static void Wait(Thread* self, mirror::Object* obj, int64_t ms, int32_t ns,
bool interruptShouldThrow, ThreadState why)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS;
static void DescribeWait(std::ostream& os, const Thread* thread)
- LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!Locks::thread_suspend_count_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Used to implement JDWP's ThreadReference.CurrentContendedMonitor.
static mirror::Object* GetContendedMonitor(Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Calls 'callback' once for each lock held in the single stack frame represented by
// the current state of 'stack_visitor'.
@@ -97,12 +99,12 @@
// is necessary when we have already aborted but want to dump the stack as much as we can.
static void VisitLocks(StackVisitor* stack_visitor, void (*callback)(mirror::Object*, void*),
void* callback_context, bool abort_on_failure = true)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static bool IsValidLockWord(LockWord lock_word);
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- mirror::Object* GetObject() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Object* GetObject() SHARED_REQUIRES(Locks::mutator_lock_) {
return obj_.Read<kReadBarrierOption>();
}
@@ -114,7 +116,7 @@
int32_t GetHashCode();
- bool IsLocked() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsLocked() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!monitor_lock_);
bool HasHashCode() const {
return hash_code_.LoadRelaxed() != 0;
@@ -126,12 +128,13 @@
// Inflate the lock on obj. May fail to inflate for spurious reasons, always re-check.
static void InflateThinLocked(Thread* self, Handle<mirror::Object> obj, LockWord lock_word,
- uint32_t hash_code) NO_THREAD_SAFETY_ANALYSIS;
+ uint32_t hash_code) SHARED_REQUIRES(Locks::mutator_lock_);
+ // Not exclusive because ImageWriter calls this during a Heap::VisitObjects() that
+ // does not allow a thread suspension in the middle. TODO: maybe make this exclusive.
+ // NO_THREAD_SAFETY_ANALYSIS for monitor->monitor_lock_.
static bool Deflate(Thread* self, mirror::Object* obj)
- // Not exclusive because ImageWriter calls this during a Heap::VisitObjects() that
- // does not allow a thread suspension in the middle. TODO: maybe make this exclusive.
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS;
#ifndef __LP64__
void* operator new(size_t size) {
@@ -149,57 +152,58 @@
private:
explicit Monitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
explicit Monitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code,
- MonitorId id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ MonitorId id) SHARED_REQUIRES(Locks::mutator_lock_);
// Install the monitor into its object, may fail if another thread installs a different monitor
// first.
bool Install(Thread* self)
- LOCKS_EXCLUDED(monitor_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!monitor_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Links a thread into a monitor's wait set. The monitor lock must be held by the caller of this
// routine.
- void AppendToWaitSet(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(monitor_lock_);
+ void AppendToWaitSet(Thread* thread) REQUIRES(monitor_lock_);
// Unlinks a thread from a monitor's wait set. The monitor lock must be held by the caller of
// this routine.
- void RemoveFromWaitSet(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(monitor_lock_);
+ void RemoveFromWaitSet(Thread* thread) REQUIRES(monitor_lock_);
// Changes the shape of a monitor from thin to fat, preserving the internal lock state. The
// calling thread must own the lock or the owner must be suspended. There's a race with other
// threads inflating the lock, installing hash codes and spurious failures. The caller should
// re-read the lock word following the call.
static void Inflate(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ NO_THREAD_SAFETY_ANALYSIS; // For m->Install(self).

void LogContentionEvent(Thread* self, uint32_t wait_ms, uint32_t sample_percent,
const char* owner_filename, uint32_t owner_line_number)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static void FailedUnlock(mirror::Object* obj, Thread* expected_owner, Thread* found_owner,
Monitor* mon)
- LOCKS_EXCLUDED(Locks::thread_list_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!Locks::thread_list_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
void Lock(Thread* self)
- LOCKS_EXCLUDED(monitor_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!monitor_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool Unlock(Thread* thread)
- LOCKS_EXCLUDED(monitor_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!monitor_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
static void DoNotify(Thread* self, mirror::Object* obj, bool notify_all)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS; // For mon->Notify.
void Notify(Thread* self)
- LOCKS_EXCLUDED(monitor_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!monitor_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
void NotifyAll(Thread* self)
- LOCKS_EXCLUDED(monitor_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!monitor_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Wait on a monitor until timeout, interrupt, or notification. Used for Object.wait() and
@@ -222,15 +226,15 @@
// Since we're allowed to wake up "early", we clamp extremely long durations to return at the end
// of the 32-bit time epoch.
void Wait(Thread* self, int64_t msec, int32_t nsec, bool interruptShouldThrow, ThreadState why)
- LOCKS_EXCLUDED(monitor_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!monitor_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Translates the provided method and pc into its declaring class' source file and line number.
void TranslateLocation(ArtMethod* method, uint32_t pc,
const char** source_file, uint32_t* line_number) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- uint32_t GetOwnerThreadId();
+ uint32_t GetOwnerThreadId() REQUIRES(!monitor_lock_);
static bool (*is_sensitive_thread_hook_)();
static uint32_t lock_profiling_threshold_;
@@ -285,17 +289,16 @@
MonitorList();
~MonitorList();
- void Add(Monitor* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void Add(Monitor* m) SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!monitor_list_lock_);
void SweepMonitorList(IsMarkedVisitor* visitor)
- LOCKS_EXCLUDED(monitor_list_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void DisallowNewMonitors() LOCKS_EXCLUDED(monitor_list_lock_);
- void AllowNewMonitors() LOCKS_EXCLUDED(monitor_list_lock_);
- void EnsureNewMonitorsDisallowed() LOCKS_EXCLUDED(monitor_list_lock_);
- void BroadcastForNewMonitors() LOCKS_EXCLUDED(monitor_list_lock_);
+ REQUIRES(!monitor_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ void DisallowNewMonitors() REQUIRES(!monitor_list_lock_);
+ void AllowNewMonitors() REQUIRES(!monitor_list_lock_);
+ void EnsureNewMonitorsDisallowed() REQUIRES(!monitor_list_lock_);
+ void BroadcastForNewMonitors() REQUIRES(!monitor_list_lock_);
// Returns how many monitors were deflated.
- size_t DeflateMonitors() LOCKS_EXCLUDED(monitor_list_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ size_t DeflateMonitors() REQUIRES(!monitor_list_lock_) REQUIRES(Locks::mutator_lock_);
typedef std::list<Monitor*, TrackingAllocator<Monitor*, kAllocatorTagMonitorList>> Monitors;
@@ -318,7 +321,7 @@
// For use only by the JDWP implementation.
class MonitorInfo {
public:
- explicit MonitorInfo(mirror::Object* o) EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ explicit MonitorInfo(mirror::Object* o) REQUIRES(Locks::mutator_lock_);
Thread* owner_;
size_t entry_count_;
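
monitor.h shows the two recurring patterns of this migration. When the lock is a named member, LOCKS_EXCLUDED(monitor_lock_) becomes the stronger REQUIRES(!monitor_lock_): the caller must provably not hold the lock because the method acquires it itself. When the lock is per-object (the Java monitor attached to obj), no static expression can name it, so those entry points fall back to NO_THREAD_SAFETY_ANALYSIS with a comment recording which operation is being suppressed. A minimal sketch of the first pattern, using an assumed class:

    class MonitorLike {
     public:
      bool IsLocked() REQUIRES(!lock_) {         // caller must not hold lock_
        MutexLock mu(Thread::Current(), lock_);  // because we take it here
        return owner_ != nullptr;
      }
     private:
      Mutex lock_;  // ART mutexes take a name argument; elided in this sketch.
      Thread* owner_ GUARDED_BY(lock_) = nullptr;
    };
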
diff --git a/runtime/monitor_pool.cc b/runtime/monitor_pool.cc
index 4a364ca..2832e32 100644
--- a/runtime/monitor_pool.cc
+++ b/runtime/monitor_pool.cc
@@ -90,7 +90,7 @@
Monitor* MonitorPool::CreateMonitorInPool(Thread* self, Thread* owner, mirror::Object* obj,
int32_t hash_code)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// We are gonna allocate, so acquire the writer lock.
MutexLock mu(self, *Locks::allocated_monitor_ids_lock_);
diff --git a/runtime/monitor_pool.h b/runtime/monitor_pool.h
index 4ab4e86..240ca61 100644
--- a/runtime/monitor_pool.h
+++ b/runtime/monitor_pool.h
@@ -43,7 +43,7 @@
}
static Monitor* CreateMonitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
#ifndef __LP64__
Monitor* mon = new Monitor(self, owner, obj, hash_code);
DCHECK_ALIGNED(mon, LockWord::kMonitorIdAlignment);
@@ -110,10 +110,10 @@
// analysis.
MonitorPool() NO_THREAD_SAFETY_ANALYSIS;
- void AllocateChunk() EXCLUSIVE_LOCKS_REQUIRED(Locks::allocated_monitor_ids_lock_);
+ void AllocateChunk() REQUIRES(Locks::allocated_monitor_ids_lock_);
Monitor* CreateMonitorInPool(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void ReleaseMonitorToPool(Thread* self, Monitor* monitor);
void ReleaseMonitorsToPool(Thread* self, MonitorList::Monitors* monitors);
diff --git a/runtime/monitor_test.cc b/runtime/monitor_test.cc
index 2a29c60..1be637c 100644
--- a/runtime/monitor_test.cc
+++ b/runtime/monitor_test.cc
@@ -60,7 +60,7 @@
static void FillHeap(Thread* self, ClassLinker* class_linker,
std::unique_ptr<StackHandleScope<kMaxHandles>>* hsp,
std::vector<MutableHandle<mirror::Object>>* handles)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
Runtime::Current()->GetHeap()->SetIdealFootprint(1 * GB);
hsp->reset(new StackHandleScope<kMaxHandles>(self));
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 4f97d20..1b210bb 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -17,6 +17,7 @@
#include "dalvik_system_DexFile.h"
#include "base/logging.h"
+#include "base/out.h"
#include "base/stl_util.h"
#include "base/stringprintf.h"
#include "class_linker.h"
@@ -164,7 +165,8 @@
std::vector<std::unique_ptr<const DexFile>> dex_files;
std::vector<std::string> error_msgs;
- dex_files = linker->OpenDexFilesFromOat(sourceName.c_str(), outputName.c_str(), &error_msgs);
+ dex_files =
+ linker->OpenDexFilesFromOat(sourceName.c_str(), outputName.c_str(), outof(error_msgs));
if (!dex_files.empty()) {
jlongArray array = ConvertNativeToJavaArray(env, dex_files);
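
This is the first converted call site of the new out-parameter wrapper: instead of passing &error_msgs, the caller writes outof(error_msgs), which makes the out-parameter explicit at the call site and non-null by construction. base/out.h itself is not in this diff, so the following is a hedged sketch of the minimal shape the call sites imply; the real header may differ:

    template <typename T>
    class out {
     public:
      explicit out(T& param) : ptr_(&param) {}
      T& operator*() const { return *ptr_; }   // supports "*error_msg = ..."
      T* operator->() const { return ptr_; }   // supports "error_msg->empty()"
     private:
      T* ptr_;  // never null by construction
    };

    template <typename T>
    out<T> outof(T& param) { return out<T>(param); }

    // Re-wrap an out<T> you received when forwarding it to another callee.
    template <typename T>
    out<T> outof_forward(out<T> param) { return param; }
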
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 5dd354d..9ea339a 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -16,7 +16,7 @@
#include "dalvik_system_VMRuntime.h"
-#ifdef HAVE_ANDROID_OS
+#ifdef __ANDROID__
extern "C" void android_set_application_target_sdk_version(uint32_t version);
#endif
#include <limits.h>
@@ -196,7 +196,7 @@
// Note that targetSdkVersion may be 0, meaning "current".
Runtime::Current()->SetTargetSdkVersion(target_sdk_version);
-#ifdef HAVE_ANDROID_OS
+#ifdef __ANDROID__
// This part is letting libc/dynamic linker know about current app's
// target sdk version to enable compatibility workarounds.
android_set_application_target_sdk_version(static_cast<uint32_t>(target_sdk_version));
@@ -262,7 +262,7 @@
explicit PreloadDexCachesStringsVisitor(StringTable* table) : table_(table) { }
void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
mirror::String* string = root->AsString();
table_->operator[](string->ToModifiedUtf8()) = string;
}
@@ -274,7 +274,7 @@
// Based on ClassLinker::ResolveString.
static void PreloadDexCachesResolveString(
Handle<mirror::DexCache> dex_cache, uint32_t string_idx, StringTable& strings)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
mirror::String* string = dex_cache->GetResolvedString(string_idx);
if (string != nullptr) {
return;
@@ -292,7 +292,7 @@
// Based on ClassLinker::ResolveType.
static void PreloadDexCachesResolveType(
Thread* self, mirror::DexCache* dex_cache, uint32_t type_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
mirror::Class* klass = dex_cache->GetResolvedType(type_idx);
if (klass != nullptr) {
return;
@@ -321,7 +321,7 @@
// Based on ClassLinker::ResolveField.
static void PreloadDexCachesResolveField(Handle<mirror::DexCache> dex_cache, uint32_t field_idx,
bool is_static)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ArtField* field = dex_cache->GetResolvedField(field_idx, sizeof(void*));
if (field != nullptr) {
return;
@@ -349,7 +349,7 @@
// Based on ClassLinker::ResolveMethod.
static void PreloadDexCachesResolveMethod(Handle<mirror::DexCache> dex_cache, uint32_t method_idx,
InvokeType invoke_type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ArtMethod* method = dex_cache->GetResolvedMethod(method_idx, sizeof(void*));
if (method != nullptr) {
return;
@@ -423,7 +423,7 @@
}
static void PreloadDexCachesStatsFilled(DexCacheStats* filled)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (!kPreloadDexCachesCollectStats) {
return;
}
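
The HAVE_ANDROID_OS to __ANDROID__ conversions running through these files replace a build-system define with the macro the Android compiler toolchain predefines when targeting the platform, so the guards need no -D plumbing from the makefiles:

    #ifdef __ANDROID__
      // Compiled only when targeting Android; __ANDROID__ comes from the
      // toolchain itself rather than from a -DHAVE_ANDROID_OS flag.
      android_set_application_target_sdk_version(static_cast<uint32_t>(target_sdk_version));
    #endif
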
diff --git a/runtime/native/dalvik_system_VMStack.cc b/runtime/native/dalvik_system_VMStack.cc
index ee62755..541eeb1 100644
--- a/runtime/native/dalvik_system_VMStack.cc
+++ b/runtime/native/dalvik_system_VMStack.cc
@@ -29,7 +29,7 @@
namespace art {
static jobject GetThreadStack(const ScopedFastNativeObjectAccess& soa, jobject peer)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
jobject trace = nullptr;
if (soa.Decode<mirror::Object*>(peer) == soa.Self()->GetPeer()) {
trace = soa.Self()->CreateInternalStackTrace<false>(soa);
@@ -87,7 +87,7 @@
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
class_loader(nullptr) {}
- bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(class_loader == nullptr);
mirror::Class* c = GetMethod()->GetDeclaringClass();
// c is null for runtime methods.
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index a41aed6..eddb2d1 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -41,7 +41,7 @@
ALWAYS_INLINE static inline mirror::Class* DecodeClass(
const ScopedFastNativeObjectAccess& soa, jobject java_class)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
mirror::Class* c = soa.Decode<mirror::Class*>(java_class);
DCHECK(c != nullptr);
DCHECK(c->IsClass());
@@ -108,7 +108,7 @@
static mirror::ObjectArray<mirror::Field>* GetDeclaredFields(
Thread* self, mirror::Class* klass, bool public_only, bool force_resolve)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
StackHandleScope<1> hs(self);
auto* ifields = klass->GetIFields();
auto* sfields = klass->GetSFields();
@@ -189,7 +189,7 @@
// fast.
ALWAYS_INLINE static inline ArtField* FindFieldByName(
Thread* self ATTRIBUTE_UNUSED, mirror::String* name, ArtField* fields, size_t num_fields)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
size_t low = 0;
size_t high = num_fields;
const uint16_t* const data = name->GetValue();
@@ -218,7 +218,7 @@
ALWAYS_INLINE static inline mirror::Field* GetDeclaredField(
Thread* self, mirror::Class* c, mirror::String* name)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
auto* instance_fields = c->GetIFields();
auto* art_field = FindFieldByName(self, name, instance_fields, c->NumInstanceFields());
if (art_field != nullptr) {
@@ -274,7 +274,7 @@
}
static ALWAYS_INLINE inline bool MethodMatchesConstructor(ArtMethod* m, bool public_only)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(m != nullptr);
return (!public_only || m->IsPublic()) && !m->IsStatic() && m->IsConstructor();
}
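
FindFieldByName above sets up a binary search over the class's fields, relying on field names within a dex file being stored sorted. The hunk cuts off before the loop; a sketch of the likely continuation, with the comparison helper assumed (ART's real comparison works on the UTF-16 data obtained via name->GetValue()):

    while (low < high) {
      const size_t mid = (low + high) / 2;
      ArtField& field = fields[mid];
      int cmp = CompareToName(name, field);  // assumed helper: <0, 0, >0
      if (cmp == 0) {
        return &field;
      } else if (cmp < 0) {
        high = mid;
      } else {
        low = mid + 1;
      }
    }
    return nullptr;
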
diff --git a/runtime/native/java_lang_Runtime.cc b/runtime/native/java_lang_Runtime.cc
index abac815..856a3e7 100644
--- a/runtime/native/java_lang_Runtime.cc
+++ b/runtime/native/java_lang_Runtime.cc
@@ -31,10 +31,10 @@
#include "verify_object-inl.h"
#include <sstream>
-#ifdef HAVE_ANDROID_OS
+#ifdef __ANDROID__
// This function is provided by android linker.
extern "C" void android_update_LD_LIBRARY_PATH(const char* ld_library_path);
-#endif // HAVE_ANDROID_OS
+#endif // __ANDROID__
namespace art {
@@ -53,7 +53,7 @@
}
static void SetLdLibraryPath(JNIEnv* env, jstring javaLdLibraryPathJstr) {
-#ifdef HAVE_ANDROID_OS
+#ifdef __ANDROID__
if (javaLdLibraryPathJstr != nullptr) {
ScopedUtfChars ldLibraryPath(env, javaLdLibraryPathJstr);
if (ldLibraryPath.c_str() != nullptr) {
diff --git a/runtime/native/java_lang_System.cc b/runtime/native/java_lang_System.cc
index 97aae67..d9863c5 100644
--- a/runtime/native/java_lang_System.cc
+++ b/runtime/native/java_lang_System.cc
@@ -36,7 +36,7 @@
*/
static void ThrowArrayStoreException_NotAnArray(const char* identifier, mirror::Object* array)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
std::string actualType(PrettyTypeOf(array));
Thread* self = Thread::Current();
self->ThrowNewExceptionF("Ljava/lang/ArrayStoreException;",
diff --git a/runtime/native/java_lang_VMClassLoader.cc b/runtime/native/java_lang_VMClassLoader.cc
index 1515630..62a0b76 100644
--- a/runtime/native/java_lang_VMClassLoader.cc
+++ b/runtime/native/java_lang_VMClassLoader.cc
@@ -16,6 +16,7 @@
#include "java_lang_VMClassLoader.h"
+#include "base/out.h"
#include "class_linker.h"
#include "jni_internal.h"
#include "mirror/class_loader.h"
@@ -45,7 +46,7 @@
// Try the common case.
StackHandleScope<1> hs(soa.Self());
cl->FindClassInPathClassLoader(soa, soa.Self(), descriptor.c_str(), descriptor_hash,
- hs.NewHandle(loader), &c);
+ hs.NewHandle(loader), outof(c));
if (c != nullptr) {
return soa.AddLocalReference<jclass>(c);
}
diff --git a/runtime/native/java_lang_reflect_Field.cc b/runtime/native/java_lang_reflect_Field.cc
index ba898c6..5bbb0dc 100644
--- a/runtime/native/java_lang_reflect_Field.cc
+++ b/runtime/native/java_lang_reflect_Field.cc
@@ -32,7 +32,7 @@
template<bool kIsSet>
ALWAYS_INLINE inline static bool VerifyFieldAccess(Thread* self, mirror::Field* field,
mirror::Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (kIsSet && field->IsFinal()) {
ThrowIllegalAccessException(
StringPrintf("Cannot set %s field %s of class %s",
@@ -60,7 +60,7 @@
template<bool kAllowReferences>
ALWAYS_INLINE inline static bool GetFieldValue(mirror::Object* o, mirror::Field* f,
Primitive::Type field_type, JValue* value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK_EQ(value->GetJ(), INT64_C(0));
MemberOffset offset(f->GetOffset());
const bool is_volatile = f->IsVolatile();
@@ -105,7 +105,7 @@
ALWAYS_INLINE inline static bool CheckReceiver(const ScopedFastNativeObjectAccess& soa,
jobject j_rcvr, mirror::Field** f,
mirror::Object** class_or_rcvr)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
soa.Self()->AssertThreadSuspensionIsAllowable();
mirror::Class* declaringClass = (*f)->GetDeclaringClass();
if ((*f)->IsStatic()) {
@@ -232,7 +232,7 @@
ALWAYS_INLINE inline static void SetFieldValue(mirror::Object* o, mirror::Field* f,
Primitive::Type field_type, bool allow_references,
const JValue& new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(f->GetDeclaringClass()->IsInitialized());
MemberOffset offset(f->GetOffset());
const bool is_volatile = f->IsVolatile();
@@ -253,9 +253,9 @@
break;
case Primitive::kPrimChar:
if (is_volatile) {
- o->SetFieldBooleanVolatile<false>(offset, new_value.GetC());
+ o->SetFieldCharVolatile<false>(offset, new_value.GetC());
} else {
- o->SetFieldBoolean<false>(offset, new_value.GetC());
+ o->SetFieldChar<false>(offset, new_value.GetC());
}
break;
case Primitive::kPrimInt:
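
Buried among the mechanical renames is a genuine bug fix: the kPrimChar case of SetFieldValue was writing char fields through the boolean setters, which store a single byte and so silently truncated any jchar above 0xFF. A worked example of what the old code lost:

    uint16_t c = 0x1234;                           // a jchar from Field.setChar()
    uint8_t as_boolean = static_cast<uint8_t>(c);  // what SetFieldBoolean stored: 0x34
    // SetFieldChar stores all 16 bits, so the value now round-trips intact.
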
diff --git a/runtime/native/scoped_fast_native_object_access.h b/runtime/native/scoped_fast_native_object_access.h
index 57b873b..c4a33df 100644
--- a/runtime/native/scoped_fast_native_object_access.h
+++ b/runtime/native/scoped_fast_native_object_access.h
@@ -27,7 +27,7 @@
class ScopedFastNativeObjectAccess : public ScopedObjectAccessAlreadyRunnable {
public:
explicit ScopedFastNativeObjectAccess(JNIEnv* env)
- LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
+ REQUIRES(!Locks::thread_suspend_count_lock_)
SHARED_LOCK_FUNCTION(Locks::mutator_lock_) ALWAYS_INLINE
: ScopedObjectAccessAlreadyRunnable(env) {
Locks::mutator_lock_->AssertSharedHeld(Self());
diff --git a/runtime/nth_caller_visitor.h b/runtime/nth_caller_visitor.h
index 7fe3130..2295cb4 100644
--- a/runtime/nth_caller_visitor.h
+++ b/runtime/nth_caller_visitor.h
@@ -33,7 +33,7 @@
count(0),
caller(nullptr) {}
- bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
bool do_count = false;
if (m == nullptr || m->IsRuntimeMethod()) {
diff --git a/runtime/oat.h b/runtime/oat.h
index ee2f3f6..29dd76c 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,7 +32,7 @@
class PACKED(4) OatHeader {
public:
static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
- static constexpr uint8_t kOatVersion[] = { '0', '6', '7', '\0' };
+ static constexpr uint8_t kOatVersion[] = { '0', '6', '8', '\0' };
static constexpr const char* kImageLocationKey = "image-location";
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
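
The oat version is bumped from 067 to 068. Incrementing kOatVersion is the conventional way to invalidate every previously compiled oat file after a change to the oat contract: the header comparison fails, the file is treated as out of date, and dex2oat regenerates it. A sketch of the check this relies on; the real one lives in OatHeader::IsValid(), whose body is not shown here:

    #include <cstring>

    bool VersionMatches(const uint8_t* file_version) {
      return memcmp(file_version, OatHeader::kOatVersion,
                    sizeof(OatHeader::kOatVersion)) == 0;
    }
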
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 098fe61..80fc7fa 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -27,11 +27,12 @@
#include <sstream>
// dlopen_ext support from bionic.
-#ifdef HAVE_ANDROID_OS
+#ifdef __ANDROID__
#include "android/dlext.h"
#endif
#include "art_method-inl.h"
+#include "base/out.h"
#include "base/bit_vector.h"
#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
@@ -88,16 +89,16 @@
OatFile* OatFile::OpenWithElfFile(ElfFile* elf_file,
const std::string& location,
const char* abs_dex_location,
- std::string* error_msg) {
+ out<std::string> error_msg) {
std::unique_ptr<OatFile> oat_file(new OatFile(location, false));
oat_file->elf_file_.reset(elf_file);
uint64_t offset, size;
- bool has_section = elf_file->GetSectionOffsetAndSize(".rodata", &offset, &size);
+ bool has_section = elf_file->GetSectionOffsetAndSize(".rodata", outof(offset), outof(size));
CHECK(has_section);
oat_file->begin_ = elf_file->Begin() + offset;
oat_file->end_ = elf_file->Begin() + size + offset;
// Ignore the optional .bss section when opening non-executable.
- return oat_file->Setup(abs_dex_location, error_msg) ? oat_file.release() : nullptr;
+ return oat_file->Setup(abs_dex_location, outof_forward(error_msg)) ? oat_file.release() : nullptr;
}
OatFile* OatFile::Open(const std::string& filename,
@@ -106,7 +107,7 @@
uint8_t* oat_file_begin,
bool executable,
const char* abs_dex_location,
- std::string* error_msg) {
+ out<std::string> error_msg) {
CHECK(!filename.empty()) << location;
CheckLocation(location);
std::unique_ptr<OatFile> ret;
@@ -154,27 +155,34 @@
return ret.release();
}
-OatFile* OatFile::OpenWritable(File* file, const std::string& location,
+OatFile* OatFile::OpenWritable(File* file,
+ const std::string& location,
const char* abs_dex_location,
- std::string* error_msg) {
+ out<std::string> error_msg) {
CheckLocation(location);
- return OpenElfFile(file, location, nullptr, nullptr, true, false, abs_dex_location, error_msg);
+ return OpenElfFile(file, location, nullptr, nullptr, true, false, abs_dex_location,
+ outof_forward(error_msg));
}
-OatFile* OatFile::OpenReadable(File* file, const std::string& location,
+OatFile* OatFile::OpenReadable(File* file,
+ const std::string& location,
const char* abs_dex_location,
- std::string* error_msg) {
+ out<std::string> error_msg) {
CheckLocation(location);
- return OpenElfFile(file, location, nullptr, nullptr, false, false, abs_dex_location, error_msg);
+ return OpenElfFile(file, location, nullptr, nullptr, false, false, abs_dex_location,
+ outof_forward(error_msg));
}
OatFile* OatFile::OpenDlopen(const std::string& elf_filename,
const std::string& location,
uint8_t* requested_base,
const char* abs_dex_location,
- std::string* error_msg) {
+ out<std::string> error_msg) {
std::unique_ptr<OatFile> oat_file(new OatFile(location, true));
- bool success = oat_file->Dlopen(elf_filename, requested_base, abs_dex_location, error_msg);
+ bool success = oat_file->Dlopen(elf_filename,
+ requested_base,
+ abs_dex_location,
+ outof_forward(error_msg));
if (!success) {
return nullptr;
}
@@ -188,10 +196,10 @@
bool writable,
bool executable,
const char* abs_dex_location,
- std::string* error_msg) {
+ out<std::string> error_msg) {
std::unique_ptr<OatFile> oat_file(new OatFile(location, executable));
bool success = oat_file->ElfFileOpen(file, requested_base, oat_file_begin, writable, executable,
- abs_dex_location, error_msg);
+ abs_dex_location, outof_forward(error_msg));
if (!success) {
CHECK(!error_msg->empty());
return nullptr;
@@ -200,8 +208,13 @@
}
OatFile::OatFile(const std::string& location, bool is_executable)
- : location_(location), begin_(nullptr), end_(nullptr), bss_begin_(nullptr), bss_end_(nullptr),
- is_executable_(is_executable), dlopen_handle_(nullptr),
+ : location_(location),
+ begin_(nullptr),
+ end_(nullptr),
+ bss_begin_(nullptr),
+ bss_end_(nullptr),
+ is_executable_(is_executable),
+ dlopen_handle_(nullptr),
secondary_lookup_lock_("OatFile secondary lookup lock", kOatFileSecondaryLookupLock) {
CHECK(!location_.empty());
}
@@ -213,8 +226,10 @@
}
}
-bool OatFile::Dlopen(const std::string& elf_filename, uint8_t* requested_base,
- const char* abs_dex_location, std::string* error_msg) {
+bool OatFile::Dlopen(const std::string& elf_filename,
+ uint8_t* requested_base,
+ const char* abs_dex_location,
+ out<std::string> error_msg) {
#ifdef __APPLE__
// The dl_iterate_phdr syscall is missing. There is similar API on OSX,
// but let's fallback to the custom loading code for the time being.
@@ -229,7 +244,7 @@
*error_msg = StringPrintf("Failed to find absolute path for '%s'", elf_filename.c_str());
return false;
}
-#ifdef HAVE_ANDROID_OS
+#ifdef __ANDROID__
android_dlextinfo extinfo;
extinfo.flags = ANDROID_DLEXT_FORCE_LOAD | ANDROID_DLEXT_FORCE_FIXED_VADDR;
dlopen_handle_ = android_dlopen_ext(absolute_path.get(), RTLD_NOW, &extinfo);
@@ -319,22 +334,28 @@
LOG(ERROR) << "File " << elf_filename << " loaded with dlopen but can not find its mmaps.";
}
- return Setup(abs_dex_location, error_msg);
+ return Setup(abs_dex_location, outof_forward(error_msg));
#endif // __APPLE__
}
-bool OatFile::ElfFileOpen(File* file, uint8_t* requested_base, uint8_t* oat_file_begin,
- bool writable, bool executable,
+bool OatFile::ElfFileOpen(File* file,
+ uint8_t* requested_base,
+ uint8_t* oat_file_begin,
+ bool writable,
+ bool executable,
const char* abs_dex_location,
- std::string* error_msg) {
+ out<std::string> error_msg) {
// TODO: rename requested_base to oat_data_begin
- elf_file_.reset(ElfFile::Open(file, writable, /*program_header_only*/true, error_msg,
+ elf_file_.reset(ElfFile::Open(file,
+ writable,
+ /*program_header_only*/true,
+ outof_forward(error_msg),
oat_file_begin));
if (elf_file_ == nullptr) {
DCHECK(!error_msg->empty());
return false;
}
- bool loaded = elf_file_->Load(executable, error_msg);
+ bool loaded = elf_file_->Load(executable, outof_forward(error_msg));
if (!loaded) {
DCHECK(!error_msg->empty());
return false;
@@ -375,10 +396,10 @@
bss_end_ += sizeof(uint32_t);
}
- return Setup(abs_dex_location, error_msg);
+ return Setup(abs_dex_location, outof_forward(error_msg));
}
-bool OatFile::Setup(const char* abs_dex_location, std::string* error_msg) {
+bool OatFile::Setup(const char* abs_dex_location, out<std::string> error_msg) {
if (!GetOatHeader().IsValid()) {
std::string cause = GetOatHeader().GetValidationErrorMessage();
*error_msg = StringPrintf("Invalid oat header for '%s': %s", GetLocation().c_str(),
@@ -617,9 +638,9 @@
return reinterpret_cast<const DexFile::Header*>(dex_file_pointer_)->file_size_;
}
-std::unique_ptr<const DexFile> OatFile::OatDexFile::OpenDexFile(std::string* error_msg) const {
+std::unique_ptr<const DexFile> OatFile::OatDexFile::OpenDexFile(out<std::string> error_msg) const {
return DexFile::Open(dex_file_pointer_, FileSize(), dex_file_location_,
- dex_file_location_checksum_, this, error_msg);
+ dex_file_location_checksum_, this, outof_forward(error_msg));
}
uint32_t OatFile::OatDexFile::GetOatClassOffset(uint16_t class_def_index) const {
@@ -777,7 +798,7 @@
return out.str();
}
-bool OatFile::CheckStaticDexFileDependencies(const char* dex_dependencies, std::string* msg) {
+bool OatFile::CheckStaticDexFileDependencies(const char* dex_dependencies, out<std::string> msg) {
if (dex_dependencies == nullptr || dex_dependencies[0] == 0) {
// No dependencies.
return true;
@@ -786,7 +807,7 @@
// Assumption: this is not performance-critical. So it's OK to do this with a std::string and
// Split() instead of manual parsing of the combined char*.
std::vector<std::string> split;
- Split(dex_dependencies, kDexClassPathEncodingSeparator, &split);
+ Split(dex_dependencies, kDexClassPathEncodingSeparator, outof(split));
if (split.size() % 2 != 0) {
// Expected pairs of location and checksum.
*msg = StringPrintf("Odd number of elements in dependency list %s", dex_dependencies);
@@ -806,8 +827,8 @@
uint32_t dex_checksum;
std::string error_msg;
if (DexFile::GetChecksum(DexFile::GetDexCanonicalLocation(location.c_str()).c_str(),
- &dex_checksum,
- &error_msg)) {
+ outof(dex_checksum),
+ outof(error_msg))) {
if (converted != dex_checksum) {
*msg = StringPrintf("Checksums don't match for %s: %" PRId64 " vs %u",
location.c_str(), converted, dex_checksum);
@@ -826,8 +847,7 @@
}
bool OatFile::GetDexLocationsFromDependencies(const char* dex_dependencies,
- std::vector<std::string>* locations) {
- DCHECK(locations != nullptr);
+ out<std::vector<std::string>> locations) {
if (dex_dependencies == nullptr || dex_dependencies[0] == 0) {
return true;
}
@@ -835,7 +855,7 @@
// Assumption: this is not performance-critical. So it's OK to do this with a std::string and
// Split() instead of manual parsing of the combined char*.
std::vector<std::string> split;
- Split(dex_dependencies, kDexClassPathEncodingSeparator, &split);
+ Split(dex_dependencies, kDexClassPathEncodingSeparator, outof(split));
if (split.size() % 2 != 0) {
// Expected pairs of location and checksum.
return false;
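
One detail of the oat_file.cc conversion worth calling out: a function that receives out<std::string> error_msg cannot simply hand it to another out-taking callee; it re-wraps it with outof_forward, while pointer-style reads such as error_msg->empty() keep working through the wrapper's operator->. The recurring shape, sketched with an assumed callee:

    bool Outer(out<std::string> error_msg) {
      if (!Inner(outof_forward(error_msg))) {  // Inner also takes out<std::string>
        DCHECK(!error_msg->empty());           // operator-> reaches the string
        return false;
      }
      return true;
    }
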
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 7c4ef8b..6c40c68 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -22,6 +22,7 @@
#include <vector>
#include "base/mutex.h"
+#include "base/out_fwd.h"
#include "base/stringpiece.h"
#include "dex_file.h"
#include "invoke_type.h"
@@ -45,9 +46,10 @@
// Opens an oat file contained within the given elf file. This is always opened as
// non-executable at the moment.
- static OatFile* OpenWithElfFile(ElfFile* elf_file, const std::string& location,
+ static OatFile* OpenWithElfFile(ElfFile* elf_file,
+ const std::string& location,
const char* abs_dex_location,
- std::string* error_msg);
+ out<std::string> error_msg);
// Open an oat file. Returns null on failure. Requested base can
// optionally be used to request where the file should be loaded.
// See the ResolveRelativeEncodedDexLocation for a description of how the
@@ -58,20 +60,22 @@
uint8_t* oat_file_begin,
bool executable,
const char* abs_dex_location,
- std::string* error_msg);
+ out<std::string> error_msg);
// Open an oat file from an already opened File.
// Does not use dlopen underneath so cannot be used for runtime use
// where relocations may be required. Currently used from
// ImageWriter which wants to open a writable version from an existing
// file descriptor for patching.
- static OatFile* OpenWritable(File* file, const std::string& location,
+ static OatFile* OpenWritable(File* file,
+ const std::string& location,
const char* abs_dex_location,
- std::string* error_msg);
+ out<std::string> error_msg);
// Opens an oat file from an already opened File. Maps it PROT_READ, MAP_PRIVATE.
- static OatFile* OpenReadable(File* file, const std::string& location,
+ static OatFile* OpenReadable(File* file,
+ const std::string& location,
const char* abs_dex_location,
- std::string* error_msg);
+ out<std::string> error_msg);
~OatFile();
@@ -215,7 +219,7 @@
const OatDexFile* GetOatDexFile(const char* dex_location,
const uint32_t* const dex_location_checksum,
bool exception_if_not_found = true) const
- LOCKS_EXCLUDED(secondary_lookup_lock_);
+ REQUIRES(!secondary_lookup_lock_);
const std::vector<const OatDexFile*>& GetOatDexFiles() const {
return oat_dex_files_storage_;
@@ -252,12 +256,13 @@
// Check the given dependency list against their dex files - thus the name "Static," this does
// not check the class-loader environment, only whether there have been file updates.
- static bool CheckStaticDexFileDependencies(const char* dex_dependencies, std::string* msg);
+ static bool CheckStaticDexFileDependencies(const char* dex_dependencies,
+ out<std::string> error_msg);
// Get the dex locations of a dependency list. Note: this is *not* cleaned for synthetic
// locations of multidex files.
static bool GetDexLocationsFromDependencies(const char* dex_dependencies,
- std::vector<std::string>* locations);
+ out<std::vector<std::string>> locations);
private:
static void CheckLocation(const std::string& location);
@@ -266,7 +271,7 @@
const std::string& location,
uint8_t* requested_base,
const char* abs_dex_location,
- std::string* error_msg);
+ out<std::string> error_msg);
static OatFile* OpenElfFile(File* file,
const std::string& location,
@@ -275,18 +280,22 @@
bool writable,
bool executable,
const char* abs_dex_location,
- std::string* error_msg);
+ out<std::string> error_msg);
explicit OatFile(const std::string& filename, bool executable);
- bool Dlopen(const std::string& elf_filename, uint8_t* requested_base,
- const char* abs_dex_location, std::string* error_msg);
- bool ElfFileOpen(File* file, uint8_t* requested_base,
+ bool Dlopen(const std::string& elf_filename,
+ uint8_t* requested_base,
+ const char* abs_dex_location,
+ out<std::string> error_msg);
+ bool ElfFileOpen(File* file,
+ uint8_t* requested_base,
uint8_t* oat_file_begin, // Override where the file is loaded to if not null
- bool writable, bool executable,
+ bool writable,
+ bool executable,
const char* abs_dex_location,
- std::string* error_msg);
+ out<std::string> error_msg);
- bool Setup(const char* abs_dex_location, std::string* error_msg);
+ bool Setup(const char* abs_dex_location, out<std::string> error_msg);
// The oat file name.
//
@@ -365,7 +374,7 @@
class OatDexFile FINAL {
public:
// Opens the DexFile referred to by this OatDexFile from within the containing OatFile.
- std::unique_ptr<const DexFile> OpenDexFile(std::string* error_msg) const;
+ std::unique_ptr<const DexFile> OpenDexFile(out<std::string> error_msg) const;
const OatFile* GetOatFile() const {
return oat_file_;
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index 29b879e..e919b36 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -29,6 +29,7 @@
#include <set>
#include "base/logging.h"
+#include "base/out.h"
#include "base/stringprintf.h"
#include "class_linker.h"
#include "gc/heap.h"
@@ -230,7 +231,7 @@
return std::vector<std::unique_ptr<const DexFile>>();
}
- std::unique_ptr<const DexFile> dex_file = oat_dex_file->OpenDexFile(&error_msg);
+ std::unique_ptr<const DexFile> dex_file = oat_dex_file->OpenDexFile(outof(error_msg));
if (dex_file.get() == nullptr) {
LOG(WARNING) << "Failed to open dex file from oat dex file: " << error_msg;
return std::vector<std::unique_ptr<const DexFile>>();
@@ -246,7 +247,7 @@
break;
}
- dex_file = oat_dex_file->OpenDexFile(&error_msg);
+ dex_file = oat_dex_file->OpenDexFile(outof(error_msg));
if (dex_file.get() == nullptr) {
LOG(WARNING) << "Failed to open dex file from oat dex file: " << error_msg;
return std::vector<std::unique_ptr<const DexFile>>();
@@ -271,7 +272,7 @@
std::string error_msg;
cached_odex_file_name_found_ = DexFilenameToOdexFilename(
- dex_location_, isa_, &cached_odex_file_name_, &error_msg);
+ dex_location_, isa_, &cached_odex_file_name_, outof(error_msg));
if (!cached_odex_file_name_found_) {
// If we can't figure out the odex file, we treat it as if the odex
// file was inaccessible.
@@ -339,7 +340,7 @@
DalvikCacheDirectory().c_str(), GetInstructionSetString(isa_));
std::string error_msg;
cached_oat_file_name_found_ = GetDalvikCacheFilename(dex_location_,
- cache_dir.c_str(), &cached_oat_file_name_, &error_msg);
+ cache_dir.c_str(), &cached_oat_file_name_, outof(error_msg));
if (!cached_oat_file_name_found_) {
// If we can't determine the oat file name, we treat the oat file as
// inaccessible.
@@ -432,7 +433,7 @@
std::string error_msg;
uint32_t expected_secondary_checksum = 0;
if (DexFile::GetChecksum(secondary_dex_location.c_str(),
- &expected_secondary_checksum, &error_msg)) {
+ &expected_secondary_checksum, outof(error_msg))) {
uint32_t actual_secondary_checksum
= secondary_oat_dex_file->GetDexFileLocationChecksum();
if (expected_secondary_checksum != actual_secondary_checksum) {
@@ -722,7 +723,7 @@
if (runtime->IsDebuggable()) {
argv.push_back("--debuggable");
}
- runtime->AddCurrentRuntimeFeaturesAsDex2OatArguments(&argv);
+ runtime->AddCurrentRuntimeFeaturesAsDex2OatArguments(outof(argv));
if (!runtime->IsVerificationEnabled()) {
argv.push_back("--compiler-filter=verify-none");
@@ -873,7 +874,7 @@
std::string error_msg;
cached_odex_file_.reset(OatFile::Open(odex_file_name.c_str(),
odex_file_name.c_str(), nullptr, nullptr, load_executable_,
- dex_location_, &error_msg));
+ dex_location_, outof(error_msg)));
if (cached_odex_file_.get() == nullptr) {
VLOG(oat) << "OatFileAssistant test for existing pre-compiled oat file "
<< odex_file_name << ": " << error_msg;
@@ -904,7 +905,7 @@
std::string error_msg;
cached_oat_file_.reset(OatFile::Open(oat_file_name.c_str(),
oat_file_name.c_str(), nullptr, nullptr, load_executable_,
- dex_location_, &error_msg));
+ dex_location_, outof(error_msg)));
if (cached_oat_file_.get() == nullptr) {
VLOG(oat) << "OatFileAssistant test for existing oat file "
<< oat_file_name << ": " << error_msg;
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index d133fa3..4a0de59 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -26,6 +26,7 @@
#include <gtest/gtest.h>
#include "art_field-inl.h"
+#include "base/out.h"
#include "class_linker-inl.h"
#include "common_runtime_test.h"
#include "compiler_callbacks.h"
@@ -38,6 +39,28 @@
namespace art {
+// Some tests very occasionally fail: we expect to have an unrelocated non-pic
+// odex file that is reported as needing relocation, but it is reported
+// instead as being up to date (b/22599792).
+//
+// This function adds extra checks for diagnosing why the given oat file is
+// reported up to date, when it should be non-pic needing relocation.
+// These extra diagnostics checks should be removed once b/22599792 has been
+// resolved.
+static void DiagnoseFlakyTestFailure(const OatFile& oat_file) {
+ Runtime* runtime = Runtime::Current();
+ const gc::space::ImageSpace* image_space = runtime->GetHeap()->GetImageSpace();
+ ASSERT_TRUE(image_space != nullptr);
+ const ImageHeader& image_header = image_space->GetImageHeader();
+ const OatHeader& oat_header = oat_file.GetOatHeader();
+ EXPECT_FALSE(oat_file.IsPic());
+ EXPECT_EQ(image_header.GetOatChecksum(), oat_header.GetImageFileLocationOatChecksum());
+ EXPECT_NE(reinterpret_cast<uintptr_t>(image_header.GetOatDataBegin()),
+ oat_header.GetImageFileLocationOatDataBegin());
+ EXPECT_NE(image_header.GetPatchDelta(), oat_header.GetImagePatchDelta());
+}
+
+
class OatFileAssistantTest : public CommonRuntimeTest {
public:
virtual void SetUp() {
@@ -65,7 +88,7 @@
<< "Expected dex file to be at: " << GetDexSrc1();
ASSERT_TRUE(OS::FileExists(GetStrippedDexSrc1().c_str()))
<< "Expected stripped dex file to be at: " << GetStrippedDexSrc1();
- ASSERT_FALSE(DexFile::GetChecksum(GetStrippedDexSrc1().c_str(), &checksum, &error_msg))
+ ASSERT_FALSE(DexFile::GetChecksum(GetStrippedDexSrc1().c_str(), &checksum, outof(error_msg)))
<< "Expected stripped dex file to be stripped: " << GetStrippedDexSrc1();
ASSERT_TRUE(OS::FileExists(GetDexSrc2().c_str()))
<< "Expected dex file to be at: " << GetDexSrc2();
@@ -74,12 +97,12 @@
// GetMultiDexSrc1, but a different secondary dex checksum.
std::vector<std::unique_ptr<const DexFile>> multi1;
ASSERT_TRUE(DexFile::Open(GetMultiDexSrc1().c_str(),
- GetMultiDexSrc1().c_str(), &error_msg, &multi1)) << error_msg;
+ GetMultiDexSrc1().c_str(), outof(error_msg), &multi1)) << error_msg;
ASSERT_GT(multi1.size(), 1u);
std::vector<std::unique_ptr<const DexFile>> multi2;
ASSERT_TRUE(DexFile::Open(GetMultiDexSrc2().c_str(),
- GetMultiDexSrc2().c_str(), &error_msg, &multi2)) << error_msg;
+ GetMultiDexSrc2().c_str(), outof(error_msg), &multi2)) << error_msg;
ASSERT_GT(multi2.size(), 1u);
ASSERT_EQ(multi1[0]->GetLocationChecksum(), multi2[0]->GetLocationChecksum());
@@ -186,6 +209,7 @@
// Generate an odex file for the purposes of test.
// If pic is true, generates a PIC odex.
+ // The generated odex file will be unrelocated.
void GenerateOdexForTest(const std::string& dex_location,
const std::string& odex_location,
bool pic = false) {
@@ -208,8 +232,18 @@
args.push_back("--runtime-arg");
args.push_back("-Xnorelocate");
std::string error_msg;
- ASSERT_TRUE(OatFileAssistant::Dex2Oat(args, &error_msg)) << error_msg;
+ ASSERT_TRUE(OatFileAssistant::Dex2Oat(args, outof(error_msg))) << error_msg;
setenv("ANDROID_DATA", android_data_.c_str(), 1);
+
+ // Verify the odex file was generated as expected.
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(
+ odex_location.c_str(), odex_location.c_str(), nullptr, nullptr,
+ false, dex_location.c_str(), outof(error_msg)));
+ ASSERT_TRUE(odex_file.get() != nullptr) << error_msg;
+
+ if (!pic) {
+ DiagnoseFlakyTestFailure(*odex_file);
+ }
}
void GeneratePicOdexForTest(const std::string& dex_location,
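
DiagnoseFlakyTestFailure moves from the middle of the test file (its old copy is removed below) up to the top because GenerateOdexForTest now calls it, and a static function must be declared before its first use. The lighter-weight alternative would have been a forward declaration at the original call site:

    // Equivalent alternative (sketch): forward-declare instead of moving.
    static void DiagnoseFlakyTestFailure(const OatFile& oat_file);
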
@@ -250,7 +284,7 @@
image_reservation_.push_back(std::unique_ptr<MemMap>(
MemMap::MapAnonymous("image reservation",
reinterpret_cast<uint8_t*>(start), end - start,
- PROT_NONE, false, false, &error_msg)));
+ PROT_NONE, false, false, outof(error_msg))));
ASSERT_TRUE(image_reservation_.back().get() != nullptr) << error_msg;
LOG(INFO) << "Reserved space for image " <<
reinterpret_cast<void*>(image_reservation_.back()->Begin()) << "-" <<
@@ -285,7 +319,7 @@
OatFileAssistant oat_file_assistant(dex_location, kRuntimeISA, false);
std::string error_msg;
- ASSERT_TRUE(oat_file_assistant.GenerateOatFile(&error_msg)) << error_msg;
+ ASSERT_TRUE(oat_file_assistant.GenerateOatFile(outof(error_msg))) << error_msg;
}
// Case: We have a DEX file, but no OAT file for it.
@@ -324,7 +358,7 @@
// Trying to make the oat file up to date should not fail or crash.
std::string error_msg;
- EXPECT_TRUE(oat_file_assistant.MakeUpToDate(&error_msg));
+ EXPECT_TRUE(oat_file_assistant.MakeUpToDate(outof(error_msg)));
// Trying to get the best oat file should fail, but not crash.
std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
@@ -408,7 +442,7 @@
args.push_back("--oat-file=" + oat_location);
std::string error_msg;
- ASSERT_TRUE(OatFileAssistant::Dex2Oat(args, &error_msg)) << error_msg;
+ ASSERT_TRUE(OatFileAssistant::Dex2Oat(args, outof(error_msg))) << error_msg;
// Verify we can load both dex files.
OatFileAssistant oat_file_assistant(dex_location.c_str(),
@@ -446,27 +480,6 @@
EXPECT_TRUE(oat_file_assistant.HasOriginalDexFiles());
}
-// Some tests very occasionally fail: we expect to have an unrelocated non-pic
-// odex file that is reported as needing relocation, but it is reported
-// instead as being up to date (b/22599792).
-//
-// This function adds extra checks for diagnosing why the given oat file is
-// reported up to date, when it should be non-pic needing relocation.
-// These extra diagnostics checks should be removed once b/22599792 has been
-// resolved.
-static void DiagnoseFlakyTestFailure(const OatFile& oat_file) {
- Runtime* runtime = Runtime::Current();
- const gc::space::ImageSpace* image_space = runtime->GetHeap()->GetImageSpace();
- ASSERT_TRUE(image_space != nullptr);
- const ImageHeader& image_header = image_space->GetImageHeader();
- const OatHeader& oat_header = oat_file.GetOatHeader();
- EXPECT_FALSE(oat_file.IsPic());
- EXPECT_EQ(image_header.GetOatChecksum(), oat_header.GetImageFileLocationOatChecksum());
- EXPECT_NE(reinterpret_cast<uintptr_t>(image_header.GetOatDataBegin()),
- oat_header.GetImageFileLocationOatDataBegin());
- EXPECT_NE(image_header.GetPatchDelta(), oat_header.GetImagePatchDelta());
-}
-
// Case: We have a DEX file and an ODEX file, but no OAT file.
// Expect: The status is kPatchOatNeeded.
TEST_F(OatFileAssistantTest, DexOdexNoOat) {
@@ -528,7 +541,7 @@
// Make the oat file up to date.
std::string error_msg;
- ASSERT_TRUE(oat_file_assistant.MakeUpToDate(&error_msg)) << error_msg;
+ ASSERT_TRUE(oat_file_assistant.MakeUpToDate(outof(error_msg))) << error_msg;
EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded, oat_file_assistant.GetDexOptNeeded());
@@ -584,7 +597,7 @@
// Make the oat file up to date.
std::string error_msg;
- ASSERT_TRUE(oat_file_assistant.MakeUpToDate(&error_msg)) << error_msg;
+ ASSERT_TRUE(oat_file_assistant.MakeUpToDate(outof(error_msg))) << error_msg;
EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded, oat_file_assistant.GetDexOptNeeded());
@@ -632,7 +645,7 @@
// Make the oat file up to date. This should have no effect.
std::string error_msg;
- EXPECT_TRUE(oat_file_assistant.MakeUpToDate(&error_msg)) << error_msg;
+ EXPECT_TRUE(oat_file_assistant.MakeUpToDate(outof(error_msg))) << error_msg;
EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded, oat_file_assistant.GetDexOptNeeded());
@@ -676,7 +689,7 @@
// Make the oat file up to date.
std::string error_msg;
- ASSERT_TRUE(oat_file_assistant.MakeUpToDate(&error_msg)) << error_msg;
+ ASSERT_TRUE(oat_file_assistant.MakeUpToDate(outof(error_msg))) << error_msg;
EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded, oat_file_assistant.GetDexOptNeeded());
@@ -817,7 +830,7 @@
OatFileAssistant oat_file_assistant(
dex_location.c_str(), oat_location.c_str(), kRuntimeISA, true);
std::string error_msg;
- ASSERT_TRUE(oat_file_assistant.MakeUpToDate(&error_msg)) << error_msg;
+ ASSERT_TRUE(oat_file_assistant.MakeUpToDate(outof(error_msg))) << error_msg;
std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
ASSERT_TRUE(oat_file.get() != nullptr);
@@ -907,7 +920,7 @@
// Trying to make it up to date should have no effect.
std::string error_msg;
- EXPECT_TRUE(oat_file_assistant.MakeUpToDate(&error_msg));
+ EXPECT_TRUE(oat_file_assistant.MakeUpToDate(outof(error_msg)));
EXPECT_TRUE(error_msg.empty());
}
@@ -946,7 +959,9 @@
ClassLinker* linker = Runtime::Current()->GetClassLinker();
std::vector<std::unique_ptr<const DexFile>> dex_files;
std::vector<std::string> error_msgs;
- dex_files = linker->OpenDexFilesFromOat(dex_location_.c_str(), oat_location_.c_str(), &error_msgs);
+ dex_files = linker->OpenDexFilesFromOat(dex_location_.c_str(),
+ oat_location_.c_str(),
+ outof(error_msgs));
CHECK(!dex_files.empty()) << Join(error_msgs, '\n');
CHECK(dex_files[0]->GetOatDexFile() != nullptr) << dex_files[0]->GetLocation();
loaded_oat_file_ = dex_files[0]->GetOatDexFile()->GetOatFile();
@@ -1043,17 +1058,17 @@
std::string odex_file;
EXPECT_TRUE(OatFileAssistant::DexFilenameToOdexFilename(
- "/foo/bar/baz.jar", kArm, &odex_file, &error_msg)) << error_msg;
+ "/foo/bar/baz.jar", kArm, &odex_file, outof(error_msg))) << error_msg;
EXPECT_EQ("/foo/bar/oat/arm/baz.odex", odex_file);
EXPECT_TRUE(OatFileAssistant::DexFilenameToOdexFilename(
- "/foo/bar/baz.funnyext", kArm, &odex_file, &error_msg)) << error_msg;
+ "/foo/bar/baz.funnyext", kArm, &odex_file, outof(error_msg))) << error_msg;
EXPECT_EQ("/foo/bar/oat/arm/baz.odex", odex_file);
EXPECT_FALSE(OatFileAssistant::DexFilenameToOdexFilename(
- "nopath.jar", kArm, &odex_file, &error_msg));
+ "nopath.jar", kArm, &odex_file, outof(error_msg)));
EXPECT_FALSE(OatFileAssistant::DexFilenameToOdexFilename(
- "/foo/bar/baz_noext", kArm, &odex_file, &error_msg));
+ "/foo/bar/baz_noext", kArm, &odex_file, outof(error_msg)));
}
// Verify the dexopt status values from dalvik.system.DexFile
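The conversions above replace raw &error_msg out-parameters with outof(error_msg). The new base/out.h header is not part of this hunk, so the following is only a minimal sketch of the idea, with illustrative names: a wrapper bound to a reference that marks the write at the call site.

    // Minimal sketch of an out-parameter wrapper in the spirit of base/out.h.
    // Names and details are illustrative; the real header is not shown here.
    template <typename T>
    class out {
     public:
      explicit out(T& param) : param_(&param) {}
      T& operator*() const { return *param_; }   // *failures = 0;
      T* operator->() const { return param_; }   // argv->push_back(...);
     private:
      T* param_;  // Bound from a reference, so it can never be null.
    };

    // Call-site marker: outof(error_msg) reads as "error_msg is written to",
    // where a bare &error_msg could be an out-param, an array, or anything.
    template <typename T>
    out<T> outof(T& param) {
      return out<T>(param);
    }

Compared with a raw T*, the callee no longer needs a null check, because the wrapper cannot be constructed from nothing; that is exactly the DCHECK dropped from OpenDexFilesFromImage further down.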
diff --git a/runtime/oat_file_test.cc b/runtime/oat_file_test.cc
index a88553c..3bd6df2 100644
--- a/runtime/oat_file_test.cc
+++ b/runtime/oat_file_test.cc
@@ -20,6 +20,7 @@
#include <gtest/gtest.h>
+#include "base/out.h"
#include "common_runtime_test.h"
#include "scoped_thread_state_change.h"
@@ -75,16 +76,16 @@
std::string error_msg;
// No dependencies.
- EXPECT_TRUE(OatFile::CheckStaticDexFileDependencies(nullptr, &error_msg)) << error_msg;
- EXPECT_TRUE(OatFile::CheckStaticDexFileDependencies("", &error_msg)) << error_msg;
+ EXPECT_TRUE(OatFile::CheckStaticDexFileDependencies(nullptr, outof(error_msg))) << error_msg;
+ EXPECT_TRUE(OatFile::CheckStaticDexFileDependencies("", outof(error_msg))) << error_msg;
// Ill-formed dependencies.
- EXPECT_FALSE(OatFile::CheckStaticDexFileDependencies("abc", &error_msg));
- EXPECT_FALSE(OatFile::CheckStaticDexFileDependencies("abc*123*def", &error_msg));
- EXPECT_FALSE(OatFile::CheckStaticDexFileDependencies("abc*def*", &error_msg));
+ EXPECT_FALSE(OatFile::CheckStaticDexFileDependencies("abc", outof(error_msg)));
+ EXPECT_FALSE(OatFile::CheckStaticDexFileDependencies("abc*123*def", outof(error_msg)));
+ EXPECT_FALSE(OatFile::CheckStaticDexFileDependencies("abc*def*", outof(error_msg)));
// Unsatisfiable dependency.
- EXPECT_FALSE(OatFile::CheckStaticDexFileDependencies("abc*123*", &error_msg));
+ EXPECT_FALSE(OatFile::CheckStaticDexFileDependencies("abc*123*", outof(error_msg)));
// Load some dex files to be able to do a real test.
ScopedObjectAccess soa(Thread::Current());
@@ -92,10 +93,10 @@
std::vector<std::unique_ptr<const DexFile>> dex_files1 = OpenTestDexFiles("Main");
std::vector<const DexFile*> dex_files_const1 = ToConstDexFiles(dex_files1);
std::string encoding1 = OatFile::EncodeDexFileDependencies(dex_files_const1);
- EXPECT_TRUE(OatFile::CheckStaticDexFileDependencies(encoding1.c_str(), &error_msg))
+ EXPECT_TRUE(OatFile::CheckStaticDexFileDependencies(encoding1.c_str(), outof(error_msg)))
<< error_msg << " " << encoding1;
std::vector<std::string> split1;
- EXPECT_TRUE(OatFile::GetDexLocationsFromDependencies(encoding1.c_str(), &split1));
+ EXPECT_TRUE(OatFile::GetDexLocationsFromDependencies(encoding1.c_str(), outof(split1)));
ASSERT_EQ(split1.size(), 1U);
EXPECT_EQ(split1[0], dex_files_const1[0]->GetLocation());
@@ -103,10 +104,10 @@
EXPECT_GT(dex_files2.size(), 1U);
std::vector<const DexFile*> dex_files_const2 = ToConstDexFiles(dex_files2);
std::string encoding2 = OatFile::EncodeDexFileDependencies(dex_files_const2);
- EXPECT_TRUE(OatFile::CheckStaticDexFileDependencies(encoding2.c_str(), &error_msg))
+ EXPECT_TRUE(OatFile::CheckStaticDexFileDependencies(encoding2.c_str(), outof(error_msg)))
<< error_msg << " " << encoding2;
std::vector<std::string> split2;
- EXPECT_TRUE(OatFile::GetDexLocationsFromDependencies(encoding2.c_str(), &split2));
+ EXPECT_TRUE(OatFile::GetDexLocationsFromDependencies(encoding2.c_str(), outof(split2)));
ASSERT_EQ(split2.size(), 2U);
EXPECT_EQ(split2[0], dex_files_const2[0]->GetLocation());
EXPECT_EQ(split2[1], dex_files_const2[1]->GetLocation());
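The test inputs above pin the dependency encoding down well enough to sketch it: a well-formed string is a sequence of location*checksum* pairs, each terminated by a '*', with a numeric checksum. "abc" and "abc*123*def" lack a terminator, "abc*def*" has a non-numeric checksum, and "abc*123*" is well-formed but names a dex file that does not exist. A rough stand-alone parser under that reading (the real one lives in OatFile and may differ):

    #include <cstdint>
    #include <cstdlib>
    #include <string>
    #include <utility>
    #include <vector>

    // Splits "<location>*<checksum>*..." into (location, checksum) pairs.
    static bool SplitDependencies(const std::string& encoding,
                                  std::vector<std::pair<std::string, uint32_t>>* deps) {
      size_t pos = 0;
      while (pos < encoding.size()) {
        size_t loc_end = encoding.find('*', pos);
        if (loc_end == std::string::npos) {
          return false;  // No separator after the location: "abc".
        }
        size_t sum_end = encoding.find('*', loc_end + 1);
        if (sum_end == std::string::npos) {
          return false;  // Dangling trailing location: "abc*123*def".
        }
        std::string checksum = encoding.substr(loc_end + 1, sum_end - loc_end - 1);
        char* end = nullptr;
        unsigned long value = strtoul(checksum.c_str(), &end, 10);
        if (checksum.empty() || *end != '\0') {
          return false;  // Non-numeric checksum: "abc*def*".
        }
        deps->emplace_back(encoding.substr(pos, loc_end - pos),
                           static_cast<uint32_t>(value));
        pos = sum_end + 1;
      }
      return true;  // "abc*123*" parses; whether it is satisfiable is separate.
    }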
diff --git a/runtime/object_lock.h b/runtime/object_lock.h
index acddc03..eb7cbd8 100644
--- a/runtime/object_lock.h
+++ b/runtime/object_lock.h
@@ -28,15 +28,15 @@
template <typename T>
class ObjectLock {
public:
- ObjectLock(Thread* self, Handle<T> object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ObjectLock(Thread* self, Handle<T> object) SHARED_REQUIRES(Locks::mutator_lock_);
- ~ObjectLock() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ~ObjectLock() SHARED_REQUIRES(Locks::mutator_lock_);
- void WaitIgnoringInterrupts() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void WaitIgnoringInterrupts() SHARED_REQUIRES(Locks::mutator_lock_);
- void Notify() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void Notify() SHARED_REQUIRES(Locks::mutator_lock_);
- void NotifyAll() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void NotifyAll() SHARED_REQUIRES(Locks::mutator_lock_);
private:
Thread* const self_;
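This is the recurring rename in the rest of the change: the legacy SHARED_LOCKS_REQUIRED, EXCLUSIVE_LOCKS_REQUIRED, and LOCKS_EXCLUDED spellings give way to SHARED_REQUIRES and REQUIRES, matching Clang's current capability-based thread-safety attributes. A stand-alone illustration with simplified macros (ART's real definitions live elsewhere and differ):

    // Simplified illustration of the attributes behind the renamed macros.
    #define CAPABILITY(x) __attribute__((capability(x)))
    #define REQUIRES(...) __attribute__((requires_capability(__VA_ARGS__)))
    #define SHARED_REQUIRES(...) __attribute__((requires_shared_capability(__VA_ARGS__)))

    class CAPABILITY("mutex") Mutex {
     public:
      void Lock() __attribute__((acquire_capability()));
      void Unlock() __attribute__((release_capability()));
      void ReaderLock() __attribute__((acquire_shared_capability()));
      void ReaderUnlock() __attribute__((release_shared_capability()));
    };

    Mutex mutator_lock;

    // Caller must hold mutator_lock at least shared (for reading), the
    // analogue of the SHARED_REQUIRES(Locks::mutator_lock_) members above.
    void Notify() SHARED_REQUIRES(mutator_lock);

    // Caller must hold mutator_lock exclusively, the analogue of REQUIRES.
    void WriteSomething() REQUIRES(mutator_lock);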
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 7772354..7f82497 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -18,6 +18,7 @@
#include <sstream>
+#include "base/out.h"
#include "base/stringpiece.h"
#include "debugger.h"
#include "gc/heap.h"
@@ -41,12 +42,11 @@
// Runtime::Abort
}
-ParsedOptions* ParsedOptions::Create(const RuntimeOptions& options, bool ignore_unrecognized,
- RuntimeArgumentMap* runtime_options) {
- CHECK(runtime_options != nullptr);
-
+ParsedOptions* ParsedOptions::Create(const RuntimeOptions& options,
+ bool ignore_unrecognized,
+ out<RuntimeArgumentMap> runtime_options) {
std::unique_ptr<ParsedOptions> parsed(new ParsedOptions());
- if (parsed->Parse(options, ignore_unrecognized, runtime_options)) {
+ if (parsed->Parse(options, ignore_unrecognized, outof_forward(runtime_options))) {
return parsed.release();
}
return nullptr;
@@ -263,6 +263,9 @@
.Define("--cpu-abilist=_")
.WithType<std::string>()
.IntoKey(M::CpuAbiList)
+ .Define("-Xfingerprint:_")
+ .WithType<std::string>()
+ .IntoKey(M::Fingerprint)
.Define({"-Xexperimental-lambdas", "-Xnoexperimental-lambdas"})
.WithType<bool>()
.WithValues({true, false})
@@ -290,6 +293,7 @@
// As a side-effect, populate the hooks from options.
bool ParsedOptions::ProcessSpecialOptions(const RuntimeOptions& options,
RuntimeArgumentMap* runtime_options,
+ // TODO: should be an optional_out here.
std::vector<std::string>* out_options) {
using M = RuntimeArgumentMap;
@@ -396,7 +400,7 @@
}
bool ParsedOptions::Parse(const RuntimeOptions& options, bool ignore_unrecognized,
- RuntimeArgumentMap* runtime_options) {
+ out<RuntimeArgumentMap> runtime_options) {
for (size_t i = 0; i < options.size(); ++i) {
if (true && options[0].first == "-Xzygote") {
LOG(INFO) << "option[" << i << "]=" << options[i].first;
@@ -407,7 +411,9 @@
// Convert to a simple string list (without the magic pointer options)
std::vector<std::string> argv_list;
- if (!ProcessSpecialOptions(options, nullptr, &argv_list)) {
+ if (!ProcessSpecialOptions(options,
+ nullptr, // No runtime argument map
+ outof(argv_list))) {
return false;
}
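Note the asymmetry at the call sites: a plain local like argv_list is wrapped with outof(...), while runtime_options, which Create itself received as out<RuntimeArgumentMap>, is passed on with outof_forward(...). Presumably (base/out.h is not shown) outof takes a raw T& while outof_forward re-wraps an existing out<T>; continuing the earlier hypothetical sketch:

    // Hypothetical companion to the out<T> sketch above: forwards an
    // already-wrapped out-parameter to a callee that also takes out<T>.
    template <typename T>
    out<T> outof_forward(out<T> param) {
      return param;  // Nothing to rebind; just pass the wrapper through.
    }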
diff --git a/runtime/parsed_options.h b/runtime/parsed_options.h
index 529dd5c..bcd6228 100644
--- a/runtime/parsed_options.h
+++ b/runtime/parsed_options.h
@@ -22,6 +22,7 @@
#include <jni.h>
+#include "base/out_fwd.h"
#include "globals.h"
#include "gc/collector_type.h"
#include "gc/space/large_object_space.h"
@@ -50,8 +51,9 @@
static std::unique_ptr<RuntimeParser> MakeParser(bool ignore_unrecognized);
// returns true if parsing succeeds, and stores the resulting options into runtime_options
- static ParsedOptions* Create(const RuntimeOptions& options, bool ignore_unrecognized,
- RuntimeArgumentMap* runtime_options);
+ static ParsedOptions* Create(const RuntimeOptions& options,
+ bool ignore_unrecognized,
+ out<RuntimeArgumentMap> runtime_options);
bool (*hook_is_sensitive_thread_)();
jint (*hook_vfprintf_)(FILE* stream, const char* format, va_list ap);
@@ -63,6 +65,7 @@
bool ProcessSpecialOptions(const RuntimeOptions& options,
RuntimeArgumentMap* runtime_options,
+ // Optional out:
std::vector<std::string>* out_options);
void Usage(const char* fmt, ...);
@@ -72,8 +75,9 @@
void Exit(int status);
void Abort();
- bool Parse(const RuntimeOptions& options, bool ignore_unrecognized,
- RuntimeArgumentMap* runtime_options);
+ bool Parse(const RuntimeOptions& options,
+ bool ignore_unrecognized,
+ out<RuntimeArgumentMap> runtime_options);
};
} // namespace art
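Two comments in this header flag the nullable runtime_options parameter of ProcessSpecialOptions as a future optional_out. No such type exists in this change; if it followed the shape of the hypothetical out<T> sketched earlier, it might look like:

    // Hypothetical optional_out: an out-parameter the caller may omit, for
    // cases like ProcessSpecialOptions' nullable runtime_options above.
    template <typename T>
    class optional_out {
     public:
      optional_out() : param_(nullptr) {}                 // Caller opted out.
      explicit optional_out(T& param) : param_(&param) {}
      explicit operator bool() const { return param_ != nullptr; }
      T& operator*() const { return *param_; }
      T* operator->() const { return param_; }
     private:
      T* param_;
    };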
diff --git a/runtime/parsed_options_test.cc b/runtime/parsed_options_test.cc
index a8575de..81a48a6 100644
--- a/runtime/parsed_options_test.cc
+++ b/runtime/parsed_options_test.cc
@@ -18,6 +18,7 @@
#include <memory>
+#include "base/out.h"
#include "common_runtime_test.h"
namespace art {
@@ -60,7 +61,7 @@
options.push_back(std::make_pair("exit", test_exit));
RuntimeArgumentMap map;
- std::unique_ptr<ParsedOptions> parsed(ParsedOptions::Create(options, false, &map));
+ std::unique_ptr<ParsedOptions> parsed(ParsedOptions::Create(options, false, outof(map)));
ASSERT_TRUE(parsed.get() != nullptr);
ASSERT_NE(0u, map.Size());
@@ -102,7 +103,7 @@
options.push_back(std::make_pair("-Xgc:MC", nullptr));
RuntimeArgumentMap map;
- std::unique_ptr<ParsedOptions> parsed(ParsedOptions::Create(options, false, &map));
+ std::unique_ptr<ParsedOptions> parsed(ParsedOptions::Create(options, false, outof(map)));
ASSERT_TRUE(parsed.get() != nullptr);
ASSERT_NE(0u, map.Size());
diff --git a/runtime/prebuilt_tools_test.cc b/runtime/prebuilt_tools_test.cc
index 53bc876..a7f7bcd 100644
--- a/runtime/prebuilt_tools_test.cc
+++ b/runtime/prebuilt_tools_test.cc
@@ -23,7 +23,7 @@
namespace art {
// Run the tests only on host.
-#ifndef HAVE_ANDROID_OS
+#ifndef __ANDROID__
class PrebuiltToolsTest : public CommonRuntimeTest {
};
@@ -61,6 +61,6 @@
}
}
-#endif // HAVE_ANDROID_OS
+#endif // __ANDROID__
} // namespace art
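The guard moves from HAVE_ANDROID_OS, which the build system had to define explicitly, to __ANDROID__, which Android-targeting GCC and Clang predefine on their own. The pattern in use:

    #if defined(__ANDROID__)
    // Device-only code: __ANDROID__ comes from the toolchain, no -D needed.
    #else
    // Host-only code, such as the prebuilt-tools tests above.
    #endif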
diff --git a/runtime/profiler.cc b/runtime/profiler.cc
index 87b0d43..3db3265 100644
--- a/runtime/profiler.cc
+++ b/runtime/profiler.cc
@@ -59,13 +59,13 @@
public:
BoundedStackVisitor(std::vector<std::pair<ArtMethod*, uint32_t>>* stack,
Thread* thread, uint32_t max_depth)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
stack_(stack),
max_depth_(max_depth),
depth_(0) {}
- bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
if (m->IsRuntimeMethod()) {
return true;
@@ -88,7 +88,7 @@
// This is called from either a thread list traversal or from a checkpoint. Regardless
// of which caller, the mutator lock must be held.
-static void GetSample(Thread* thread, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+static void GetSample(Thread* thread, void* arg) SHARED_REQUIRES(Locks::mutator_lock_) {
BackgroundMethodSamplingProfiler* profiler =
reinterpret_cast<BackgroundMethodSamplingProfiler*>(arg);
const ProfilerOptions profile_options = profiler->GetProfilerOptions();
diff --git a/runtime/profiler.h b/runtime/profiler.h
index 7611487..30babe3 100644
--- a/runtime/profiler.h
+++ b/runtime/profiler.h
@@ -104,8 +104,8 @@
explicit ProfileSampleResults(Mutex& lock);
~ProfileSampleResults();
- void Put(ArtMethod* method);
- void PutStack(const std::vector<InstructionLocation>& stack_dump);
+ void Put(ArtMethod* method) REQUIRES(!lock_);
+ void PutStack(const std::vector<InstructionLocation>& stack_dump) REQUIRES(!lock_);
uint32_t Write(std::ostream &os, ProfileDataType type);
void ReadPrevious(int fd, ProfileDataType type);
void Clear();
@@ -168,17 +168,19 @@
// Start a profile thread with the user-supplied arguments.
// Returns true if the profile was started or if it was already running. Returns false otherwise.
static bool Start(const std::string& output_filename, const ProfilerOptions& options)
- LOCKS_EXCLUDED(Locks::mutator_lock_,
- Locks::thread_list_lock_,
- Locks::thread_suspend_count_lock_,
- Locks::profiler_lock_);
+ REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_,
+ !Locks::profiler_lock_);
- static void Stop() LOCKS_EXCLUDED(Locks::profiler_lock_, wait_lock_);
- static void Shutdown() LOCKS_EXCLUDED(Locks::profiler_lock_);
+ // NO_THREAD_SAFETY_ANALYSIS because this static function calls into a member function that excludes the lock.
+ static void Stop() REQUIRES(!Locks::profiler_lock_, !wait_lock_)
+ NO_THREAD_SAFETY_ANALYSIS;
+ // NO_THREAD_SAFETY_ANALYSIS because this static function calls into a member function that excludes the lock.
+ static void Shutdown() REQUIRES(!Locks::profiler_lock_) NO_THREAD_SAFETY_ANALYSIS;
- void RecordMethod(ArtMethod *method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void RecordStack(const std::vector<InstructionLocation>& stack) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool ProcessMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void RecordMethod(ArtMethod *method) SHARED_REQUIRES(Locks::mutator_lock_);
+ void RecordStack(const std::vector<InstructionLocation>& stack)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ bool ProcessMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
const ProfilerOptions& GetProfilerOptions() const { return options_; }
Barrier& GetBarrier() {
@@ -190,13 +192,15 @@
const std::string& output_filename, const ProfilerOptions& options);
// The sampling interval in microseconds is passed as an argument.
- static void* RunProfilerThread(void* arg) LOCKS_EXCLUDED(Locks::profiler_lock_);
+ // NO_THREAD_SAFETY_ANALYSIS because this static function calls into a member function that excludes the lock.
+ static void* RunProfilerThread(void* arg) REQUIRES(!Locks::profiler_lock_)
+ NO_THREAD_SAFETY_ANALYSIS;
- uint32_t WriteProfile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ uint32_t WriteProfile() SHARED_REQUIRES(Locks::mutator_lock_);
void CleanProfile();
- uint32_t DumpProfile(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static bool ShuttingDown(Thread* self) LOCKS_EXCLUDED(Locks::profiler_lock_);
+ uint32_t DumpProfile(std::ostream& os) SHARED_REQUIRES(Locks::mutator_lock_);
+ static bool ShuttingDown(Thread* self) REQUIRES(!Locks::profiler_lock_);
static BackgroundMethodSamplingProfiler* profiler_ GUARDED_BY(Locks::profiler_lock_);
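profiler.h shows the other half of the annotation migration: LOCKS_EXCLUDED(lock) becomes the negative capability REQUIRES(!lock), turning "caller must not hold this lock" from a hint into a checked precondition, while static entry points that take the lock indirectly get NO_THREAD_SAFETY_ANALYSIS because the checker cannot see through the member call. A minimal sketch of a negative capability (simplified macros again, not ART's):

    #define CAPABILITY(x) __attribute__((capability(x)))
    #define REQUIRES(...) __attribute__((requires_capability(__VA_ARGS__)))
    #define ACQUIRE(...) __attribute__((acquire_capability(__VA_ARGS__)))
    #define RELEASE(...) __attribute__((release_capability(__VA_ARGS__)))

    class CAPABILITY("mutex") Mutex {
     public:
      void Lock() ACQUIRE();
      void Unlock() RELEASE();
    };

    Mutex profiler_lock;

    // With negative-capability checking enabled, callers that might already
    // hold profiler_lock are flagged: re-locking it here would deadlock.
    void Shutdown() REQUIRES(!profiler_lock) {
      profiler_lock.Lock();
      // ... tear down the profiler ...
      profiler_lock.Unlock();
    }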
diff --git a/runtime/proxy_test.cc b/runtime/proxy_test.cc
index f40c0f1..c33b126 100644
--- a/runtime/proxy_test.cc
+++ b/runtime/proxy_test.cc
@@ -34,7 +34,7 @@
mirror::Class* GenerateProxyClass(ScopedObjectAccess& soa, jobject jclass_loader,
const char* className,
const std::vector<mirror::Class*>& interfaces)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
mirror::Class* javaLangObject = class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;");
CHECK(javaLangObject != nullptr);
diff --git a/runtime/quick/inline_method_analyser.h b/runtime/quick/inline_method_analyser.h
index 0d39e22..75ff27f 100644
--- a/runtime/quick/inline_method_analyser.h
+++ b/runtime/quick/inline_method_analyser.h
@@ -39,6 +39,7 @@
kIntrinsicFloatCvt,
kIntrinsicReverseBits,
kIntrinsicReverseBytes,
+ kIntrinsicNumberOfLeadingZeros,
kIntrinsicAbsInt,
kIntrinsicAbsLong,
kIntrinsicAbsFloat,
@@ -157,7 +158,7 @@
* @return true if the method is a candidate for inlining, false otherwise.
*/
static bool AnalyseMethodCode(verifier::MethodVerifier* verifier, InlineMethod* method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static constexpr bool IsInstructionIGet(Instruction::Code opcode) {
return Instruction::IGET <= opcode && opcode <= Instruction::IGET_SHORT;
@@ -182,16 +183,16 @@
static bool AnalyseReturnMethod(const DexFile::CodeItem* code_item, InlineMethod* result);
static bool AnalyseConstMethod(const DexFile::CodeItem* code_item, InlineMethod* result);
static bool AnalyseIGetMethod(verifier::MethodVerifier* verifier, InlineMethod* result)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static bool AnalyseIPutMethod(verifier::MethodVerifier* verifier, InlineMethod* result)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Can we fast path instance field access in a verified accessor?
// If yes, computes field's offset and volatility and whether the method is static or not.
static bool ComputeSpecialAccessorInfo(uint32_t field_idx, bool is_put,
verifier::MethodVerifier* verifier,
InlineIGetIPutData* result)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
};
} // namespace art
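kIntrinsicNumberOfLeadingZeros gives the inline-method analyser a tag for Integer.numberOfLeadingZeros and Long.numberOfLeadingZeros, so backends can emit a hardware count-leading-zeros instruction instead of calling the library method. The semantics being matched, sketched in C++:

    #include <cstdint>

    // Reference behaviour of Integer.numberOfLeadingZeros(int): the number of
    // zero bits above the most significant set bit, or 32 when the input is 0.
    // A recognized intrinsic lowers this to a single CLZ on ARM/ARM64.
    int NumberOfLeadingZeros32(uint32_t x) {
      return x == 0 ? 32 : __builtin_clz(x);  // __builtin_clz(0) is undefined.
    }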
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 02baad7..d1a4081 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -45,14 +45,14 @@
public:
CatchBlockStackVisitor(Thread* self, Context* context, Handle<mirror::Throwable>* exception,
QuickExceptionHandler* exception_handler)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: StackVisitor(self, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
self_(self),
exception_(exception),
exception_handler_(exception_handler) {
}
- bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
ArtMethod* method = GetMethod();
exception_handler_->SetHandlerFrameDepth(GetFrameDepth());
if (method == nullptr) {
@@ -83,7 +83,7 @@
private:
bool HandleTryItems(ArtMethod* method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
uint32_t dex_pc = DexFile::kDexNoIndex;
if (!method->IsNative()) {
dex_pc = GetDexPc();
@@ -159,7 +159,7 @@
class DeoptimizeStackVisitor FINAL : public StackVisitor {
public:
DeoptimizeStackVisitor(Thread* self, Context* context, QuickExceptionHandler* exception_handler)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: StackVisitor(self, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
self_(self),
exception_handler_(exception_handler),
@@ -167,7 +167,7 @@
stacked_shadow_frame_pushed_(false) {
}
- bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
exception_handler_->SetHandlerFrameDepth(GetFrameDepth());
ArtMethod* method = GetMethod();
if (method == nullptr) {
@@ -196,7 +196,7 @@
return static_cast<VRegKind>(kinds.at(reg * 2));
}
- bool HandleDeoptimization(ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool HandleDeoptimization(ArtMethod* m) SHARED_REQUIRES(Locks::mutator_lock_) {
const DexFile::CodeItem* code_item = m->GetCodeItem();
CHECK(code_item != nullptr);
uint16_t num_regs = code_item->registers_size_;
@@ -350,14 +350,14 @@
class InstrumentationStackVisitor : public StackVisitor {
public:
InstrumentationStackVisitor(Thread* self, size_t frame_depth)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: StackVisitor(self, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
frame_depth_(frame_depth),
instrumentation_frames_to_pop_(0) {
CHECK_NE(frame_depth_, kInvalidFrameDepth);
}
- bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
size_t current_frame_depth = GetFrameDepth();
if (current_frame_depth < frame_depth_) {
CHECK(GetMethod() != nullptr);
diff --git a/runtime/quick_exception_handler.h b/runtime/quick_exception_handler.h
index 8d7cd12..ce9085d 100644
--- a/runtime/quick_exception_handler.h
+++ b/runtime/quick_exception_handler.h
@@ -36,17 +36,17 @@
class QuickExceptionHandler {
public:
QuickExceptionHandler(Thread* self, bool is_deoptimization)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
NO_RETURN ~QuickExceptionHandler() {
LOG(FATAL) << "UNREACHABLE"; // Expected to take long jump.
UNREACHABLE();
}
- void FindCatch(mirror::Throwable* exception) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void DeoptimizeStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void UpdateInstrumentationStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- NO_RETURN void DoLongJump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void FindCatch(mirror::Throwable* exception) SHARED_REQUIRES(Locks::mutator_lock_);
+ void DeoptimizeStack() SHARED_REQUIRES(Locks::mutator_lock_);
+ void UpdateInstrumentationStack() SHARED_REQUIRES(Locks::mutator_lock_);
+ NO_RETURN void DoLongJump() SHARED_REQUIRES(Locks::mutator_lock_);
void SetHandlerQuickFrame(ArtMethod** handler_quick_frame) {
handler_quick_frame_ = handler_quick_frame;
diff --git a/runtime/read_barrier.h b/runtime/read_barrier.h
index 55cef68..e7ad731 100644
--- a/runtime/read_barrier.h
+++ b/runtime/read_barrier.h
@@ -49,7 +49,7 @@
bool kMaybeDuringStartup = false>
ALWAYS_INLINE static MirrorType* Barrier(
mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// It's up to the implementation whether the given root gets updated
// whereas the return value must be an updated reference.
@@ -57,7 +57,7 @@
bool kMaybeDuringStartup = false>
ALWAYS_INLINE static MirrorType* BarrierForRoot(MirrorType** root,
GcRootSource* gc_root_source = nullptr)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// It's up to the implementation whether the given root gets updated
// whereas the return value must be an updated reference.
@@ -65,24 +65,24 @@
bool kMaybeDuringStartup = false>
ALWAYS_INLINE static MirrorType* BarrierForRoot(mirror::CompressedReference<MirrorType>* root,
GcRootSource* gc_root_source = nullptr)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static bool IsDuringStartup();
// Without the holder object.
static void AssertToSpaceInvariant(mirror::Object* ref)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
}
// With the holder object.
static void AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
mirror::Object* ref)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// With GcRootSource.
static void AssertToSpaceInvariant(GcRootSource* gc_root_source, mirror::Object* ref)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- static mirror::Object* Mark(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static mirror::Object* Mark(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
static mirror::Object* WhitePtr() {
return reinterpret_cast<mirror::Object*>(white_ptr_);
@@ -96,7 +96,7 @@
ALWAYS_INLINE static bool HasGrayReadBarrierPointer(mirror::Object* obj,
uintptr_t* out_rb_ptr_high_bits)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Note: These couldn't be constexpr pointers as reinterpret_cast isn't compatible with them.
static constexpr uintptr_t white_ptr_ = 0x0; // Not marked.
diff --git a/runtime/read_barrier_c.h b/runtime/read_barrier_c.h
index 4f408dd..710c21f 100644
--- a/runtime/read_barrier_c.h
+++ b/runtime/read_barrier_c.h
@@ -47,9 +47,4 @@
#error "Only one of Baker or Brooks can be enabled at a time."
#endif
-// A placeholder marker to indicate places to add read barriers in the
-// assembly code. This is a development time aid and to be removed
-// after read barriers are added.
-#define THIS_LOAD_REQUIRES_READ_BARRIER
-
#endif // ART_RUNTIME_READ_BARRIER_C_H_
diff --git a/runtime/reference_table.cc b/runtime/reference_table.cc
index a31d8ac..49b6a38 100644
--- a/runtime/reference_table.cc
+++ b/runtime/reference_table.cc
@@ -62,7 +62,7 @@
// If "obj" is an array, return the number of elements in the array.
// Otherwise, return zero.
-static size_t GetElementCount(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+static size_t GetElementCount(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
// We assume the special cleared value isn't an array in the if statement below.
DCHECK(!Runtime::Current()->GetClearedJniWeakGlobal()->IsArrayInstance());
if (obj == nullptr || !obj->IsArrayInstance()) {
@@ -78,7 +78,7 @@
// or equivalent to the original.
static void DumpSummaryLine(std::ostream& os, mirror::Object* obj, size_t element_count,
int identical, int equiv)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (obj == nullptr) {
os << " null reference (count=" << equiv << ")\n";
return;
diff --git a/runtime/reference_table.h b/runtime/reference_table.h
index 94f16b6..f90ccd1 100644
--- a/runtime/reference_table.h
+++ b/runtime/reference_table.h
@@ -41,22 +41,22 @@
ReferenceTable(const char* name, size_t initial_size, size_t max_size);
~ReferenceTable();
- void Add(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void Add(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
- void Remove(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void Remove(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
size_t Size() const;
- void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void Dump(std::ostream& os) SHARED_REQUIRES(Locks::mutator_lock_);
void VisitRoots(RootVisitor* visitor, const RootInfo& root_info)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
private:
typedef std::vector<GcRoot<mirror::Object>,
TrackingAllocator<GcRoot<mirror::Object>, kAllocatorTagReferenceTable>> Table;
static void Dump(std::ostream& os, Table& entries)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
friend class IndirectReferenceTable; // For Dump.
std::string name_;
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index 11522d9..ee2e2c5 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -72,7 +72,7 @@
num_bytes_ += 4;
}
- void Append(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void Append(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
Append(StackReference<mirror::Object>::FromMirrorPtr(obj).AsVRegValue());
}
@@ -96,7 +96,7 @@
void BuildArgArrayFromVarArgs(const ScopedObjectAccessAlreadyRunnable& soa,
mirror::Object* receiver, va_list ap)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// Set receiver if non-null (method is not static)
if (receiver != nullptr) {
Append(receiver);
@@ -132,7 +132,7 @@
void BuildArgArrayFromJValues(const ScopedObjectAccessAlreadyRunnable& soa,
mirror::Object* receiver, jvalue* args)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// Set receiver if non-null (method is not static)
if (receiver != nullptr) {
Append(receiver);
@@ -171,7 +171,7 @@
}
void BuildArgArrayFromFrame(ShadowFrame* shadow_frame, uint32_t arg_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// Set receiver if non-null (method is not static)
size_t cur_arg = arg_offset;
if (!shadow_frame->GetMethod()->IsStatic()) {
@@ -206,7 +206,7 @@
static void ThrowIllegalPrimitiveArgumentException(const char* expected,
const char* found_descriptor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ThrowIllegalArgumentException(
StringPrintf("Invalid primitive conversion from %s to %s", expected,
PrettyDescriptor(found_descriptor).c_str()).c_str());
@@ -214,7 +214,7 @@
bool BuildArgArrayFromObjectArray(mirror::Object* receiver,
mirror::ObjectArray<mirror::Object>* args, ArtMethod* m)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
const DexFile::TypeList* classes = m->GetParameterTypeList();
// Set receiver if non-null (method is not static)
if (receiver != nullptr) {
@@ -343,7 +343,7 @@
};
static void CheckMethodArguments(JavaVMExt* vm, ArtMethod* m, uint32_t* args)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
const DexFile::TypeList* params = m->GetParameterTypeList();
if (params == nullptr) {
return; // No arguments so nothing to check.
@@ -418,7 +418,7 @@
}
static ArtMethod* FindVirtualMethod(mirror::Object* receiver, ArtMethod* method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(method, sizeof(void*));
}
@@ -426,7 +426,7 @@
static void InvokeWithArgArray(const ScopedObjectAccessAlreadyRunnable& soa,
ArtMethod* method, ArgArray* arg_array, JValue* result,
const char* shorty)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
uint32_t* args = arg_array->GetArray();
if (UNLIKELY(soa.Env()->check_jni)) {
CheckMethodArguments(soa.Vm(), method->GetInterfaceMethodIfProxy(sizeof(void*)), args);
@@ -436,7 +436,7 @@
JValue InvokeWithVarArgs(const ScopedObjectAccessAlreadyRunnable& soa, jobject obj, jmethodID mid,
va_list args)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// We want to make sure that the stack is not within a small distance from the
// protected region in case we are calling into a leaf function whose stack
// check has been elided.
@@ -730,7 +730,7 @@
}
static std::string UnboxingFailureKind(ArtField* f)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (f != nullptr) {
return "field " + PrettyField(f, false);
}
@@ -740,7 +740,7 @@
static bool UnboxPrimitive(mirror::Object* o,
mirror::Class* dst_class, ArtField* f,
JValue* unboxed_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
bool unbox_for_result = (f == nullptr);
if (!dst_class->IsPrimitive()) {
if (UNLIKELY(o != nullptr && !o->InstanceOf(dst_class))) {
diff --git a/runtime/reflection.h b/runtime/reflection.h
index 825a721..d9c38c1 100644
--- a/runtime/reflection.h
+++ b/runtime/reflection.h
@@ -33,60 +33,60 @@
class ShadowFrame;
mirror::Object* BoxPrimitive(Primitive::Type src_class, const JValue& value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool UnboxPrimitiveForField(mirror::Object* o, mirror::Class* dst_class, ArtField* f,
JValue* unboxed_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool UnboxPrimitiveForResult(mirror::Object* o, mirror::Class* dst_class, JValue* unboxed_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE bool ConvertPrimitiveValue(bool unbox_for_result,
Primitive::Type src_class, Primitive::Type dst_class,
const JValue& src, JValue* dst)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
JValue InvokeWithVarArgs(const ScopedObjectAccessAlreadyRunnable& soa, jobject obj, jmethodID mid,
va_list args)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
JValue InvokeWithJValues(const ScopedObjectAccessAlreadyRunnable& soa, jobject obj, jmethodID mid,
jvalue* args)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
JValue InvokeVirtualOrInterfaceWithJValues(const ScopedObjectAccessAlreadyRunnable& soa,
jobject obj, jmethodID mid, jvalue* args)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
JValue InvokeVirtualOrInterfaceWithVarArgs(const ScopedObjectAccessAlreadyRunnable& soa,
jobject obj, jmethodID mid, va_list args)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// num_frames is the number of frames we look up for the access check.
jobject InvokeMethod(const ScopedObjectAccessAlreadyRunnable& soa, jobject method, jobject receiver,
jobject args, size_t num_frames = 1)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE bool VerifyObjectIsClass(mirror::Object* o, mirror::Class* c)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool VerifyAccess(Thread* self, mirror::Object* obj, mirror::Class* declaring_class,
uint32_t access_flags, mirror::Class** calling_class, size_t num_frames)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// This version takes a known calling class.
bool VerifyAccess(Thread* self, mirror::Object* obj, mirror::Class* declaring_class,
uint32_t access_flags, mirror::Class* calling_class)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Get the calling class by using a stack visitor, may return null for unattached native threads.
mirror::Class* GetCallingClass(Thread* self, size_t num_frames)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void InvalidReceiverError(mirror::Object* o, mirror::Class* c)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void UpdateReference(Thread* self, jobject obj, mirror::Object* result)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
} // namespace art
diff --git a/runtime/reflection_test.cc b/runtime/reflection_test.cc
index 9707fb8..bd89be5 100644
--- a/runtime/reflection_test.cc
+++ b/runtime/reflection_test.cc
@@ -85,7 +85,7 @@
mirror::Object** receiver,
bool is_static, const char* method_name,
const char* method_signature)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
const char* class_name = is_static ? "StaticLeafMethods" : "NonStaticLeafMethods";
jobject jclass_loader(LoadDex(class_name));
Thread* self = Thread::Current();
diff --git a/runtime/runtime-inl.h b/runtime/runtime-inl.h
index 68d5ad2..380e72b 100644
--- a/runtime/runtime-inl.h
+++ b/runtime/runtime-inl.h
@@ -66,13 +66,13 @@
}
inline ArtMethod* Runtime::GetCalleeSaveMethod(CalleeSaveType type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(HasCalleeSaveMethod(type));
return GetCalleeSaveMethodUnchecked(type);
}
inline ArtMethod* Runtime::GetCalleeSaveMethodUnchecked(CalleeSaveType type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return reinterpret_cast<ArtMethod*>(callee_save_methods_[type]);
}
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index cc8b215..a27acb2 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -56,6 +56,7 @@
#include "atomic.h"
#include "base/arena_allocator.h"
#include "base/dumpable.h"
+#include "base/out.h"
#include "base/unix_file/fd_file.h"
#include "class_linker-inl.h"
#include "compiler_callbacks.h"
@@ -307,8 +308,8 @@
Thread* self = Thread::Current();
if (self == nullptr) {
os << "(Aborting thread was not attached to runtime!)\n";
- DumpKernelStack(os, GetTid(), " kernel: ", false);
- DumpNativeStack(os, GetTid(), " native: ", nullptr);
+ DumpKernelStack(os, GetTid(), " kernel: ", false /* don't include count */);
+ DumpNativeStack(os, GetTid(), " native: ", nullptr /* no ucontext ptr */);
} else {
os << "Aborting thread:\n";
if (Locks::mutator_lock_->IsExclusiveHeld(self) || Locks::mutator_lock_->IsSharedHeld(self)) {
@@ -417,7 +418,7 @@
if (Runtime::instance_ != nullptr) {
return false;
}
- InitLogging(nullptr); // Calls Locks::Init() as a side effect.
+ InitLogging(nullptr /* no argv */); // Calls Locks::Init() as a side effect.
instance_ = new Runtime;
if (!instance_->Init(options, ignore_unrecognized)) {
// TODO: Currently deleting the instance will abort the runtime on destruction. Now this will
@@ -590,7 +591,7 @@
return true;
}
-void Runtime::EndThreadBirth() EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_) {
+void Runtime::EndThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_) {
DCHECK_GT(threads_being_born_, 0U);
threads_being_born_--;
if (shutting_down_started_ && threads_being_born_ == 0) {
@@ -607,14 +608,14 @@
// See storage config details at http://source.android.com/tech/storage/
// Create private mount namespace shared by all children
if (unshare(CLONE_NEWNS) == -1) {
- PLOG(WARNING) << "Failed to unshare()";
+ PLOG(ERROR) << "Failed to unshare()";
return false;
}
// Mark rootfs as being a slave so that changes from default
// namespace only flow into our children.
if (mount("rootfs", "/", nullptr, (MS_SLAVE | MS_REC), nullptr) == -1) {
- PLOG(WARNING) << "Failed to mount() rootfs as MS_SLAVE";
+ PLOG(ERROR) << "Failed to mount() rootfs as MS_SLAVE";
return false;
}
@@ -625,7 +626,7 @@
if (target_base != nullptr) {
if (mount("tmpfs", target_base, "tmpfs", MS_NOSUID | MS_NODEV,
"uid=0,gid=1028,mode=0751") == -1) {
- LOG(WARNING) << "Failed to mount tmpfs to " << target_base;
+ PLOG(ERROR) << "Failed to mount tmpfs to " << target_base;
return false;
}
}
@@ -659,7 +660,7 @@
// before fork aren't attributed to an app.
heap_->ResetGcPerformanceInfo();
- if (jit_.get() == nullptr && jit_options_->UseJIT()) {
+ if (jit_ == nullptr && jit_options_->UseJIT()) {
// Create the JIT if the flag is set and we haven't already created it (happens for run-tests).
CreateJit();
}
@@ -707,9 +708,8 @@
}
static bool OpenDexFilesFromImage(const std::string& image_location,
- std::vector<std::unique_ptr<const DexFile>>* dex_files,
- size_t* failures) {
- DCHECK(dex_files != nullptr) << "OpenDexFilesFromImage: out-param is nullptr";
+ out<std::vector<std::unique_ptr<const DexFile>>> dex_files,
+ out<size_t> failures) {
std::string system_filename;
bool has_system = false;
std::string cache_filename_unused;
@@ -718,12 +718,12 @@
bool is_global_cache_unused;
bool found_image = gc::space::ImageSpace::FindImageFilename(image_location.c_str(),
kRuntimeISA,
- &system_filename,
- &has_system,
- &cache_filename_unused,
- &dalvik_cache_exists_unused,
- &has_cache_unused,
- &is_global_cache_unused);
+ outof(system_filename),
+ outof(has_system),
+ outof(cache_filename_unused),
+ outof(dalvik_cache_exists_unused),
+ outof(has_cache_unused),
+ outof(is_global_cache_unused));
*failures = 0;
if (!found_image || !has_system) {
return false;
@@ -737,12 +737,12 @@
if (file.get() == nullptr) {
return false;
}
- std::unique_ptr<ElfFile> elf_file(ElfFile::Open(file.release(), false, false, &error_msg));
+ std::unique_ptr<ElfFile> elf_file(ElfFile::Open(file.release(), false, false, outof(error_msg)));
if (elf_file.get() == nullptr) {
return false;
}
std::unique_ptr<OatFile> oat_file(OatFile::OpenWithElfFile(elf_file.release(), oat_location,
- nullptr, &error_msg));
+ nullptr, outof(error_msg)));
if (oat_file.get() == nullptr) {
LOG(INFO) << "Unable to use '" << oat_filename << "' because " << error_msg;
return false;
@@ -753,7 +753,7 @@
*failures += 1;
continue;
}
- std::unique_ptr<const DexFile> dex_file = oat_dex_file->OpenDexFile(&error_msg);
+ std::unique_ptr<const DexFile> dex_file = oat_dex_file->OpenDexFile(outof(error_msg));
if (dex_file.get() == nullptr) {
*failures += 1;
} else {
@@ -768,10 +768,11 @@
static size_t OpenDexFiles(const std::vector<std::string>& dex_filenames,
const std::vector<std::string>& dex_locations,
const std::string& image_location,
- std::vector<std::unique_ptr<const DexFile>>* dex_files) {
- DCHECK(dex_files != nullptr) << "OpenDexFiles: out-param is nullptr";
+ out<std::vector<std::unique_ptr<const DexFile>>> dex_files) {
size_t failure_count = 0;
- if (!image_location.empty() && OpenDexFilesFromImage(image_location, dex_files, &failure_count)) {
+ if (!image_location.empty() && OpenDexFilesFromImage(image_location,
+ outof_forward(dex_files),
+ outof(failure_count))) {
return failure_count;
}
failure_count = 0;
@@ -783,7 +784,7 @@
LOG(WARNING) << "Skipping non-existent dex file '" << dex_filename << "'";
continue;
}
- if (!DexFile::Open(dex_filename, dex_location, &error_msg, dex_files)) {
+ if (!DexFile::Open(dex_filename, dex_location, outof(error_msg), outof_forward(dex_files))) {
LOG(WARNING) << "Failed to open .dex from file '" << dex_filename << "': " << error_msg;
++failure_count;
}
@@ -800,7 +801,7 @@
using Opt = RuntimeArgumentMap;
RuntimeArgumentMap runtime_options;
std::unique_ptr<ParsedOptions> parsed_options(
- ParsedOptions::Create(raw_options, ignore_unrecognized, &runtime_options));
+ ParsedOptions::Create(raw_options, ignore_unrecognized, outof(runtime_options)));
if (parsed_options.get() == nullptr) {
LOG(ERROR) << "Failed to parse options";
ATRACE_END();
@@ -852,6 +853,8 @@
Split(runtime_options.GetOrDefault(Opt::CpuAbiList), ',', &cpu_abilist_);
+ fingerprint_ = runtime_options.ReleaseOrDefault(Opt::Fingerprint);
+
if (runtime_options.GetOrDefault(Opt::Interpret)) {
GetInstrumentation()->ForceInterpretOnly();
}
@@ -1036,7 +1039,7 @@
OpenDexFiles(dex_filenames,
dex_locations,
runtime_options.GetOrDefault(Opt::Image),
- &boot_class_path);
+ outof(boot_class_path));
instruction_set_ = runtime_options.GetOrDefault(Opt::ImageInstructionSet);
class_linker_->InitWithoutImage(std::move(boot_class_path));
@@ -1165,7 +1168,7 @@
// the library that implements System.loadLibrary!
{
std::string reason;
- if (!java_vm_->LoadNativeLibrary(env, "libjavacore.so", nullptr, &reason)) {
+ if (!java_vm_->LoadNativeLibrary(env, "libjavacore.so", nullptr, outof(reason))) {
LOG(FATAL) << "LoadNativeLibrary failed for \"libjavacore.so\": " << reason;
}
}
@@ -1328,7 +1331,9 @@
signals.Block();
}
-bool Runtime::AttachCurrentThread(const char* thread_name, bool as_daemon, jobject thread_group,
+bool Runtime::AttachCurrentThread(const char* thread_name,
+ bool as_daemon,
+ jobject thread_group,
bool create_peer) {
return Thread::Attach(thread_name, as_daemon, thread_group, create_peer) != nullptr;
}
@@ -1434,7 +1439,8 @@
thread_list_->VisitRoots(visitor);
}
-size_t Runtime::FlipThreadRoots(Closure* thread_flip_visitor, Closure* flip_callback,
+size_t Runtime::FlipThreadRoots(Closure* thread_flip_visitor,
+ Closure* flip_callback,
gc::collector::GarbageCollector* collector) {
return thread_list_->FlipThreadRoots(thread_flip_visitor, flip_callback, collector);
}
@@ -1621,50 +1627,64 @@
preinitialization_transaction_->ThrowAbortError(self, nullptr);
}
-void Runtime::RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset,
- uint8_t value, bool is_volatile) const {
+void Runtime::RecordWriteFieldBoolean(mirror::Object* obj,
+ MemberOffset field_offset,
+ uint8_t value,
+ bool is_volatile) const {
DCHECK(IsAotCompiler());
DCHECK(IsActiveTransaction());
preinitialization_transaction_->RecordWriteFieldBoolean(obj, field_offset, value, is_volatile);
}
-void Runtime::RecordWriteFieldByte(mirror::Object* obj, MemberOffset field_offset,
- int8_t value, bool is_volatile) const {
+void Runtime::RecordWriteFieldByte(mirror::Object* obj,
+ MemberOffset field_offset,
+ int8_t value,
+ bool is_volatile) const {
DCHECK(IsAotCompiler());
DCHECK(IsActiveTransaction());
preinitialization_transaction_->RecordWriteFieldByte(obj, field_offset, value, is_volatile);
}
-void Runtime::RecordWriteFieldChar(mirror::Object* obj, MemberOffset field_offset,
- uint16_t value, bool is_volatile) const {
+void Runtime::RecordWriteFieldChar(mirror::Object* obj,
+ MemberOffset field_offset,
+ uint16_t value,
+ bool is_volatile) const {
DCHECK(IsAotCompiler());
DCHECK(IsActiveTransaction());
preinitialization_transaction_->RecordWriteFieldChar(obj, field_offset, value, is_volatile);
}
-void Runtime::RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_offset,
- int16_t value, bool is_volatile) const {
+void Runtime::RecordWriteFieldShort(mirror::Object* obj,
+ MemberOffset field_offset,
+ int16_t value,
+ bool is_volatile) const {
DCHECK(IsAotCompiler());
DCHECK(IsActiveTransaction());
preinitialization_transaction_->RecordWriteFieldShort(obj, field_offset, value, is_volatile);
}
-void Runtime::RecordWriteField32(mirror::Object* obj, MemberOffset field_offset,
- uint32_t value, bool is_volatile) const {
+void Runtime::RecordWriteField32(mirror::Object* obj,
+ MemberOffset field_offset,
+ uint32_t value,
+ bool is_volatile) const {
DCHECK(IsAotCompiler());
DCHECK(IsActiveTransaction());
preinitialization_transaction_->RecordWriteField32(obj, field_offset, value, is_volatile);
}
-void Runtime::RecordWriteField64(mirror::Object* obj, MemberOffset field_offset,
- uint64_t value, bool is_volatile) const {
+void Runtime::RecordWriteField64(mirror::Object* obj,
+ MemberOffset field_offset,
+ uint64_t value,
+ bool is_volatile) const {
DCHECK(IsAotCompiler());
DCHECK(IsActiveTransaction());
preinitialization_transaction_->RecordWriteField64(obj, field_offset, value, is_volatile);
}
-void Runtime::RecordWriteFieldReference(mirror::Object* obj, MemberOffset field_offset,
- mirror::Object* value, bool is_volatile) const {
+void Runtime::RecordWriteFieldReference(mirror::Object* obj,
+ MemberOffset field_offset,
+ mirror::Object* value,
+ bool is_volatile) const {
DCHECK(IsAotCompiler());
DCHECK(IsActiveTransaction());
preinitialization_transaction_->RecordWriteFieldReference(obj, field_offset, value, is_volatile);
@@ -1705,7 +1725,7 @@
fault_message_ = message;
}
-void Runtime::AddCurrentRuntimeFeaturesAsDex2OatArguments(std::vector<std::string>* argv)
+void Runtime::AddCurrentRuntimeFeaturesAsDex2OatArguments(out<std::vector<std::string>> argv)
const {
if (GetInstrumentation()->InterpretOnly() || UseJit()) {
argv->push_back("--compiler-filter=interpret-only");
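The body of AddCurrentRuntimeFeaturesAsDex2OatArguments keeps writing through argv->push_back(...) even though argv is now out<std::vector<std::string>> rather than a raw pointer, which works as long as the wrapper forwards operator-> as in the earlier sketch. An illustrative caller:

    // Illustrative only, reusing the hypothetical outof() from above.
    std::vector<std::string> argv;
    Runtime::Current()->AddCurrentRuntimeFeaturesAsDex2OatArguments(outof(argv));
    // argv now holds flags such as "--compiler-filter=interpret-only".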
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 55adaf1..206623e 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -28,6 +28,7 @@
#include "arch/instruction_set.h"
#include "base/macros.h"
+#include "base/out.h"
#include "gc_root.h"
#include "instrumentation.h"
#include "jobject_comparator.h"
@@ -184,19 +185,19 @@
bool Start() UNLOCK_FUNCTION(Locks::mutator_lock_);
bool IsShuttingDown(Thread* self);
- bool IsShuttingDownLocked() const EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_) {
+ bool IsShuttingDownLocked() const REQUIRES(Locks::runtime_shutdown_lock_) {
return shutting_down_;
}
- size_t NumberOfThreadsBeingBorn() const EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_) {
+ size_t NumberOfThreadsBeingBorn() const REQUIRES(Locks::runtime_shutdown_lock_) {
return threads_being_born_;
}
- void StartThreadBirth() EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_) {
+ void StartThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_) {
threads_being_born_++;
}
- void EndThreadBirth() EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_);
+ void EndThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_);
bool IsStarted() const {
return started_;
@@ -212,7 +213,7 @@
// Aborts semi-cleanly. Used in the implementation of LOG(FATAL), which most
// callers should prefer.
- NO_RETURN static void Abort() LOCKS_EXCLUDED(Locks::abort_lock_);
+ NO_RETURN static void Abort() REQUIRES(!Locks::abort_lock_);
// Returns the "main" ThreadGroup, used when attaching user threads.
jobject GetMainThreadGroup() const;
@@ -224,13 +225,15 @@
jobject GetSystemClassLoader() const;
// Attaches the calling native thread to the runtime.
- bool AttachCurrentThread(const char* thread_name, bool as_daemon, jobject thread_group,
+ bool AttachCurrentThread(const char* thread_name,
+ bool as_daemon,
+ jobject thread_group,
bool create_peer);
void CallExitHook(jint status);
// Detaches the current native thread from the runtime.
- void DetachCurrentThread() LOCKS_EXCLUDED(Locks::mutator_lock_);
+ void DetachCurrentThread() REQUIRES(!Locks::mutator_lock_);
void DumpForSigQuit(std::ostream& os);
void DumpLockHolders(std::ostream& os);
@@ -279,15 +282,14 @@
}
// Is the given object the special object used to mark a cleared JNI weak global?
- bool IsClearedJniWeakGlobal(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsClearedJniWeakGlobal(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
// Get the special object used to mark a cleared JNI weak global.
- mirror::Object* GetClearedJniWeakGlobal() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Object* GetClearedJniWeakGlobal() SHARED_REQUIRES(Locks::mutator_lock_);
- mirror::Throwable* GetPreAllocatedOutOfMemoryError() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Throwable* GetPreAllocatedOutOfMemoryError() SHARED_REQUIRES(Locks::mutator_lock_);
- mirror::Throwable* GetPreAllocatedNoClassDefFoundError()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Throwable* GetPreAllocatedNoClassDefFoundError() SHARED_REQUIRES(Locks::mutator_lock_);
const std::vector<std::string>& GetProperties() const {
return properties_;
@@ -301,77 +303,77 @@
return "2.1.0";
}
- void DisallowNewSystemWeaks() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void AllowNewSystemWeaks() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void EnsureNewSystemWeaksDisallowed() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void BroadcastForNewSystemWeaks() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void DisallowNewSystemWeaks() SHARED_REQUIRES(Locks::mutator_lock_);
+ void AllowNewSystemWeaks() SHARED_REQUIRES(Locks::mutator_lock_);
+ void EnsureNewSystemWeaksDisallowed() SHARED_REQUIRES(Locks::mutator_lock_);
+ void BroadcastForNewSystemWeaks() SHARED_REQUIRES(Locks::mutator_lock_);
// Visit all the roots. If only_dirty is true then non-dirty roots won't be visited. If
// clean_dirty is true then dirty roots will be marked as non-dirty after visiting.
void VisitRoots(RootVisitor* visitor, VisitRootFlags flags = kVisitRootFlagAllRoots)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Visit image roots, only used for hprof since the GC uses the image space mod union table
// instead.
- void VisitImageRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void VisitImageRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
// Visit all of the roots we can do safely do concurrently.
- void VisitConcurrentRoots(RootVisitor* visitor,
- VisitRootFlags flags = kVisitRootFlagAllRoots)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void VisitConcurrentRoots(RootVisitor* visitor, VisitRootFlags flags = kVisitRootFlagAllRoots)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Visit all of the non thread roots, we can do this with mutators unpaused.
void VisitNonThreadRoots(RootVisitor* visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void VisitTransactionRoots(RootVisitor* visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Visit all of the thread roots.
- void VisitThreadRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void VisitThreadRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
// Flip thread roots from from-space refs to to-space refs.
- size_t FlipThreadRoots(Closure* thread_flip_visitor, Closure* flip_callback,
+ size_t FlipThreadRoots(Closure* thread_flip_visitor,
+ Closure* flip_callback,
gc::collector::GarbageCollector* collector)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
+ REQUIRES(!Locks::mutator_lock_);
// Visit all other roots which must be done with mutators suspended.
void VisitNonConcurrentRoots(RootVisitor* visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Sweep system weaks, the system weak is deleted if the visitor return null. Otherwise, the
// system weak is updated to be the visitor's returned value.
void SweepSystemWeaks(IsMarkedVisitor* visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Constant roots are the roots which never change after the runtime is initialized, they only
// need to be visited once per GC cycle.
void VisitConstantRoots(RootVisitor* visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Returns a special method that calls into a trampoline for runtime method resolution
- ArtMethod* GetResolutionMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtMethod* GetResolutionMethod() SHARED_REQUIRES(Locks::mutator_lock_);
bool HasResolutionMethod() const {
return resolution_method_ != nullptr;
}
- void SetResolutionMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetResolutionMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
- ArtMethod* CreateResolutionMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtMethod* CreateResolutionMethod() SHARED_REQUIRES(Locks::mutator_lock_);
// Returns a special method that calls into a trampoline for runtime imt conflicts.
- ArtMethod* GetImtConflictMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ArtMethod* GetImtUnimplementedMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtMethod* GetImtConflictMethod() SHARED_REQUIRES(Locks::mutator_lock_);
+ ArtMethod* GetImtUnimplementedMethod() SHARED_REQUIRES(Locks::mutator_lock_);
bool HasImtConflictMethod() const {
return imt_conflict_method_ != nullptr;
}
- void SetImtConflictMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetImtUnimplementedMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetImtConflictMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetImtUnimplementedMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
- ArtMethod* CreateImtConflictMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtMethod* CreateImtConflictMethod() SHARED_REQUIRES(Locks::mutator_lock_);
// Returns a special method that describes all callee saves being spilled to the stack.
enum CalleeSaveType {
@@ -386,17 +388,17 @@
}
ArtMethod* GetCalleeSaveMethod(CalleeSaveType type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* GetCalleeSaveMethodUnchecked(CalleeSaveType type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
QuickMethodFrameInfo GetCalleeSaveMethodFrameInfo(CalleeSaveType type) const {
return callee_save_method_frame_infos_[type];
}
QuickMethodFrameInfo GetRuntimeMethodFrameInfo(ArtMethod* method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static size_t GetCalleeSaveMethodOffset(CalleeSaveType type) {
return OFFSETOF_MEMBER(Runtime, callee_save_methods_[type]);
@@ -410,7 +412,7 @@
void SetCalleeSaveMethod(ArtMethod* method, CalleeSaveType type);
- ArtMethod* CreateCalleeSaveMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtMethod* CreateCalleeSaveMethod() SHARED_REQUIRES(Locks::mutator_lock_);
int32_t GetStat(int kind);
@@ -424,8 +426,8 @@
void ResetStats(int kinds);
- void SetStatsEnabled(bool new_state) LOCKS_EXCLUDED(Locks::instrument_entrypoints_lock_,
- Locks::mutator_lock_);
+ void SetStatsEnabled(bool new_state)
+ REQUIRES(!Locks::instrument_entrypoints_lock_, !Locks::mutator_lock_);
enum class NativeBridgeAction { // private
kUnload,
@@ -463,43 +465,57 @@
bool IsTransactionAborted() const;
void AbortTransactionAndThrowAbortError(Thread* self, const std::string& abort_message)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void ThrowTransactionAbortError(Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- void RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset, uint8_t value,
+ void RecordWriteFieldBoolean(mirror::Object* obj,
+ MemberOffset field_offset,
+ uint8_t value,
bool is_volatile) const;
- void RecordWriteFieldByte(mirror::Object* obj, MemberOffset field_offset, int8_t value,
+ void RecordWriteFieldByte(mirror::Object* obj,
+ MemberOffset field_offset,
+ int8_t value,
bool is_volatile) const;
- void RecordWriteFieldChar(mirror::Object* obj, MemberOffset field_offset, uint16_t value,
+ void RecordWriteFieldChar(mirror::Object* obj,
+ MemberOffset field_offset,
+ uint16_t value,
bool is_volatile) const;
- void RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_offset, int16_t value,
+ void RecordWriteFieldShort(mirror::Object* obj,
+ MemberOffset field_offset,
+ int16_t value,
bool is_volatile) const;
- void RecordWriteField32(mirror::Object* obj, MemberOffset field_offset, uint32_t value,
+ void RecordWriteField32(mirror::Object* obj,
+ MemberOffset field_offset,
+ uint32_t value,
bool is_volatile) const;
- void RecordWriteField64(mirror::Object* obj, MemberOffset field_offset, uint64_t value,
+ void RecordWriteField64(mirror::Object* obj,
+ MemberOffset field_offset,
+ uint64_t value,
bool is_volatile) const;
- void RecordWriteFieldReference(mirror::Object* obj, MemberOffset field_offset,
- mirror::Object* value, bool is_volatile) const;
+ void RecordWriteFieldReference(mirror::Object* obj,
+ MemberOffset field_offset,
+ mirror::Object* value,
+ bool is_volatile) const;
void RecordWriteArray(mirror::Array* array, size_t index, uint64_t value) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void RecordStrongStringInsertion(mirror::String* s) const
- EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+ REQUIRES(Locks::intern_table_lock_);
void RecordWeakStringInsertion(mirror::String* s) const
- EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+ REQUIRES(Locks::intern_table_lock_);
void RecordStrongStringRemoval(mirror::String* s) const
- EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+ REQUIRES(Locks::intern_table_lock_);
void RecordWeakStringRemoval(mirror::String* s) const
- EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+ REQUIRES(Locks::intern_table_lock_);
- void SetFaultMessage(const std::string& message);
+ void SetFaultMessage(const std::string& message) REQUIRES(!fault_message_lock_);
// Only read by the signal handler; NO_THREAD_SAFETY_ANALYSIS prevents lock order violations
// with the unexpected_signal_lock_.
const std::string& GetFaultMessage() NO_THREAD_SAFETY_ANALYSIS {
return fault_message_;
}
- void AddCurrentRuntimeFeaturesAsDex2OatArguments(std::vector<std::string>* arg_vector) const;
+ void AddCurrentRuntimeFeaturesAsDex2OatArguments(out<std::vector<std::string>> arg_vector) const;
bool ExplicitStackOverflowChecks() const {
return !implicit_so_checks_;
@@ -563,6 +579,11 @@
bool IsDebuggable() const;
+ // Returns the build fingerprint, if set. Otherwise an empty string is returned.
+ std::string GetFingerprint() {
+ return fingerprint_;
+ }
+
private:
static void InitPlatformSignalHandlers();
@@ -572,7 +593,7 @@
bool Init(const RuntimeOptions& options, bool ignore_unrecognized)
SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);
- void InitNativeMethods() LOCKS_EXCLUDED(Locks::mutator_lock_);
+ void InitNativeMethods() REQUIRES(!Locks::mutator_lock_);
void InitThreadGroups(Thread* self);
void RegisterRuntimeNativeMethods(JNIEnv* env);
@@ -757,6 +778,9 @@
MethodRefToStringInitRegMap method_ref_string_init_reg_map_;
+ // Contains the build fingerprint, if given as a parameter.
+ std::string fingerprint_;
+
DISALLOW_COPY_AND_ASSIGN(Runtime);
};
std::ostream& operator<<(std::ostream& os, const Runtime::CalleeSaveType& rhs);
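Throughout runtime.h (and the rest of this change) the old advisory macros are replaced by Clang thread-safety annotations that -Wthread-safety-negative can actually check: REQUIRES(!lock) is a verifiable claim that the caller does not hold the lock, whereas LOCKS_EXCLUDED was unchecked at call sites. A minimal compile-time sketch, assuming macro definitions along the lines of the Clang thread-safety documentation (ART's real definitions live in its own base headers):

```cpp
#define CAPABILITY(x)  __attribute__((capability(x)))
#define REQUIRES(...)  __attribute__((requires_capability(__VA_ARGS__)))
#define ACQUIRE(...)   __attribute__((acquire_capability(__VA_ARGS__)))
#define RELEASE(...)   __attribute__((release_capability(__VA_ARGS__)))

class CAPABILITY("mutex") Mutex {
 public:
  void Lock() ACQUIRE(this) {}    // Bodies elided; the analysis is purely static.
  void Unlock() RELEASE(this) {}
};

Mutex fault_message_lock_;
const char* fault_message_;

// REQUIRES(!fault_message_lock_) is a checked precondition: compiling with
// clang -Wthread-safety -Wthread-safety-negative warns at any call site that
// cannot prove the lock is not already held.
void SetFaultMessage(const char* message) REQUIRES(!fault_message_lock_) {
  fault_message_lock_.Lock();
  fault_message_ = message;
  fault_message_lock_.Unlock();
}
```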
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index 9922c5f..02ed3a2 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -112,6 +112,7 @@
RUNTIME_OPTIONS_KEY (unsigned int, ZygoteMaxFailedBoots, 10)
RUNTIME_OPTIONS_KEY (Unit, NoDexFileFallback)
RUNTIME_OPTIONS_KEY (std::string, CpuAbiList)
+RUNTIME_OPTIONS_KEY (std::string, Fingerprint)
RUNTIME_OPTIONS_KEY (bool, ExperimentalLambdas, false) // -X[no]experimental-lambdas
// Not parse-able from command line, but can be provided explicitly.
diff --git a/runtime/scoped_thread_state_change.h b/runtime/scoped_thread_state_change.h
index 1cc2df6..b90aa0e 100644
--- a/runtime/scoped_thread_state_change.h
+++ b/runtime/scoped_thread_state_change.h
@@ -34,7 +34,7 @@
class ScopedThreadStateChange {
public:
ScopedThreadStateChange(Thread* self, ThreadState new_thread_state)
- LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE
+ REQUIRES(!Locks::thread_suspend_count_lock_) ALWAYS_INLINE
: self_(self), thread_state_(new_thread_state), expected_has_no_thread_(false) {
if (UNLIKELY(self_ == nullptr)) {
// Value chosen arbitrarily and won't be used in the destructor since thread_ == null.
@@ -59,7 +59,7 @@
}
}
- ~ScopedThreadStateChange() LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE {
+ ~ScopedThreadStateChange() REQUIRES(!Locks::thread_suspend_count_lock_) ALWAYS_INLINE {
if (UNLIKELY(self_ == nullptr)) {
if (!expected_has_no_thread_) {
Runtime* runtime = Runtime::Current();
@@ -130,7 +130,7 @@
* it's best if we don't grab a mutex.
*/
template<typename T>
- T AddLocalReference(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ T AddLocalReference(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_) {
Locks::mutator_lock_->AssertSharedHeld(Self());
DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states.
DCHECK_NE(obj, Runtime::Current()->GetClearedJniWeakGlobal());
@@ -139,32 +139,32 @@
template<typename T>
T Decode(jobject obj) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
Locks::mutator_lock_->AssertSharedHeld(Self());
DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states.
return down_cast<T>(Self()->DecodeJObject(obj));
}
ArtField* DecodeField(jfieldID fid) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
Locks::mutator_lock_->AssertSharedHeld(Self());
DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states.
return reinterpret_cast<ArtField*>(fid);
}
- jfieldID EncodeField(ArtField* field) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ jfieldID EncodeField(ArtField* field) const SHARED_REQUIRES(Locks::mutator_lock_) {
Locks::mutator_lock_->AssertSharedHeld(Self());
DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states.
return reinterpret_cast<jfieldID>(field);
}
- ArtMethod* DecodeMethod(jmethodID mid) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ArtMethod* DecodeMethod(jmethodID mid) const SHARED_REQUIRES(Locks::mutator_lock_) {
Locks::mutator_lock_->AssertSharedHeld(Self());
DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states.
return reinterpret_cast<ArtMethod*>(mid);
}
- jmethodID EncodeMethod(ArtMethod* method) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ jmethodID EncodeMethod(ArtMethod* method) const SHARED_REQUIRES(Locks::mutator_lock_) {
Locks::mutator_lock_->AssertSharedHeld(Self());
DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states.
return reinterpret_cast<jmethodID>(method);
@@ -176,12 +176,12 @@
protected:
explicit ScopedObjectAccessAlreadyRunnable(JNIEnv* env)
- LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE
+ REQUIRES(!Locks::thread_suspend_count_lock_) ALWAYS_INLINE
: self_(ThreadForEnv(env)), env_(down_cast<JNIEnvExt*>(env)), vm_(env_->vm) {
}
explicit ScopedObjectAccessAlreadyRunnable(Thread* self)
- LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE
+ REQUIRES(!Locks::thread_suspend_count_lock_) ALWAYS_INLINE
: self_(self), env_(down_cast<JNIEnvExt*>(self->GetJniEnv())),
vm_(env_ != nullptr ? env_->vm : nullptr) {
}
@@ -220,14 +220,14 @@
class ScopedObjectAccessUnchecked : public ScopedObjectAccessAlreadyRunnable {
public:
explicit ScopedObjectAccessUnchecked(JNIEnv* env)
- LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE
+ REQUIRES(!Locks::thread_suspend_count_lock_) ALWAYS_INLINE
: ScopedObjectAccessAlreadyRunnable(env), tsc_(Self(), kRunnable) {
Self()->VerifyStack();
Locks::mutator_lock_->AssertSharedHeld(Self());
}
explicit ScopedObjectAccessUnchecked(Thread* self)
- LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE
+ REQUIRES(!Locks::thread_suspend_count_lock_) ALWAYS_INLINE
: ScopedObjectAccessAlreadyRunnable(self), tsc_(self, kRunnable) {
Self()->VerifyStack();
Locks::mutator_lock_->AssertSharedHeld(Self());
@@ -250,13 +250,13 @@
class ScopedObjectAccess : public ScopedObjectAccessUnchecked {
public:
explicit ScopedObjectAccess(JNIEnv* env)
- LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
+ REQUIRES(!Locks::thread_suspend_count_lock_)
SHARED_LOCK_FUNCTION(Locks::mutator_lock_) ALWAYS_INLINE
: ScopedObjectAccessUnchecked(env) {
}
explicit ScopedObjectAccess(Thread* self)
- LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
+ REQUIRES(!Locks::thread_suspend_count_lock_)
SHARED_LOCK_FUNCTION(Locks::mutator_lock_) ALWAYS_INLINE
: ScopedObjectAccessUnchecked(self) {
}
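The RAII types in this file are the usual way code satisfies the SHARED_REQUIRES(Locks::mutator_lock_) annotations elsewhere in this change. A usage sketch (the native method itself is hypothetical; Decode and AddLocalReference are the members shown in this hunk, and the snippet assumes ART's headers are available):

```cpp
// Inside ART, with scoped_thread_state_change.h included. The
// ScopedObjectAccess constructor is SHARED_LOCK_FUNCTION(Locks::mutator_lock_),
// so within the scope the thread is runnable and may work with raw mirror::
// pointers; the destructor releases the shared hold.
static jobject ReturnSelf(JNIEnv* env, jobject java_this) {
  ScopedObjectAccess soa(env);
  mirror::Object* obj = soa.Decode<mirror::Object*>(java_this);
  // ... use obj while mutator_lock_ is shared-held ...
  return soa.AddLocalReference<jobject>(obj);
}
```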
diff --git a/runtime/signal_catcher.cc b/runtime/signal_catcher.cc
index 9f8c55c..6cb7950 100644
--- a/runtime/signal_catcher.cc
+++ b/runtime/signal_catcher.cc
@@ -133,8 +133,11 @@
DumpCmdLine(os);
- // Note: The string "ABI:" is chosen to match the format used by debuggerd.
- os << "ABI: " << GetInstructionSetString(runtime->GetInstructionSet()) << "\n";
+ // Note: The strings "Build fingerprint:" and "ABI:" are chosen to match the format used by
+ // debuggerd. This allows, for example, the stack tool to work.
+ std::string fingerprint = runtime->GetFingerprint();
+ os << "Build fingerprint: '" << (fingerprint.empty() ? "unknown" : fingerprint) << "'\n";
+ os << "ABI: '" << GetInstructionSetString(runtime->GetInstructionSet()) << "'\n";
os << "Build type: " << (kIsDebugBuild ? "debug" : "optimized") << "\n";
diff --git a/runtime/signal_catcher.h b/runtime/signal_catcher.h
index 43bbef4..de6a212 100644
--- a/runtime/signal_catcher.h
+++ b/runtime/signal_catcher.h
@@ -35,19 +35,19 @@
explicit SignalCatcher(const std::string& stack_trace_file);
~SignalCatcher();
- void HandleSigQuit() LOCKS_EXCLUDED(Locks::mutator_lock_,
- Locks::thread_list_lock_,
- Locks::thread_suspend_count_lock_);
+ void HandleSigQuit() REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_,
+ !Locks::thread_suspend_count_lock_);
private:
- static void* Run(void* arg);
+ // NO_THREAD_SAFETY_ANALYSIS for a static function calling into member functions that exclude lock_.
+ static void* Run(void* arg) NO_THREAD_SAFETY_ANALYSIS;
void HandleSigUsr1();
void Output(const std::string& s);
- void SetHaltFlag(bool new_value);
- bool ShouldHalt();
- int WaitForSignal(Thread* self, SignalSet& signals);
+ void SetHaltFlag(bool new_value) REQUIRES(!lock_);
+ bool ShouldHalt() REQUIRES(!lock_);
+ int WaitForSignal(Thread* self, SignalSet& signals) REQUIRES(!lock_);
std::string stack_trace_file_;
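The Run() annotation shows the escape hatch: a pthread-style static entry point cannot declare the !lock_ preconditions of the member functions it calls, so it opts out of the analysis, and checking resumes on those members. A sketch of the shape (the loop body is assumed, not taken from this patch):

```cpp
// Static trampoline passed to pthread_create: analysis disabled here, but
// ShouldHalt() still carries its REQUIRES(!lock_) precondition for all
// other callers.
void* SignalCatcher::Run(void* arg) NO_THREAD_SAFETY_ANALYSIS {
  SignalCatcher* signal_catcher = reinterpret_cast<SignalCatcher*>(arg);
  while (!signal_catcher->ShouldHalt()) {
    // ... block in sigwait() and dispatch SIGQUIT/SIGUSR1 ...
  }
  return nullptr;
}
```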
diff --git a/runtime/stack.cc b/runtime/stack.cc
index fede91c..2916eaa 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -19,6 +19,7 @@
#include "arch/context.h"
#include "art_method-inl.h"
#include "base/hex_dump.h"
+#include "base/out.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc_map.h"
@@ -150,7 +151,7 @@
}
extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
mirror::Object* StackVisitor::GetThisObject() const {
DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), sizeof(void*));
@@ -180,7 +181,7 @@
} else {
uint16_t reg = code_item->registers_size_ - code_item->ins_size_;
uint32_t value = 0;
- bool success = GetVReg(m, reg, kReferenceVReg, &value);
+ bool success = GetVReg(m, reg, kReferenceVReg, outof(value));
// We currently always guarantee the `this` object is live throughout the method.
CHECK(success) << "Failed to read the this object in " << PrettyMethod(m);
return reinterpret_cast<mirror::Object*>(value);
@@ -375,8 +376,8 @@
QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
uint32_t vmap_offset_lo, vmap_offset_hi;
// TODO: IsInContext stops before spotting floating point registers.
- if (vmap_table.IsInContext(vreg, kind_lo, &vmap_offset_lo) &&
- vmap_table.IsInContext(vreg + 1, kind_hi, &vmap_offset_hi)) {
+ if (vmap_table.IsInContext(vreg, kind_lo, outof(vmap_offset_lo)) &&
+ vmap_table.IsInContext(vreg + 1, kind_hi, outof(vmap_offset_hi))) {
bool is_float = (kind_lo == kDoubleLoVReg);
uint32_t spill_mask = is_float ? frame_info.FpSpillMask() : frame_info.CoreSpillMask();
uint32_t reg_lo = vmap_table.ComputeRegister(spill_mask, vmap_offset_lo, kind_lo);
@@ -399,8 +400,8 @@
uint64_t* val) const {
uint32_t low_32bits;
uint32_t high_32bits;
- bool success = GetVRegFromOptimizedCode(m, vreg, kind_lo, &low_32bits);
- success &= GetVRegFromOptimizedCode(m, vreg + 1, kind_hi, &high_32bits);
+ bool success = GetVRegFromOptimizedCode(m, vreg, kind_lo, outof(low_32bits));
+ success &= GetVRegFromOptimizedCode(m, vreg + 1, kind_hi, outof(high_32bits));
if (success) {
*val = (static_cast<uint64_t>(high_32bits) << 32) | static_cast<uint64_t>(low_32bits);
}
@@ -452,7 +453,7 @@
QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
uint32_t vmap_offset;
// TODO: IsInContext stops before spotting floating point registers.
- if (vmap_table.IsInContext(vreg, kind, &vmap_offset)) {
+ if (vmap_table.IsInContext(vreg, kind, outof(vmap_offset))) {
bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
uint32_t spill_mask = is_float ? frame_info.FpSpillMask() : frame_info.CoreSpillMask();
uint32_t reg = vmap_table.ComputeRegister(spill_mask, vmap_offset, kind);
@@ -532,8 +533,8 @@
QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
uint32_t vmap_offset_lo, vmap_offset_hi;
// TODO: IsInContext stops before spotting floating point registers.
- if (vmap_table.IsInContext(vreg, kind_lo, &vmap_offset_lo) &&
- vmap_table.IsInContext(vreg + 1, kind_hi, &vmap_offset_hi)) {
+ if (vmap_table.IsInContext(vreg, kind_lo, outof(vmap_offset_lo)) &&
+ vmap_table.IsInContext(vreg + 1, kind_hi, outof(vmap_offset_hi))) {
bool is_float = (kind_lo == kDoubleLoVReg);
uint32_t spill_mask = is_float ? frame_info.FpSpillMask() : frame_info.CoreSpillMask();
uint32_t reg_lo = vmap_table.ComputeRegister(spill_mask, vmap_offset_lo, kind_lo);
@@ -655,7 +656,7 @@
next_dex_pc_(0) {
}
- bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
if (found_frame_) {
ArtMethod* method = GetMethod();
if (method != nullptr && !method->IsRuntimeMethod()) {
@@ -688,7 +689,7 @@
explicit DescribeStackVisitor(Thread* thread_in)
: StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
- bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
LOG(INFO) << "Frame Id=" << GetFrameId() << " " << DescribeLocation();
return true;
}
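The outof() call sites use the new base/out.h helper (its test was added to Android.gtest.mk in this patch). A hypothetical minimal version — the real header may differ — showing the idea: output parameters become explicit at the call site instead of a bare &value:

```cpp
#include <cstdint>

// Wrapper marking a parameter as output-only (sketch, not ART's exact code).
template <typename T>
class out {
 public:
  explicit out(T& param) : param_(&param) {}
  T& operator*() { return *param_; }
  T* operator->() { return param_; }
 private:
  T* param_;
};

template <typename T>
out<T> outof(T& param) {
  return out<T>(param);
}

// A callee takes out<T> by value and writes through it...
bool ReadValue(out<uint32_t> val) {
  *val = 42u;
  return true;
}

// ...and the call site flags the mutation for the reader:
//   uint32_t value = 0;
//   bool ok = ReadValue(outof(value));
```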
diff --git a/runtime/stack.h b/runtime/stack.h
index d60714f..8023de1 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -155,7 +155,7 @@
// A non-null return does not mean the vreg is currently a reference on non-moving collectors.
// If not certain, check that the raw value from GetVReg is equal to this one.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- mirror::Object* GetVRegReference(size_t i) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Object* GetVRegReference(size_t i) const SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK_LT(i, NumberOfVRegs());
mirror::Object* ref;
if (HasReferenceArray()) {
@@ -229,7 +229,7 @@
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- void SetVRegReference(size_t i, mirror::Object* val) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetVRegReference(size_t i, mirror::Object* val) SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK_LT(i, NumberOfVRegs());
if (kVerifyFlags & kVerifyWrites) {
VerifyObject(val);
@@ -244,14 +244,14 @@
}
}
- ArtMethod* GetMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ArtMethod* GetMethod() const SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(method_ != nullptr);
return method_;
}
- mirror::Object* GetThisObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Object* GetThisObject() const SHARED_REQUIRES(Locks::mutator_lock_);
- mirror::Object* GetThisObject(uint16_t num_ins) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Object* GetThisObject(uint16_t num_ins) const SHARED_REQUIRES(Locks::mutator_lock_);
bool Contains(StackReference<mirror::Object>* shadow_frame_entry_obj) const {
if (HasReferenceArray()) {
@@ -333,7 +333,7 @@
: RootInfo(kRootJavaFrame, thread_id), stack_visitor_(stack_visitor), vreg_(vreg) {
}
virtual void Describe(std::ostream& os) const OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
private:
const StackVisitor* const stack_visitor_;
@@ -410,7 +410,7 @@
return OFFSETOF_MEMBER(ManagedStack, top_shadow_frame_);
}
- size_t NumJniShadowFrameReferences() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ size_t NumJniShadowFrameReferences() const SHARED_REQUIRES(Locks::mutator_lock_);
bool ShadowFramesContain(StackReference<mirror::Object>* shadow_frame_entry) const;
@@ -431,31 +431,31 @@
protected:
StackVisitor(Thread* thread, Context* context, StackWalkKind walk_kind)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
public:
virtual ~StackVisitor() {}
// Return 'true' if we should continue to visit more frames, 'false' to stop.
- virtual bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+ virtual bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) = 0;
void WalkStack(bool include_transitions = false)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- ArtMethod* GetMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtMethod* GetMethod() const SHARED_REQUIRES(Locks::mutator_lock_);
bool IsShadowFrame() const {
return cur_shadow_frame_ != nullptr;
}
- uint32_t GetDexPc(bool abort_on_failure = true) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ uint32_t GetDexPc(bool abort_on_failure = true) const SHARED_REQUIRES(Locks::mutator_lock_);
- mirror::Object* GetThisObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Object* GetThisObject() const SHARED_REQUIRES(Locks::mutator_lock_);
- size_t GetNativePcOffset() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ size_t GetNativePcOffset() const SHARED_REQUIRES(Locks::mutator_lock_);
uintptr_t* CalleeSaveAddress(int num, size_t frame_size) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// Callee saves are held at the top of the frame
DCHECK(GetMethod() != nullptr);
uint8_t* save_addr =
@@ -467,46 +467,46 @@
}
// Returns the height of the stack in the managed stack frames, including transitions.
- size_t GetFrameHeight() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ size_t GetFrameHeight() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetNumFrames() - cur_depth_ - 1;
}
// Returns a frame ID for JDWP use, starting from 1.
- size_t GetFrameId() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ size_t GetFrameId() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetFrameHeight() + 1;
}
- size_t GetNumFrames() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ size_t GetNumFrames() SHARED_REQUIRES(Locks::mutator_lock_) {
if (num_frames_ == 0) {
num_frames_ = ComputeNumFrames(thread_, walk_kind_);
}
return num_frames_;
}
- size_t GetFrameDepth() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ size_t GetFrameDepth() SHARED_REQUIRES(Locks::mutator_lock_) {
return cur_depth_;
}
// Get the method and dex pc immediately after the one that's currently being visited.
bool GetNextMethodAndDexPc(ArtMethod** next_method, uint32_t* next_dex_pc)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool IsReferenceVReg(ArtMethod* m, uint16_t vreg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool GetVReg(ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t* val) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool GetVRegPair(ArtMethod* m, uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi,
uint64_t* val) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool SetVReg(ArtMethod* m, uint16_t vreg, uint32_t new_value, VRegKind kind)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool SetVRegPair(ArtMethod* m, uint16_t vreg, uint64_t new_value,
VRegKind kind_lo, VRegKind kind_hi)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
uintptr_t* GetGPRAddress(uint32_t reg) const;
@@ -522,9 +522,9 @@
return reinterpret_cast<uint32_t*>(vreg_addr);
}
- uintptr_t GetReturnPc() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ uintptr_t GetReturnPc() const SHARED_REQUIRES(Locks::mutator_lock_);
- void SetReturnPc(uintptr_t new_ret_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetReturnPc(uintptr_t new_ret_pc) SHARED_REQUIRES(Locks::mutator_lock_);
/*
* Return sp-relative offset for a Dalvik virtual register, compiler
@@ -606,17 +606,17 @@
return reinterpret_cast<HandleScope*>(reinterpret_cast<uintptr_t>(sp) + pointer_size);
}
- std::string DescribeLocation() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string DescribeLocation() const SHARED_REQUIRES(Locks::mutator_lock_);
static size_t ComputeNumFrames(Thread* thread, StackWalkKind walk_kind)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- static void DescribeStack(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void DescribeStack(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_);
private:
// Private constructor, used when num_frames_ has already been computed.
StackVisitor(Thread* thread, Context* context, StackWalkKind walk_kind, size_t num_frames)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool IsAccessibleRegister(uint32_t reg, bool is_float) const {
return is_float ? IsAccessibleFPR(reg) : IsAccessibleGPR(reg);
@@ -644,40 +644,40 @@
bool GetVRegFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
uint32_t* val) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
uint32_t* val) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool GetRegisterIfAccessible(uint32_t reg, VRegKind kind, uint32_t* val) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool GetVRegPairFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
VRegKind kind_hi, uint64_t* val) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool GetVRegPairFromOptimizedCode(ArtMethod* m, uint16_t vreg,
VRegKind kind_lo, VRegKind kind_hi,
uint64_t* val) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool GetRegisterPairIfAccessible(uint32_t reg_lo, uint32_t reg_hi, VRegKind kind_lo,
uint64_t* val) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool SetVRegFromQuickCode(ArtMethod* m, uint16_t vreg, uint32_t new_value,
VRegKind kind)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool SetRegisterIfAccessible(uint32_t reg, uint32_t new_value, VRegKind kind)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool SetVRegPairFromQuickCode(ArtMethod* m, uint16_t vreg, uint64_t new_value,
VRegKind kind_lo, VRegKind kind_hi)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool SetRegisterPairIfAccessible(uint32_t reg_lo, uint32_t reg_hi, uint64_t new_value,
bool is_float)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- void SanityCheckFrame() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SanityCheckFrame() const SHARED_REQUIRES(Locks::mutator_lock_);
- InlineInfo GetCurrentInlineInfo() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ InlineInfo GetCurrentInlineInfo() const SHARED_REQUIRES(Locks::mutator_lock_);
Thread* const thread_;
const StackWalkKind walk_kind_;
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index 39ef68a..8bf241b 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -19,6 +19,10 @@
#include "thread.h"
+#ifdef __ANDROID__
+#include <bionic_tls.h> // Access to our own TLS slot.
+#endif
+
#include <pthread.h>
#include "base/casts.h"
@@ -41,7 +45,11 @@
if (!is_started_) {
return nullptr;
} else {
+#ifdef __ANDROID__
+ void* thread = __get_tls()[TLS_SLOT_ART_THREAD_SELF];
+#else
void* thread = pthread_getspecific(Thread::pthread_key_self_);
+#endif
return reinterpret_cast<Thread*>(thread);
}
}
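This hunk is the heart of the Thread::Current() optimization: on device, the current Thread* becomes a single indexed load from a reserved Bionic TLS slot, where pthread_getspecific() is an out-of-line libc call. The shape of the two paths (names as in the hunk; __get_tls() and TLS_SLOT_ART_THREAD_SELF come from the private Bionic header, and the function name here is illustrative):

```cpp
#ifdef __ANDROID__
#include <bionic_tls.h>  // __get_tls() and TLS_SLOT_ART_THREAD_SELF (Bionic-private).

static inline Thread* CurrentThreadFast() {
  // One indexed load off the thread pointer; no function call.
  return reinterpret_cast<Thread*>(__get_tls()[TLS_SLOT_ART_THREAD_SELF]);
}
#else
static inline Thread* CurrentThreadFast() {
  // Portable fallback used on the host.
  return reinterpret_cast<Thread*>(pthread_getspecific(Thread::pthread_key_self_));
}
#endif
```

The store side has to match, which is why the attach and detach paths in thread.cc and thread_list.cc below write the same slot under the same #ifdef.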
diff --git a/runtime/thread.cc b/runtime/thread.cc
index a2edfa3..ba1121f 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -527,7 +527,11 @@
InitCardTable();
InitTid();
+#ifdef __ANDROID__
+ __get_tls()[TLS_SLOT_ART_THREAD_SELF] = this;
+#else
CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, this), "attach self");
+#endif
DCHECK_EQ(Thread::Current(), this);
tls32_.thin_lock_thread_id = thread_list->AllocThreadId(this);
@@ -1159,7 +1163,7 @@
struct StackDumpVisitor : public StackVisitor {
StackDumpVisitor(std::ostream& os_in, Thread* thread_in, Context* context, bool can_allocate_in)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: StackVisitor(thread_in, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
os(os_in),
thread(thread_in),
@@ -1175,7 +1179,7 @@
}
}
- bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
if (m->IsRuntimeMethod()) {
return true;
@@ -1223,7 +1227,7 @@
}
static void DumpLockedObject(mirror::Object* o, void* context)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
std::ostream& os = *reinterpret_cast<std::ostream*>(context);
os << " - locked ";
if (o == nullptr) {
@@ -1255,7 +1259,7 @@
};
static bool ShouldShowNativeStack(const Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ThreadState state = thread->GetState();
// In native code somewhere in the VM (one of the kWaitingFor* states)? That's interesting.
@@ -1349,7 +1353,11 @@
LOG(WARNING) << "Native thread exiting without having called DetachCurrentThread (maybe it's "
"going to use a pthread_key_create destructor?): " << *self;
CHECK(is_started_);
+#ifdef __ANDROID__
+ __get_tls()[TLS_SLOT_ART_THREAD_SELF] = self;
+#else
CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, self), "reattach self");
+#endif
self->tls32_.thread_exit_check_count = 1;
} else {
LOG(FATAL) << "Native thread exited without calling DetachCurrentThread: " << *self;
@@ -1760,11 +1768,11 @@
class CountStackDepthVisitor : public StackVisitor {
public:
explicit CountStackDepthVisitor(Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
depth_(0), skip_depth_(0), skipping_(true) {}
- bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
// We want to skip frames up to and including the exception's constructor.
// Note that we also skip the frame if it doesn't have a method (namely the callee
// save frame).
@@ -1808,29 +1816,26 @@
trace_(nullptr),
pointer_size_(Runtime::Current()->GetClassLinker()->GetImagePointerSize()) {}
- bool Init(int depth)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool Init(int depth) SHARED_REQUIRES(Locks::mutator_lock_) ACQUIRE(Roles::uninterruptible_) {
// Allocate method trace with format [method pointers][pcs].
auto* cl = Runtime::Current()->GetClassLinker();
trace_ = cl->AllocPointerArray(self_, depth * 2);
+ const char* last_no_suspend_cause =
+ self_->StartAssertNoThreadSuspension("Building internal stack trace");
if (trace_ == nullptr) {
self_->AssertPendingOOMException();
return false;
}
// If we are called from native, use non-transactional mode.
- const char* last_no_suspend_cause =
- self_->StartAssertNoThreadSuspension("Building internal stack trace");
CHECK(last_no_suspend_cause == nullptr) << last_no_suspend_cause;
return true;
}
- virtual ~BuildInternalStackTraceVisitor() {
- if (trace_ != nullptr) {
- self_->EndAssertNoThreadSuspension(nullptr);
- }
+ virtual ~BuildInternalStackTraceVisitor() RELEASE(Roles::uninterruptible_) {
+ self_->EndAssertNoThreadSuspension(nullptr);
}
- bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
if (trace_ == nullptr) {
return true; // We're probably trying to fillInStackTrace for an OutOfMemoryError.
}
@@ -2012,7 +2017,7 @@
}
static mirror::ClassLoader* GetCurrentClassLoader(Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ArtMethod* method = self->GetCurrentMethod(nullptr);
return method != nullptr
? method->GetDeclaringClass()->GetClassLoader()
@@ -2142,7 +2147,7 @@
std::string str(ss.str());
// log to stderr for debugging command line processes
std::cerr << str;
-#ifdef HAVE_ANDROID_OS
+#ifdef __ANDROID__
// log to logcat for debugging frameworks processes
LOG(INFO) << str;
#endif
@@ -2307,6 +2312,7 @@
QUICK_ENTRY_POINT_INFO(pNewStringFromStringBuffer)
QUICK_ENTRY_POINT_INFO(pNewStringFromStringBuilder)
QUICK_ENTRY_POINT_INFO(pReadBarrierJni)
+ QUICK_ENTRY_POINT_INFO(pReadBarrierSlow)
#undef QUICK_ENTRY_POINT_INFO
os << offset;
@@ -2345,13 +2351,13 @@
// so we don't abort in a special situation (thinlocked monitor) when dumping the Java stack.
struct CurrentMethodVisitor FINAL : public StackVisitor {
CurrentMethodVisitor(Thread* thread, Context* context, bool abort_on_error)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
this_object_(nullptr),
method_(nullptr),
dex_pc_(0),
abort_on_error_(abort_on_error) {}
- bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
if (m->IsRuntimeMethod()) {
// Continue if this is a runtime method.
@@ -2391,13 +2397,13 @@
class ReferenceMapVisitor : public StackVisitor {
public:
ReferenceMapVisitor(Thread* thread, Context* context, RootVisitor& visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
// We are visiting the references in compiled frames, so we do not need
// to know the inlined frames.
: StackVisitor(thread, context, StackVisitor::StackWalkKind::kSkipInlinedFrames),
visitor_(visitor) {}
- bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
if (false) {
LOG(INFO) << "Visiting stack roots in " << PrettyMethod(GetMethod())
<< StringPrintf("@ PC:%04x", GetDexPc());
@@ -2411,7 +2417,7 @@
return true;
}
- void VisitShadowFrame(ShadowFrame* shadow_frame) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void VisitShadowFrame(ShadowFrame* shadow_frame) SHARED_REQUIRES(Locks::mutator_lock_) {
ArtMethod* m = shadow_frame->GetMethod();
DCHECK(m != nullptr);
size_t num_regs = shadow_frame->NumberOfVRegs();
@@ -2453,7 +2459,7 @@
}
private:
- void VisitQuickFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void VisitQuickFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
auto* cur_quick_frame = GetCurrentQuickFrame();
DCHECK(cur_quick_frame != nullptr);
auto* m = *cur_quick_frame;
@@ -2557,7 +2563,7 @@
RootCallbackVisitor(RootVisitor* visitor, uint32_t tid) : visitor_(visitor), tid_(tid) {}
void operator()(mirror::Object** obj, size_t vreg, const StackVisitor* stack_visitor) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
visitor_->VisitRoot(obj, JavaFrameRootInfo(tid_, stack_visitor, vreg));
}
@@ -2620,7 +2626,7 @@
class VerifyRootVisitor : public SingleRootVisitor {
public:
void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
VerifyObject(root);
}
};
diff --git a/runtime/thread.h b/runtime/thread.h
index cf87f22..e4ad7f3 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -162,20 +162,18 @@
static Thread* Current();
// On a runnable thread, check for pending thread suspension request and handle if pending.
- void AllowThreadSuspension() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void AllowThreadSuspension() SHARED_REQUIRES(Locks::mutator_lock_);
// Process pending thread suspension request and handle if pending.
- void CheckSuspend() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void CheckSuspend() SHARED_REQUIRES(Locks::mutator_lock_);
static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts,
mirror::Object* thread_peer)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
- LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts, jobject thread)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
- LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Translates 172 to pAllocArrayFromCode and so on.
template<size_t size_of_pointers>
@@ -186,18 +184,18 @@
// Dumps the detailed thread state and the thread stack (used for SIGQUIT).
void Dump(std::ostream& os) const
- LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!Locks::thread_suspend_count_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
void DumpJavaStack(std::ostream& os) const
- LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!Locks::thread_suspend_count_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Dumps the SIGQUIT per-thread header. 'thread' can be null for a non-attached thread, in which
// case we use 'tid' to identify the thread, and we'll include as much information as we can.
static void DumpState(std::ostream& os, const Thread* thread, pid_t tid)
- LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!Locks::thread_suspend_count_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
ThreadState GetState() const {
DCHECK_GE(tls32_.state_and_flags.as_struct.state, kTerminated);
@@ -207,11 +205,11 @@
ThreadState SetState(ThreadState new_state);
- int GetSuspendCount() const EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) {
+ int GetSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_) {
return tls32_.suspend_count;
}
- int GetDebugSuspendCount() const EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) {
+ int GetDebugSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_) {
return tls32_.debug_suspend_count;
}
@@ -223,10 +221,10 @@
}
bool ModifySuspendCount(Thread* self, int delta, AtomicInteger* suspend_barrier, bool for_debugger)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_);
+ REQUIRES(Locks::thread_suspend_count_lock_);
bool RequestCheckpoint(Closure* function)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_);
+ REQUIRES(Locks::thread_suspend_count_lock_);
void SetFlipFunction(Closure* function);
Closure* GetFlipFunction();
@@ -243,24 +241,25 @@
// Called when the thread detects that thread_suspend_count_ is non-zero. Gives up its share of
// mutator_lock_ and waits until it is resumed and thread_suspend_count_ is zero.
void FullSuspendCheck()
- LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!Locks::thread_suspend_count_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Transition from non-runnable to runnable state acquiring share on mutator_lock_.
ThreadState TransitionFromSuspendedToRunnable()
- LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
+ REQUIRES(!Locks::thread_suspend_count_lock_)
SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
ALWAYS_INLINE;
// Transition from runnable into a state where mutator privileges are denied. Releases share of
// mutator lock.
void TransitionFromRunnableToSuspended(ThreadState new_state)
- LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
+ REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_)
UNLOCK_FUNCTION(Locks::mutator_lock_)
ALWAYS_INLINE;
// Once called thread suspension will cause an assertion failure.
- const char* StartAssertNoThreadSuspension(const char* cause) {
+ const char* StartAssertNoThreadSuspension(const char* cause) ACQUIRE(Roles::uninterruptible_) {
+ Roles::uninterruptible_.Acquire(); // No-op.
if (kIsDebugBuild) {
CHECK(cause != nullptr);
const char* previous_cause = tlsPtr_.last_no_thread_suspension_cause;
@@ -273,13 +272,14 @@
}
// End region where no thread suspension is expected.
- void EndAssertNoThreadSuspension(const char* old_cause) {
+ void EndAssertNoThreadSuspension(const char* old_cause) RELEASE(Roles::uninterruptible_) {
if (kIsDebugBuild) {
CHECK(old_cause != nullptr || tls32_.no_thread_suspension == 1);
CHECK_GT(tls32_.no_thread_suspension, 0U);
tls32_.no_thread_suspension--;
tlsPtr_.last_no_thread_suspension_cause = old_cause;
}
+ Roles::uninterruptible_.Release(); // No-op.
}
void AssertThreadSuspensionIsAllowable(bool check_locks = true) const;
@@ -290,7 +290,7 @@
size_t NumberOfHeldMutexes() const;
- bool HoldsLock(mirror::Object*) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool HoldsLock(mirror::Object*) const SHARED_REQUIRES(Locks::mutator_lock_);
/*
* Changes the priority of this thread to match that of the java.lang.Thread object.
@@ -318,19 +318,19 @@
// Returns the java.lang.Thread's name, or null if this Thread* doesn't have a peer.
mirror::String* GetThreadName(const ScopedObjectAccessAlreadyRunnable& ts) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code,
// allocation, or locking.
void GetThreadName(std::string& name) const;
// Sets the thread's name.
- void SetThreadName(const char* name) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetThreadName(const char* name) SHARED_REQUIRES(Locks::mutator_lock_);
// Returns the thread-specific CPU-time clock in microseconds or -1 if unavailable.
uint64_t GetCpuMicroTime() const;
- mirror::Object* GetPeer() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Object* GetPeer() const SHARED_REQUIRES(Locks::mutator_lock_) {
CHECK(tlsPtr_.jpeer == nullptr);
return tlsPtr_.opeer;
}
@@ -349,28 +349,28 @@
return tlsPtr_.exception != nullptr;
}
- mirror::Throwable* GetException() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Throwable* GetException() const SHARED_REQUIRES(Locks::mutator_lock_) {
return tlsPtr_.exception;
}
void AssertPendingException() const;
- void AssertPendingOOMException() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void AssertPendingOOMException() const SHARED_REQUIRES(Locks::mutator_lock_);
void AssertNoPendingException() const;
void AssertNoPendingExceptionForNewException(const char* msg) const;
void SetException(mirror::Throwable* new_exception)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
CHECK(new_exception != nullptr);
// TODO: DCHECK(!IsExceptionPending());
tlsPtr_.exception = new_exception;
}
- void ClearException() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void ClearException() SHARED_REQUIRES(Locks::mutator_lock_) {
tlsPtr_.exception = nullptr;
}
// Find the catch block and perform a long jump to the appropriate exception handler.
- NO_RETURN void QuickDeliverException() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ NO_RETURN void QuickDeliverException() SHARED_REQUIRES(Locks::mutator_lock_);
Context* GetLongJumpContext();
void ReleaseLongJumpContext(Context* context) {
@@ -392,12 +392,12 @@
// Get the current method and dex pc. If there are errors in retrieving the dex pc, this will
// abort the runtime iff abort_on_error is true.
ArtMethod* GetCurrentMethod(uint32_t* dex_pc, bool abort_on_error = true) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Returns whether the given exception was thrown by the current Java method being executed
// (Note that this includes native Java methods).
bool IsExceptionThrownByCurrentMethod(mirror::Throwable* exception) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void SetTopOfStack(ArtMethod** top_method) {
tlsPtr_.managed_stack.SetTopQuickFrame(top_method);
@@ -414,23 +414,24 @@
// If 'msg' is null, no detail message is set.
void ThrowNewException(const char* exception_class_descriptor, const char* msg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
// If 'msg' is null, no detail message is set. An exception must be pending, and will be
// used as the new exception's cause.
void ThrowNewWrappedException(const char* exception_class_descriptor, const char* msg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
void ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...)
__attribute__((format(printf, 3, 4)))
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
void ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
// OutOfMemoryError is special, because we need to pre-allocate an instance.
// Only the GC should call this.
- void ThrowOutOfMemoryError(const char* msg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void ThrowOutOfMemoryError(const char* msg) SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Roles::uninterruptible_);
static void Startup();
static void FinishStartup();
@@ -442,50 +443,49 @@
}
// Convert a jobject into a Object*
- mirror::Object* DecodeJObject(jobject obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Object* DecodeJObject(jobject obj) const SHARED_REQUIRES(Locks::mutator_lock_);
- mirror::Object* GetMonitorEnterObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Object* GetMonitorEnterObject() const SHARED_REQUIRES(Locks::mutator_lock_) {
return tlsPtr_.monitor_enter_object;
}
- void SetMonitorEnterObject(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetMonitorEnterObject(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
tlsPtr_.monitor_enter_object = obj;
}
// Implements java.lang.Thread.interrupted.
- bool Interrupted() LOCKS_EXCLUDED(wait_mutex_);
+ bool Interrupted() REQUIRES(!*wait_mutex_);
// Implements java.lang.Thread.isInterrupted.
- bool IsInterrupted() LOCKS_EXCLUDED(wait_mutex_);
- bool IsInterruptedLocked() EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
+ bool IsInterrupted() REQUIRES(!*wait_mutex_);
+ bool IsInterruptedLocked() REQUIRES(wait_mutex_) {
return interrupted_;
}
- void Interrupt(Thread* self) LOCKS_EXCLUDED(wait_mutex_);
- void SetInterruptedLocked(bool i) EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
+ void Interrupt(Thread* self) REQUIRES(!*wait_mutex_);
+ void SetInterruptedLocked(bool i) REQUIRES(wait_mutex_) {
interrupted_ = i;
}
- void Notify() LOCKS_EXCLUDED(wait_mutex_);
+ void Notify() REQUIRES(!*wait_mutex_);
private:
- void NotifyLocked(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_);
+ void NotifyLocked(Thread* self) REQUIRES(wait_mutex_);
public:
Mutex* GetWaitMutex() const LOCK_RETURNED(wait_mutex_) {
return wait_mutex_;
}
- ConditionVariable* GetWaitConditionVariable() const EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
+ ConditionVariable* GetWaitConditionVariable() const REQUIRES(wait_mutex_) {
return wait_cond_;
}
- Monitor* GetWaitMonitor() const EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
+ Monitor* GetWaitMonitor() const REQUIRES(wait_mutex_) {
return wait_monitor_;
}
- void SetWaitMonitor(Monitor* mon) EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
+ void SetWaitMonitor(Monitor* mon) REQUIRES(wait_mutex_) {
wait_monitor_ = mon;
}
-
// Waiter linked-list support.
Thread* GetWaitNext() const {
return tlsPtr_.wait_next;
@@ -505,7 +505,7 @@
// and space efficient to compute than the StackTraceElement[].
template<bool kTransactionActive>
jobject CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a
// StackTraceElement[]. If output_array is null, a new array is created, otherwise as many
@@ -514,11 +514,11 @@
static jobjectArray InternalStackTraceToStackTraceElementArray(
const ScopedObjectAccessAlreadyRunnable& soa, jobject internal,
jobjectArray output_array = nullptr, int* stack_depth = nullptr)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
- ALWAYS_INLINE void VerifyStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE void VerifyStack() SHARED_REQUIRES(Locks::mutator_lock_);
//
// Offsets of various members of native Thread class, used by compiled code.
@@ -649,7 +649,7 @@
}
// Set the stack end to the value to be used during a stack overflow.
- void SetStackEndForStackOverflow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetStackEndForStackOverflow() SHARED_REQUIRES(Locks::mutator_lock_);
// Set the stack end to the value to be used during regular execution.
void ResetDefaultStackEnd() {
@@ -712,7 +712,7 @@
}
// Number of references allocated in JNI ShadowFrames on this thread.
- size_t NumJniShadowFrameReferences() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ size_t NumJniShadowFrameReferences() const SHARED_REQUIRES(Locks::mutator_lock_) {
return tlsPtr_.managed_stack.NumJniShadowFrameReferences();
}
@@ -720,7 +720,7 @@
size_t NumHandleReferences();
// Number of references allocated in handle scopes & JNI shadow frames on this thread.
- size_t NumStackReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ size_t NumStackReferences() SHARED_REQUIRES(Locks::mutator_lock_) {
return NumHandleReferences() + NumJniShadowFrameReferences();
}
@@ -728,7 +728,7 @@
bool HandleScopeContains(jobject obj) const;
void HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
HandleScope* GetTopHandleScope() {
return tlsPtr_.top_handle_scope;
@@ -867,10 +867,10 @@
void RunCheckpointFunction();
bool PassActiveSuspendBarriers(Thread* self)
- LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_);
+ REQUIRES(!Locks::thread_suspend_count_lock_);
void ClearSuspendBarrier(AtomicInteger* target)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_);
+ REQUIRES(Locks::thread_suspend_count_lock_);
bool ReadFlag(ThreadFlag flag) const {
return (tls32_.state_and_flags.as_struct.flags & flag) != 0;
@@ -920,7 +920,7 @@
// Push an object onto the allocation stack.
bool PushOnThreadLocalAllocationStack(mirror::Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Set the thread local allocation pointers to the given pointers.
void SetThreadLocalAllocationStack(StackReference<mirror::Object>* start,
@@ -974,8 +974,7 @@
private:
explicit Thread(bool daemon);
- ~Thread() LOCKS_EXCLUDED(Locks::mutator_lock_,
- Locks::thread_suspend_count_lock_);
+ ~Thread() REQUIRES(!Locks::mutator_lock_, !Locks::thread_suspend_count_lock_);
void Destroy();
void CreatePeer(const char* name, bool as_daemon, jobject thread_group);
@@ -983,7 +982,7 @@
template<bool kTransactionActive>
void InitPeer(ScopedObjectAccess& soa, jboolean thread_is_daemon, jobject thread_group,
jobject thread_name, jint thread_priority)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Avoid use, callers should use SetState. Used only by SignalCatcher::HandleSigQuit, ~Thread and
// Dbg::Disconnected.
@@ -998,23 +997,23 @@
return old_state;
}
- void VerifyStackImpl() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void VerifyStackImpl() SHARED_REQUIRES(Locks::mutator_lock_);
- void DumpState(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void DumpState(std::ostream& os) const SHARED_REQUIRES(Locks::mutator_lock_);
void DumpStack(std::ostream& os) const
- LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!Locks::thread_suspend_count_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Out-of-line conveniences for debugging in gdb.
static Thread* CurrentFromGdb(); // Like Thread::Current.
// Like Thread::Dump(std::cerr).
- void DumpFromGdb() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void DumpFromGdb() const SHARED_REQUIRES(Locks::mutator_lock_);
static void* CreateCallback(void* arg);
void HandleUncaughtExceptions(ScopedObjectAccess& soa)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void RemoveFromThreadGroup(ScopedObjectAccess& soa) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ void RemoveFromThreadGroup(ScopedObjectAccess& soa) SHARED_REQUIRES(Locks::mutator_lock_);
// Initialize a thread.
//
@@ -1024,7 +1023,7 @@
// create a JNIEnvExt on its own (and potentially fail at that stage, indicated by a return value
// of false).
bool Init(ThreadList*, JavaVMExt*, JNIEnvExt* jni_env_ext = nullptr)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_);
+ REQUIRES(Locks::runtime_shutdown_lock_);
void InitCardTable();
void InitCpu();
void CleanupCpu();
@@ -1346,12 +1345,12 @@
DISALLOW_COPY_AND_ASSIGN(Thread);
};
-class ScopedAssertNoThreadSuspension {
+class SCOPED_CAPABILITY ScopedAssertNoThreadSuspension {
public:
- ScopedAssertNoThreadSuspension(Thread* self, const char* cause)
+ ScopedAssertNoThreadSuspension(Thread* self, const char* cause) ACQUIRE(Roles::uninterruptible_)
: self_(self), old_cause_(self->StartAssertNoThreadSuspension(cause)) {
}
- ~ScopedAssertNoThreadSuspension() {
+ ~ScopedAssertNoThreadSuspension() RELEASE(Roles::uninterruptible_) {
self_->EndAssertNoThreadSuspension(old_cause_);
}
Thread* Self() {
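The SCOPED_CAPABILITY change turns the no-suspension assertion into a capability ("role") the analysis tracks: the scope acquires Roles::uninterruptible_ via a runtime no-op, and anything that may suspend declares REQUIRES(!Roles::uninterruptible_), as the ThrowNew* declarations above now do. A self-contained compile-time sketch of the pattern (macro definitions assumed as in the Clang docs; names are illustrative, not ART's):

```cpp
#define CAPABILITY(x)      __attribute__((capability(x)))
#define SCOPED_CAPABILITY  __attribute__((scoped_lockable))
#define REQUIRES(...)      __attribute__((requires_capability(__VA_ARGS__)))
#define ACQUIRE(...)       __attribute__((acquire_capability(__VA_ARGS__)))
#define RELEASE(...)       __attribute__((release_capability(__VA_ARGS__)))

class CAPABILITY("role") Role {
 public:
  void Acquire() ACQUIRE(this) {}  // No-ops at runtime; only the analysis sees them.
  void Release() RELEASE(this) {}
};

Role uninterruptible;

class SCOPED_CAPABILITY ScopedUninterruptible {
 public:
  ScopedUninterruptible() ACQUIRE(uninterruptible) { uninterruptible.Acquire(); }
  ~ScopedUninterruptible() RELEASE(uninterruptible) { uninterruptible.Release(); }
};

void MayThrowAndSuspend() REQUIRES(!uninterruptible);

void Example() {
  ScopedUninterruptible no_suspension;
  // MayThrowAndSuspend();  // clang -Wthread-safety-negative would warn here.
}
```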
diff --git a/runtime/thread_linux.cc b/runtime/thread_linux.cc
index 9d54eba..9563b99 100644
--- a/runtime/thread_linux.cc
+++ b/runtime/thread_linux.cc
@@ -44,7 +44,7 @@
void Thread::SetUpAlternateSignalStack() {
// Create and set an alternate signal stack.
-#ifdef HAVE_ANDROID_OS
+#ifdef __ANDROID__
LOG(FATAL) << "Invalid use of alternate signal stack on Android";
#endif
stack_t ss;
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 60c9b5e..62d1e84 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -1228,7 +1228,11 @@
// Clear the TLS data, so that the underlying native thread is recognizably detached.
// (It may wish to reattach later.)
+#ifdef __ANDROID__
+ __get_tls()[TLS_SLOT_ART_THREAD_SELF] = nullptr;
+#else
CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, nullptr), "detach self");
+#endif
// Signal that a thread just detached.
MutexLock mu(nullptr, *Locks::thread_list_lock_);
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index edd1e05..4c50181 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -46,27 +46,25 @@
~ThreadList();
void DumpForSigQuit(std::ostream& os)
- LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::mutator_lock_);
+ REQUIRES(!Locks::thread_list_lock_, !Locks::mutator_lock_);
// For thread suspend timeout dumps.
void Dump(std::ostream& os)
- LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::thread_suspend_count_lock_);
+ REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
pid_t GetLockOwner(); // For SignalCatcher.
// Thread suspension support.
void ResumeAll()
UNLOCK_FUNCTION(Locks::mutator_lock_)
- LOCKS_EXCLUDED(Locks::thread_list_lock_,
- Locks::thread_suspend_count_lock_);
+ REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
void Resume(Thread* thread, bool for_debugger = false)
- LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_);
+ REQUIRES(!Locks::thread_suspend_count_lock_);
// Suspends all threads and gets exclusive access to the mutator_lock_.
// If long suspend is true, then other threads that try to suspend will never time out. Long
// suspend is currently used for hprof, since dumping large heaps takes a long time.
void SuspendAll(const char* cause, bool long_suspend = false)
EXCLUSIVE_LOCK_FUNCTION(Locks::mutator_lock_)
- LOCKS_EXCLUDED(Locks::thread_list_lock_,
- Locks::thread_suspend_count_lock_);
+ REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
// Suspend a thread using a peer, typically used by the debugger. Returns the thread on success,
@@ -76,18 +74,16 @@
// is set to true.
Thread* SuspendThreadByPeer(jobject peer, bool request_suspension, bool debug_suspension,
bool* timed_out)
- LOCKS_EXCLUDED(Locks::mutator_lock_,
- Locks::thread_list_lock_,
- Locks::thread_suspend_count_lock_);
+ REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_,
+ !Locks::thread_suspend_count_lock_);
// Suspend a thread using its thread id, typically used by lock/monitor inflation. Returns the
// thread on success, else null. The thread id is used to identify the thread and to avoid races
// with the thread terminating. Note that as thread ids are recycled, this may not suspend the
// expected thread, which may be terminating. If the suspension times out, *timed_out is set to
// true.
Thread* SuspendThreadByThreadId(uint32_t thread_id, bool debug_suspension, bool* timed_out)
- LOCKS_EXCLUDED(Locks::mutator_lock_,
- Locks::thread_list_lock_,
- Locks::thread_suspend_count_lock_);
+ REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_,
+ !Locks::thread_suspend_count_lock_);
// Find an already suspended thread (or self) by its id.
Thread* FindThreadByThreadId(uint32_t thin_lock_id);
@@ -95,87 +91,78 @@
// Run a checkpoint on threads; running threads are not suspended but run the checkpoint
// inside the suspend check. Returns how many checkpoints we should expect to run.
size_t RunCheckpoint(Closure* checkpoint_function)
- LOCKS_EXCLUDED(Locks::thread_list_lock_,
- Locks::thread_suspend_count_lock_);
+ REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
size_t RunCheckpointOnRunnableThreads(Closure* checkpoint_function)
- LOCKS_EXCLUDED(Locks::thread_list_lock_,
- Locks::thread_suspend_count_lock_);
+ REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
// Flip thread roots from from-space refs to to-space refs. Used by
// the concurrent copying collector.
size_t FlipThreadRoots(Closure* thread_flip_visitor, Closure* flip_callback,
gc::collector::GarbageCollector* collector)
- LOCKS_EXCLUDED(Locks::mutator_lock_,
- Locks::thread_list_lock_,
- Locks::thread_suspend_count_lock_);
+ REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_,
+ !Locks::thread_suspend_count_lock_);
// Suspends all threads
void SuspendAllForDebugger()
- LOCKS_EXCLUDED(Locks::mutator_lock_,
- Locks::thread_list_lock_,
- Locks::thread_suspend_count_lock_);
+ REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_,
+ !Locks::thread_suspend_count_lock_);
void SuspendSelfForDebugger()
- LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_);
+ REQUIRES(!Locks::thread_suspend_count_lock_);
// Resume all threads
void ResumeAllForDebugger()
- LOCKS_EXCLUDED(Locks::thread_list_lock_,
- Locks::thread_suspend_count_lock_);
+ REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
void UndoDebuggerSuspensions()
- LOCKS_EXCLUDED(Locks::thread_list_lock_,
- Locks::thread_suspend_count_lock_);
+ REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
// Iterates over all the threads.
void ForEach(void (*callback)(Thread*, void*), void* context)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_);
+ REQUIRES(Locks::thread_list_lock_);
// Add/remove current thread from list.
void Register(Thread* self)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_)
- LOCKS_EXCLUDED(Locks::mutator_lock_, Locks::thread_list_lock_);
- void Unregister(Thread* self) LOCKS_EXCLUDED(Locks::mutator_lock_, Locks::thread_list_lock_);
+ REQUIRES(Locks::runtime_shutdown_lock_, !Locks::mutator_lock_, !Locks::thread_list_lock_,
+ !Locks::thread_suspend_count_lock_);
+ void Unregister(Thread* self) REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_,
+ !Locks::thread_suspend_count_lock_);
void VisitRoots(RootVisitor* visitor) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Return a copy of the thread list.
- std::list<Thread*> GetList() EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_) {
+ std::list<Thread*> GetList() REQUIRES(Locks::thread_list_lock_) {
return list_;
}
void DumpNativeStacks(std::ostream& os)
- LOCKS_EXCLUDED(Locks::thread_list_lock_);
+ REQUIRES(!Locks::thread_list_lock_);
private:
uint32_t AllocThreadId(Thread* self);
- void ReleaseThreadId(Thread* self, uint32_t id) LOCKS_EXCLUDED(Locks::allocated_thread_ids_lock_);
+ void ReleaseThreadId(Thread* self, uint32_t id) REQUIRES(!Locks::allocated_thread_ids_lock_);
- bool Contains(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_);
- bool Contains(pid_t tid) EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_);
+ bool Contains(Thread* thread) REQUIRES(Locks::thread_list_lock_);
+ bool Contains(pid_t tid) REQUIRES(Locks::thread_list_lock_);
size_t RunCheckpoint(Closure* checkpoint_function, bool includeSuspended)
- LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::thread_suspend_count_lock_);
+ REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
void DumpUnattachedThreads(std::ostream& os)
- LOCKS_EXCLUDED(Locks::thread_list_lock_);
+ REQUIRES(!Locks::thread_list_lock_);
void SuspendAllDaemonThreads()
- LOCKS_EXCLUDED(Locks::thread_list_lock_,
- Locks::thread_suspend_count_lock_);
+ REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
void WaitForOtherNonDaemonThreadsToExit()
- LOCKS_EXCLUDED(Locks::thread_list_lock_,
- Locks::thread_suspend_count_lock_);
+ REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
void SuspendAllInternal(Thread* self, Thread* ignore1, Thread* ignore2 = nullptr,
bool debug_suspend = false)
- LOCKS_EXCLUDED(Locks::thread_list_lock_,
- Locks::thread_suspend_count_lock_);
+ REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
void AssertThreadsAreSuspended(Thread* self, Thread* ignore1, Thread* ignore2 = nullptr)
- LOCKS_EXCLUDED(Locks::thread_list_lock_,
- Locks::thread_suspend_count_lock_);
+ REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
std::bitset<kMaxThreadId> allocated_ids_ GUARDED_BY(Locks::allocated_thread_ids_lock_);
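Throughout thread_list.h the rewrite replaces the advisory LOCKS_EXCLUDED(x) with the checked negative capability REQUIRES(!x): under -Wthread-safety-negative a caller must now provably not hold the lock, and the requirement propagates up the call graph. A hedged sketch of the semantics (hypothetical names, standard Clang attributes):

#define CAPABILITY(x) __attribute__((capability(x)))
#define REQUIRES(...) __attribute__((requires_capability(__VA_ARGS__)))
#define ACQUIRE(...) __attribute__((acquire_capability(__VA_ARGS__)))
#define RELEASE(...) __attribute__((release_capability(__VA_ARGS__)))

struct CAPABILITY("mutex") Mu {
  void Lock() ACQUIRE() {}
  void Unlock() RELEASE() {}
};

Mu suspend_lock;

// REQUIRES(!suspend_lock): callers must not hold it; the body may take it.
void SuspendAllSketch() REQUIRES(!suspend_lock) {
  suspend_lock.Lock();
  // ... adjust suspend counts ...
  suspend_lock.Unlock();
}

// The negative requirement propagates: this caller must declare it too, or
// -Wthread-safety-negative warns at the call site.
void CallerSketch() REQUIRES(!suspend_lock) {
  SuspendAllSketch();
}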
diff --git a/runtime/thread_pool.h b/runtime/thread_pool.h
index 88700e6..1ca0a21 100644
--- a/runtime/thread_pool.h
+++ b/runtime/thread_pool.h
@@ -61,7 +61,7 @@
protected:
ThreadPoolWorker(ThreadPool* thread_pool, const std::string& name, size_t stack_size);
- static void* Callback(void* arg) LOCKS_EXCLUDED(Locks::mutator_lock_);
+ static void* Callback(void* arg) REQUIRES(!Locks::mutator_lock_);
virtual void Run();
ThreadPool* const thread_pool_;
@@ -82,22 +82,22 @@
}
// Broadcast to the workers and tell them to empty out the work queue.
- void StartWorkers(Thread* self);
+ void StartWorkers(Thread* self) REQUIRES(!task_queue_lock_);
// Do not allow workers to grab any new tasks.
- void StopWorkers(Thread* self);
+ void StopWorkers(Thread* self) REQUIRES(!task_queue_lock_);
// Add a new task; the first available started worker will process it. Does not delete the task
// after running it; that is the caller's responsibility.
- void AddTask(Thread* self, Task* task);
+ void AddTask(Thread* self, Task* task) REQUIRES(!task_queue_lock_);
explicit ThreadPool(const char* name, size_t num_threads);
virtual ~ThreadPool();
// Wait for all tasks currently on queue to get completed.
- void Wait(Thread* self, bool do_work, bool may_hold_locks);
+ void Wait(Thread* self, bool do_work, bool may_hold_locks) REQUIRES(!task_queue_lock_);
- size_t GetTaskCount(Thread* self);
+ size_t GetTaskCount(Thread* self) REQUIRES(!task_queue_lock_);
// Returns the total amount of time the workers have waited for tasks.
uint64_t GetWaitTime() const {
@@ -106,18 +106,18 @@
// Provides a way to bound the maximum number of active worker threads; this must be less than
// the thread count of the thread pool.
- void SetMaxActiveWorkers(size_t threads);
+ void SetMaxActiveWorkers(size_t threads) REQUIRES(!task_queue_lock_);
protected:
// Get a task to run; blocks if there are no tasks left.
- virtual Task* GetTask(Thread* self);
+ virtual Task* GetTask(Thread* self) REQUIRES(!task_queue_lock_);
// Try to get a task, returning null if there is none available.
- Task* TryGetTask(Thread* self);
- Task* TryGetTaskLocked() EXCLUSIVE_LOCKS_REQUIRED(task_queue_lock_);
+ Task* TryGetTask(Thread* self) REQUIRES(!task_queue_lock_);
+ Task* TryGetTaskLocked() REQUIRES(task_queue_lock_);
// Are we shutting down?
- bool IsShuttingDown() const EXCLUSIVE_LOCKS_REQUIRED(task_queue_lock_) {
+ bool IsShuttingDown() const REQUIRES(task_queue_lock_) {
return shutting_down_;
}
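The thread_pool.h hunks show the two-tier pattern these annotations encourage: public entry points are REQUIRES(!task_queue_lock_) and take the lock themselves, while *Locked helpers such as TryGetTaskLocked() assert via REQUIRES(task_queue_lock_) that it is already held. A compact sketch, reusing the attribute macros from the sketches above plus GUARDED_BY (names hypothetical):

#define GUARDED_BY(x) __attribute__((guarded_by(x)))

#include <vector>

struct Task {};

class TaskQueueSketch {
 public:
  Task* TryPop() REQUIRES(!lock_) {
    lock_.Lock();
    Task* t = TryPopLocked();
    lock_.Unlock();
    return t;
  }

 private:
  // Callers must already hold lock_; the analysis enforces this.
  Task* TryPopLocked() REQUIRES(lock_) {
    if (tasks_.empty()) return nullptr;
    Task* t = tasks_.back();
    tasks_.pop_back();
    return t;
  }

  Mu lock_;
  std::vector<Task*> tasks_ GUARDED_BY(lock_);
};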
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 487baed..4393430 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -57,7 +57,7 @@
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
method_trace_(Trace::AllocStackTrace()) {}
- bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
// Ignore runtime frames (in particular callee save).
if (!m->IsRuntimeMethod()) {
@@ -218,7 +218,7 @@
*buf++ = static_cast<uint8_t>(val >> 56);
}
-static void GetSample(Thread* thread, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+static void GetSample(Thread* thread, void* arg) SHARED_REQUIRES(Locks::mutator_lock_) {
BuildStackTraceVisitor build_trace_visitor(thread);
build_trace_visitor.WalkStack();
std::vector<ArtMethod*>* stack_trace = build_trace_visitor.GetStackTrace();
@@ -636,7 +636,7 @@
static void GetVisitedMethodsFromBitSets(
const std::map<const DexFile*, DexIndexBitSet*>& seen_methods,
- std::set<ArtMethod*>* visited_methods) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ std::set<ArtMethod*>* visited_methods) SHARED_REQUIRES(Locks::mutator_lock_) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
for (auto& e : seen_methods) {
DexIndexBitSet* bit_set = e.second;
@@ -749,7 +749,7 @@
void Trace::FieldRead(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc, ArtField* field)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
UNUSED(thread, this_object, method, dex_pc, field);
// We're not registered to listen to this kind of event, so complain.
LOG(ERROR) << "Unexpected field read event in tracing " << PrettyMethod(method) << " " << dex_pc;
@@ -758,7 +758,7 @@
void Trace::FieldWritten(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc, ArtField* field,
const JValue& field_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
UNUSED(thread, this_object, method, dex_pc, field, field_value);
// We're not registered to listen to this kind of event, so complain.
LOG(ERROR) << "Unexpected field write event in tracing " << PrettyMethod(method) << " " << dex_pc;
@@ -793,14 +793,14 @@
}
void Trace::ExceptionCaught(Thread* thread, mirror::Throwable* exception_object)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
UNUSED(thread, exception_object);
LOG(ERROR) << "Unexpected exception caught event in tracing";
}
void Trace::BackwardBranch(Thread* /*thread*/, ArtMethod* method,
int32_t /*dex_pc_offset*/)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
LOG(ERROR) << "Unexpected backward branch event in tracing" << PrettyMethod(method);
}
diff --git a/runtime/trace.h b/runtime/trace.h
index 69e6acc..04be3dd 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -114,28 +114,20 @@
static void Start(const char* trace_filename, int trace_fd, size_t buffer_size, int flags,
TraceOutputMode output_mode, TraceMode trace_mode, int interval_us)
- LOCKS_EXCLUDED(Locks::mutator_lock_,
- Locks::thread_list_lock_,
- Locks::thread_suspend_count_lock_,
- Locks::trace_lock_);
- static void Pause() LOCKS_EXCLUDED(Locks::trace_lock_, Locks::thread_list_lock_);
- static void Resume() LOCKS_EXCLUDED(Locks::trace_lock_);
+ REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_,
+ !Locks::trace_lock_);
+ static void Pause() REQUIRES(!Locks::trace_lock_, !Locks::thread_list_lock_);
+ static void Resume() REQUIRES(!Locks::trace_lock_);
// Stop tracing. This will finish the trace and write it to file/send it via DDMS.
static void Stop()
- LOCKS_EXCLUDED(Locks::mutator_lock_,
- Locks::thread_list_lock_,
- Locks::trace_lock_);
+ REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::trace_lock_);
// Abort tracing. This will just stop tracing and *not* write/send the collected data.
static void Abort()
- LOCKS_EXCLUDED(Locks::mutator_lock_,
- Locks::thread_list_lock_,
- Locks::trace_lock_);
+ REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::trace_lock_);
static void Shutdown()
- LOCKS_EXCLUDED(Locks::mutator_lock_,
- Locks::thread_list_lock_,
- Locks::trace_lock_);
- static TracingMode GetMethodTracingMode() LOCKS_EXCLUDED(Locks::trace_lock_);
+ REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::trace_lock_);
+ static TracingMode GetMethodTracingMode() REQUIRES(!Locks::trace_lock_);
bool UseWallClock();
bool UseThreadCpuClock();
@@ -143,33 +135,37 @@
uint32_t GetClockOverheadNanoSeconds();
void CompareAndUpdateStackTrace(Thread* thread, std::vector<ArtMethod*>* stack_trace)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_);
// InstrumentationListener implementation.
void MethodEntered(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
+ OVERRIDE;
void MethodExited(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc,
const JValue& return_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
+ OVERRIDE;
void MethodUnwind(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
+ OVERRIDE;
void DexPcMoved(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t new_dex_pc)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
+ OVERRIDE;
void FieldRead(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc, ArtField* field)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
void FieldWritten(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc, ArtField* field,
const JValue& field_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
void ExceptionCaught(Thread* thread, mirror::Throwable* exception_object)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
void BackwardBranch(Thread* thread, ArtMethod* method, int32_t dex_pc_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
// Reuse an old stack trace if it exists, otherwise allocate a new one.
static std::vector<ArtMethod*>* AllocStackTrace();
// Clear and store an old stack trace for later use.
@@ -177,57 +173,61 @@
// Save id and name of a thread before it exits.
static void StoreExitingThreadInfo(Thread* thread);
- static TraceOutputMode GetOutputMode() LOCKS_EXCLUDED(Locks::trace_lock_);
- static TraceMode GetMode() LOCKS_EXCLUDED(Locks::trace_lock_);
- static size_t GetBufferSize() LOCKS_EXCLUDED(Locks::trace_lock_);
+ static TraceOutputMode GetOutputMode() REQUIRES(!Locks::trace_lock_);
+ static TraceMode GetMode() REQUIRES(!Locks::trace_lock_);
+ static size_t GetBufferSize() REQUIRES(!Locks::trace_lock_);
private:
Trace(File* trace_file, const char* trace_name, size_t buffer_size, int flags,
TraceOutputMode output_mode, TraceMode trace_mode);
// The sampling interval in microseconds is passed as an argument.
- static void* RunSamplingThread(void* arg) LOCKS_EXCLUDED(Locks::trace_lock_);
+ static void* RunSamplingThread(void* arg) REQUIRES(!Locks::trace_lock_);
static void StopTracing(bool finish_tracing, bool flush_file)
- LOCKS_EXCLUDED(Locks::mutator_lock_,
- Locks::thread_list_lock_,
- Locks::trace_lock_);
- void FinishTracing() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::trace_lock_)
+ // There is an annoying issue with static functions that create a new object and then call
+ // into it: the analysis cannot tell that the fresh object's lock is not currently held, so
+ // the negative annotations produce a false positive. TODO: Figure out how to annotate this.
+ NO_THREAD_SAFETY_ANALYSIS;
+ void FinishTracing() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_);
void ReadClocks(Thread* thread, uint32_t* thread_clock_diff, uint32_t* wall_clock_diff);
void LogMethodTraceEvent(Thread* thread, ArtMethod* method,
instrumentation::Instrumentation::InstrumentationEvent event,
uint32_t thread_clock_diff, uint32_t wall_clock_diff)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_);
// Methods to output traced methods and threads.
- void GetVisitedMethods(size_t end_offset, std::set<ArtMethod*>* visited_methods);
+ void GetVisitedMethods(size_t end_offset, std::set<ArtMethod*>* visited_methods)
+ REQUIRES(!*unique_methods_lock_);
void DumpMethodList(std::ostream& os, const std::set<ArtMethod*>& visited_methods)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void DumpThreadList(std::ostream& os) LOCKS_EXCLUDED(Locks::thread_list_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_);
+ void DumpThreadList(std::ostream& os) REQUIRES(!Locks::thread_list_lock_);
// Methods to register seen entities in streaming mode. The methods return true if the entity
// is newly discovered.
bool RegisterMethod(ArtMethod* method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(streaming_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(streaming_lock_);
bool RegisterThread(Thread* thread)
- EXCLUSIVE_LOCKS_REQUIRED(streaming_lock_);
+ REQUIRES(streaming_lock_);
// Copy a temporary buffer to the main buffer. Used for streaming. Exposed here for lock
// annotation.
void WriteToBuf(const uint8_t* src, size_t src_size)
- EXCLUSIVE_LOCKS_REQUIRED(streaming_lock_);
+ REQUIRES(streaming_lock_);
- uint32_t EncodeTraceMethod(ArtMethod* method) LOCKS_EXCLUDED(unique_methods_lock_);
+ uint32_t EncodeTraceMethod(ArtMethod* method) REQUIRES(!*unique_methods_lock_);
uint32_t EncodeTraceMethodAndAction(ArtMethod* method, TraceAction action)
- LOCKS_EXCLUDED(unique_methods_lock_);
- ArtMethod* DecodeTraceMethod(uint32_t tmid) LOCKS_EXCLUDED(unique_methods_lock_);
- std::string GetMethodLine(ArtMethod* method) LOCKS_EXCLUDED(unique_methods_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!*unique_methods_lock_);
+ ArtMethod* DecodeTraceMethod(uint32_t tmid) REQUIRES(!*unique_methods_lock_);
+ std::string GetMethodLine(ArtMethod* method) REQUIRES(!*unique_methods_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
void DumpBuf(uint8_t* buf, size_t buf_size, TraceClockSource clock_source)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_);
// Singleton instance of the Trace or null when no method tracing is active.
static Trace* volatile the_trace_ GUARDED_BY(Locks::trace_lock_);
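The TODO block above documents a real limitation: when a static function creates an object and immediately calls into it, the analysis cannot establish the negative capability for the fresh object's lock, so NO_THREAD_SAFETY_ANALYSIS (Clang's no_thread_safety_analysis attribute) opts that single function out rather than weakening the annotations on the callee. A hypothetical reduction of the pattern, building on the macros and Mu capability from the sketches above:

#define NO_THREAD_SAFETY_ANALYSIS __attribute__((no_thread_safety_analysis))

class Worker {
 public:
  void Finish() REQUIRES(!lock_) {
    lock_.Lock();
    lock_.Unlock();
  }
 private:
  Mu lock_;
};

// The analysis cannot prove that the brand-new Worker's lock_ is unheld at
// the REQUIRES(!lock_) call site, hence the blanket opt-out.
static void StopAllSketch() NO_THREAD_SAFETY_ANALYSIS {
  Worker* w = new Worker();
  w->Finish();
  delete w;
}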
diff --git a/runtime/transaction.h b/runtime/transaction.h
index 030478c..8ff0614 100644
--- a/runtime/transaction.h
+++ b/runtime/transaction.h
@@ -46,63 +46,63 @@
~Transaction();
void Abort(const std::string& abort_message)
- LOCKS_EXCLUDED(log_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!log_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
void ThrowAbortError(Thread* self, const std::string* abort_message)
- LOCKS_EXCLUDED(log_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool IsAborted() LOCKS_EXCLUDED(log_lock_);
+ REQUIRES(!log_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsAborted() REQUIRES(!log_lock_);
// Record object field changes.
void RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset, uint8_t value,
bool is_volatile)
- LOCKS_EXCLUDED(log_lock_);
+ REQUIRES(!log_lock_);
void RecordWriteFieldByte(mirror::Object* obj, MemberOffset field_offset, int8_t value,
bool is_volatile)
- LOCKS_EXCLUDED(log_lock_);
+ REQUIRES(!log_lock_);
void RecordWriteFieldChar(mirror::Object* obj, MemberOffset field_offset, uint16_t value,
bool is_volatile)
- LOCKS_EXCLUDED(log_lock_);
+ REQUIRES(!log_lock_);
void RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_offset, int16_t value,
bool is_volatile)
- LOCKS_EXCLUDED(log_lock_);
+ REQUIRES(!log_lock_);
void RecordWriteField32(mirror::Object* obj, MemberOffset field_offset, uint32_t value,
bool is_volatile)
- LOCKS_EXCLUDED(log_lock_);
+ REQUIRES(!log_lock_);
void RecordWriteField64(mirror::Object* obj, MemberOffset field_offset, uint64_t value,
bool is_volatile)
- LOCKS_EXCLUDED(log_lock_);
+ REQUIRES(!log_lock_);
void RecordWriteFieldReference(mirror::Object* obj, MemberOffset field_offset,
mirror::Object* value, bool is_volatile)
- LOCKS_EXCLUDED(log_lock_);
+ REQUIRES(!log_lock_);
// Record array change.
void RecordWriteArray(mirror::Array* array, size_t index, uint64_t value)
- LOCKS_EXCLUDED(log_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!log_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Record intern string table changes.
void RecordStrongStringInsertion(mirror::String* s)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_)
- LOCKS_EXCLUDED(log_lock_);
+ REQUIRES(Locks::intern_table_lock_)
+ REQUIRES(!log_lock_);
void RecordWeakStringInsertion(mirror::String* s)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_)
- LOCKS_EXCLUDED(log_lock_);
+ REQUIRES(Locks::intern_table_lock_)
+ REQUIRES(!log_lock_);
void RecordStrongStringRemoval(mirror::String* s)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_)
- LOCKS_EXCLUDED(log_lock_);
+ REQUIRES(Locks::intern_table_lock_)
+ REQUIRES(!log_lock_);
void RecordWeakStringRemoval(mirror::String* s)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_)
- LOCKS_EXCLUDED(log_lock_);
+ REQUIRES(Locks::intern_table_lock_)
+ REQUIRES(!log_lock_);
// Abort transaction by undoing all recorded changes.
void Rollback()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(log_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!log_lock_);
void VisitRoots(RootVisitor* visitor)
- LOCKS_EXCLUDED(log_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(!log_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
private:
class ObjectLog : public ValueObject {
@@ -115,8 +115,8 @@
void Log64BitsValue(MemberOffset offset, uint64_t value, bool is_volatile);
void LogReferenceValue(MemberOffset offset, mirror::Object* obj, bool is_volatile);
- void Undo(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void Undo(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
+ void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
size_t Size() const {
return field_values_.size();
@@ -141,7 +141,7 @@
void LogValue(FieldValueKind kind, MemberOffset offset, uint64_t value, bool is_volatile);
void UndoFieldWrite(mirror::Object* obj, MemberOffset field_offset,
- const FieldValue& field_value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const FieldValue& field_value) SHARED_REQUIRES(Locks::mutator_lock_);
// Maps field's offset to its value.
std::map<uint32_t, FieldValue> field_values_;
@@ -151,7 +151,7 @@
public:
void LogValue(size_t index, uint64_t value);
- void Undo(mirror::Array* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void Undo(mirror::Array* obj) SHARED_REQUIRES(Locks::mutator_lock_);
size_t Size() const {
return array_values_.size();
@@ -159,7 +159,7 @@
private:
void UndoArrayWrite(mirror::Array* array, Primitive::Type array_type, size_t index,
- uint64_t value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ uint64_t value) SHARED_REQUIRES(Locks::mutator_lock_);
// Maps index to value.
// TODO: use JValue instead?
@@ -182,9 +182,9 @@
}
void Undo(InternTable* intern_table)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
- void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(Locks::intern_table_lock_);
+ void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
private:
mirror::String* str_;
@@ -193,31 +193,31 @@
};
void LogInternedString(const InternStringLog& log)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_)
- LOCKS_EXCLUDED(log_lock_);
+ REQUIRES(Locks::intern_table_lock_)
+ REQUIRES(!log_lock_);
void UndoObjectModifications()
- EXCLUSIVE_LOCKS_REQUIRED(log_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(log_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
void UndoArrayModifications()
- EXCLUSIVE_LOCKS_REQUIRED(log_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(log_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
void UndoInternStringTableModifications()
- EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(log_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(Locks::intern_table_lock_)
+ REQUIRES(log_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
void VisitObjectLogs(RootVisitor* visitor)
- EXCLUSIVE_LOCKS_REQUIRED(log_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(log_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
void VisitArrayLogs(RootVisitor* visitor)
- EXCLUSIVE_LOCKS_REQUIRED(log_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(log_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
void VisitStringLogs(RootVisitor* visitor)
- EXCLUSIVE_LOCKS_REQUIRED(log_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ REQUIRES(log_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
- const std::string& GetAbortMessage() LOCKS_EXCLUDED(log_lock_);
+ const std::string& GetAbortMessage() REQUIRES(!log_lock_);
Mutex log_lock_ ACQUIRED_AFTER(Locks::intern_table_lock_);
std::map<mirror::Object*, ObjectLog> object_logs_ GUARDED_BY(log_lock_);
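The ACQUIRED_AFTER declaration on log_lock_ records the lock order that the REQUIRES pairs above imply: intern_table_lock_ first, log_lock_ second. Clang's acquired_after attribute serves mainly as checked documentation (order enforcement has historically been gated behind the beta warnings), but the convention is easy to illustrate (hypothetical names, macros from the sketches above):

#define ACQUIRED_AFTER(...) __attribute__((acquired_after(__VA_ARGS__)))

Mu table_lock;
Mu log_lock ACQUIRED_AFTER(table_lock);  // Must always be taken after table_lock.

// Matches the RecordStrongStringInsertion-style contract above:
// table_lock held on entry, log_lock taken inside.
void RecordSketch() REQUIRES(table_lock, !log_lock) {
  log_lock.Lock();
  // ... append to the transaction log ...
  log_lock.Unlock();
}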
diff --git a/runtime/utf.h b/runtime/utf.h
index 7f05248..1193d29 100644
--- a/runtime/utf.h
+++ b/runtime/utf.h
@@ -77,7 +77,7 @@
* The java.lang.String hashCode() algorithm.
*/
int32_t ComputeUtf16Hash(mirror::CharArray* chars, int32_t offset, size_t char_count)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
int32_t ComputeUtf16Hash(const uint16_t* chars, size_t char_count);
// Compute a hash code of a modified UTF-8 string. Not the standard java hash since it returns a
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 194d9fe..20512f9 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -1130,9 +1130,13 @@
os << prefix << StringPrintf("#%02zu pc ", it->num);
bool try_addr2line = false;
if (!BacktraceMap::IsValid(it->map)) {
- os << StringPrintf("%08" PRIxPTR " ???", it->pc);
+ os << StringPrintf(Is64BitInstructionSet(kRuntimeISA) ? "%016" PRIxPTR " ???"
+ : "%08" PRIxPTR " ???",
+ it->pc);
} else {
- os << StringPrintf("%08" PRIxPTR " ", BacktraceMap::GetRelativePc(it->map, it->pc));
+ os << StringPrintf(Is64BitInstructionSet(kRuntimeISA) ? "%016" PRIxPTR " "
+ : "%08" PRIxPTR " ",
+ BacktraceMap::GetRelativePc(it->map, it->pc));
os << it->map.name;
os << " (";
if (!it->func_name.empty()) {
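The utils.cc hunk above pads the printed PC to the pointer width of the target ISA (16 hex digits on 64-bit, 8 on 32-bit) so backtrace addresses line up in dumps. A standalone illustration of the format-string selection, with sizeof(void*) standing in for the Is64BitInstructionSet(kRuntimeISA) check:

#include <cinttypes>
#include <cstdio>

int main() {
  uintptr_t pc = 0xdeadbeef;
  const bool is_64bit = sizeof(void*) == 8;  // stand-in for the runtime ISA check
  // 64-bit: 16 zero-padded hex digits; 32-bit: 8.
  std::printf(is_64bit ? "%016" PRIxPTR "\n" : "%08" PRIxPTR "\n", pc);
  return 0;
}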
diff --git a/runtime/utils.h b/runtime/utils.h
index 1ef98e7..4fa5f5a 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -111,22 +111,22 @@
// "[[I" would be "int[][]", "[Ljava/lang/String;" would be
// "java.lang.String[]", and so forth.
std::string PrettyDescriptor(mirror::String* descriptor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
std::string PrettyDescriptor(const char* descriptor);
std::string PrettyDescriptor(mirror::Class* klass)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
std::string PrettyDescriptor(Primitive::Type type);
// Returns a human-readable signature for 'f'. Something like "a.b.C.f" or
// "int a.b.C.f" (depending on the value of 'with_type').
std::string PrettyField(ArtField* f, bool with_type = true)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
std::string PrettyField(uint32_t field_idx, const DexFile& dex_file, bool with_type = true);
// Returns a human-readable signature for 'm'. Something like "a.b.C.m" or
// "a.b.C.m(II)V" (depending on the value of 'with_signature').
std::string PrettyMethod(ArtMethod* m, bool with_signature = true)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
std::string PrettyMethod(uint32_t method_idx, const DexFile& dex_file, bool with_signature = true);
// Returns a human-readable form of the name of the *class* of the given object.
@@ -134,7 +134,7 @@
// be "java.lang.String". Given an array of int, the output would be "int[]".
// Given String.class, the output would be "java.lang.Class<java.lang.String>".
std::string PrettyTypeOf(mirror::Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Returns a human-readable form of the type at an index in the specified dex file.
// Example outputs: char[], java.lang.String.
@@ -143,11 +143,11 @@
// Returns a human-readable form of the name of the given class.
// Given String.class, the output would be "java.lang.Class<java.lang.String>".
std::string PrettyClass(mirror::Class* c)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Returns a human-readable form of the name of the given class with its class loader.
std::string PrettyClassAndClassLoader(mirror::Class* c)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Returns a human-readable version of the Java part of the access flags, e.g., "private static "
// (note the trailing whitespace).
@@ -182,10 +182,10 @@
// Returns the JNI native function name for the non-overloaded method 'm'.
std::string JniShortName(ArtMethod* m)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Returns the JNI native function name for the overloaded method 'm'.
std::string JniLongName(ArtMethod* m)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool ReadFileToString(const std::string& file_name, std::string* result);
bool PrintFileToLog(const std::string& file_name, LogSeverity level);
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 8c950a0..0181e5b 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -2874,6 +2874,13 @@
}
}
}
+ // Handle this like a RETURN_VOID now. Code is duplicated to separate standard from
+ // quickened opcodes (otherwise this could be a fall-through).
+ if (!IsConstructor()) {
+ if (!GetMethodReturnType().IsConflict()) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "return-void not expected";
+ }
+ }
break;
// Note: the following instructions encode offsets derived from class linking.
// As such they use Class*/Field*/AbstractMethod* as these offsets only have
@@ -3590,7 +3597,7 @@
++pos_;
}
- const char* GetDescriptor() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const char* GetDescriptor() SHARED_REQUIRES(Locks::mutator_lock_) {
return res_method_->GetTypeDescriptorFromTypeIdx(params_->GetTypeItem(pos_).type_idx_);
}
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index a2835f5..3b59bba 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -150,13 +150,13 @@
/* Verify a class. Returns "kNoFailure" on success. */
static FailureKind VerifyClass(Thread* self, mirror::Class* klass, bool allow_soft_failures,
std::string* error)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static FailureKind VerifyClass(Thread* self, const DexFile* dex_file,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader,
const DexFile::ClassDef* class_def,
bool allow_soft_failures, std::string* error)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static MethodVerifier* VerifyMethodAndDump(Thread* self,
VariableIndentationOutputStream* vios,
@@ -167,10 +167,10 @@
const DexFile::ClassDef* class_def,
const DexFile::CodeItem* code_item, ArtMethod* method,
uint32_t method_access_flags)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static FailureKind VerifyMethod(ArtMethod* method, bool allow_soft_failures,
- std::string* error) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string* error) SHARED_REQUIRES(Locks::mutator_lock_);
uint8_t EncodePcToReferenceMapData() const;
@@ -193,29 +193,29 @@
// Dump the state of the verifier, namely each instruction, what flags are set on it, register
// information
- void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void Dump(VariableIndentationOutputStream* vios) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void Dump(std::ostream& os) SHARED_REQUIRES(Locks::mutator_lock_);
+ void Dump(VariableIndentationOutputStream* vios) SHARED_REQUIRES(Locks::mutator_lock_);
// Fills 'monitor_enter_dex_pcs' with the dex pcs of the monitor-enter instructions corresponding
// to the locks held at 'dex_pc' in method 'm'.
static void FindLocksAtDexPc(ArtMethod* m, uint32_t dex_pc,
std::vector<uint32_t>* monitor_enter_dex_pcs)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Returns the accessed field corresponding to the quick instruction's field
// offset at 'dex_pc' in method 'm'.
static ArtField* FindAccessedFieldAtDexPc(ArtMethod* m, uint32_t dex_pc)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Returns the invoked method corresponding to the quick instruction's vtable
// index at 'dex_pc' in method 'm'.
static ArtMethod* FindInvokedMethodAtDexPc(ArtMethod* m, uint32_t dex_pc)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static SafeMap<uint32_t, std::set<uint32_t>> FindStringInitMap(ArtMethod* m)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- static void Init() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void Init() SHARED_REQUIRES(Locks::mutator_lock_);
static void Shutdown();
bool CanLoadClasses() const {
@@ -228,7 +228,7 @@
ArtMethod* method,
uint32_t access_flags, bool can_load_classes, bool allow_soft_failures,
bool need_precise_constants, bool allow_thread_suspension)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: MethodVerifier(self, dex_file, dex_cache, class_loader, class_def, code_item, method_idx,
method, access_flags, can_load_classes, allow_soft_failures,
need_precise_constants, false, allow_thread_suspension) {}
@@ -237,22 +237,22 @@
// Run verification on the method. Returns true if verification completes and false if the input
// has an irrecoverable corruption.
- bool Verify() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool Verify() SHARED_REQUIRES(Locks::mutator_lock_);
// Describe VRegs at the given dex pc.
std::vector<int32_t> DescribeVRegs(uint32_t dex_pc);
static void VisitStaticRoots(RootVisitor* visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void VisitRoots(RootVisitor* visitor, const RootInfo& roots)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Accessors used by the compiler via CompilerCallback
const DexFile::CodeItem* CodeItem() const;
RegisterLine* GetRegLine(uint32_t dex_pc);
const InstructionFlags& GetInstructionFlags(size_t index) const;
- mirror::ClassLoader* GetClassLoader() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::ClassLoader* GetClassLoader() SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::DexCache* GetDexCache() SHARED_REQUIRES(Locks::mutator_lock_);
MethodReference GetMethodReference() const;
uint32_t GetAccessFlags() const;
bool HasCheckCasts() const;
@@ -263,15 +263,15 @@
}
const RegType& ResolveCheckedClass(uint32_t class_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Returns the method of a quick invoke or null if it cannot be found.
ArtMethod* GetQuickInvokedMethod(const Instruction* inst, RegisterLine* reg_line,
bool is_range, bool allow_failure)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Returns the access field of a quick field access (iget/iput-quick) or null
// if it cannot be found.
ArtField* GetQuickFieldAccess(const Instruction* inst, RegisterLine* reg_line)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Is the method being verified a constructor?
bool IsConstructor() const {
@@ -295,7 +295,7 @@
ArtMethod* method, uint32_t access_flags,
bool can_load_classes, bool allow_soft_failures, bool need_precise_constants,
bool verify_to_dump, bool allow_thread_suspension)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Adds the given string to the beginning of the last failure message.
void PrependToLastFailMessage(std::string);
@@ -321,18 +321,18 @@
const DexFile::CodeItem* code_item,
ArtMethod* method, uint32_t method_access_flags,
bool allow_soft_failures, bool need_precise_constants)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- void FindLocksAtDexPc() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void FindLocksAtDexPc() SHARED_REQUIRES(Locks::mutator_lock_);
ArtField* FindAccessedFieldAtDexPc(uint32_t dex_pc)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindInvokedMethodAtDexPc(uint32_t dex_pc)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
SafeMap<uint32_t, std::set<uint32_t>>& FindStringInitMap()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
/*
* Compute the width of the instruction at each address in the instruction stream, and store it in
@@ -360,7 +360,7 @@
* Returns "false" if something in the exception table looks fishy, but we're expecting the
* exception table to be somewhat sane.
*/
- bool ScanTryCatchBlocks() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool ScanTryCatchBlocks() SHARED_REQUIRES(Locks::mutator_lock_);
/*
* Perform static verification on all instructions in a method.
@@ -466,11 +466,11 @@
bool* selfOkay);
/* Perform detailed code-flow analysis on a single method. */
- bool VerifyCodeFlow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool VerifyCodeFlow() SHARED_REQUIRES(Locks::mutator_lock_);
// Set the register types for the first instruction in the method based on the method signature.
// This has the side-effect of validating the signature.
- bool SetTypesFromSignature() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool SetTypesFromSignature() SHARED_REQUIRES(Locks::mutator_lock_);
/*
* Perform code flow on a method.
@@ -518,7 +518,7 @@
* reordering by specifying that you can't execute the new-instance instruction if a register
* contains an uninitialized instance created by that same instruction.
*/
- bool CodeFlowVerifyMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool CodeFlowVerifyMethod() SHARED_REQUIRES(Locks::mutator_lock_);
/*
* Perform verification for a single instruction.
@@ -530,33 +530,33 @@
* addresses. Does not set or clear any other flags in "insn_flags_".
*/
bool CodeFlowVerifyInstruction(uint32_t* start_guess)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Perform verification of a new array instruction
void VerifyNewArray(const Instruction* inst, bool is_filled, bool is_range)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Helper to perform verification on puts of primitive type.
void VerifyPrimitivePut(const RegType& target_type, const RegType& insn_type,
- const uint32_t vregA) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const uint32_t vregA) SHARED_REQUIRES(Locks::mutator_lock_);
// Perform verification of an aget instruction. The destination register's type will be set to
// be that of the component type of the array unless the array type is unknown, in which case a
// bottom type inferred from the type of the instruction is used. is_primitive is false for an
// aget-object.
void VerifyAGet(const Instruction* inst, const RegType& insn_type,
- bool is_primitive) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool is_primitive) SHARED_REQUIRES(Locks::mutator_lock_);
// Perform verification of an aput instruction.
void VerifyAPut(const Instruction* inst, const RegType& insn_type,
- bool is_primitive) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool is_primitive) SHARED_REQUIRES(Locks::mutator_lock_);
// Lookup instance field and fail for resolution violations
ArtField* GetInstanceField(const RegType& obj_type, int field_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Lookup static field and fail for resolution violations
- ArtField* GetStaticField(int field_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtField* GetStaticField(int field_idx) SHARED_REQUIRES(Locks::mutator_lock_);
// Perform verification of an iget/sget/iput/sput instruction.
enum class FieldAccessType { // private
@@ -566,16 +566,16 @@
template <FieldAccessType kAccType>
void VerifyISFieldAccess(const Instruction* inst, const RegType& insn_type,
bool is_primitive, bool is_static)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template <FieldAccessType kAccType>
void VerifyQuickFieldAccess(const Instruction* inst, const RegType& insn_type, bool is_primitive)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Resolves a class based on an index and performs access checks to ensure the referrer can
// access the resolved class.
const RegType& ResolveClassAndCheckAccess(uint32_t class_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
/*
* For the "move-exception" instruction at "work_insn_idx_", which must be at an exception handler
@@ -583,7 +583,7 @@
* exception handler can be found or if the Join of exception types fails.
*/
const RegType& GetCaughtExceptionType()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
/*
* Resolves a method based on an index and performs access checks to ensure
@@ -591,7 +591,7 @@
* Does not throw exceptions.
*/
ArtMethod* ResolveMethodAndCheckAccess(uint32_t method_idx, MethodType method_type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
/*
* Verify the arguments to a method. We're executing in "method", making
@@ -618,22 +618,22 @@
ArtMethod* VerifyInvocationArgs(const Instruction* inst,
MethodType method_type,
bool is_range, bool is_super)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Similar checks to the above, but on the proto. Will be used when the method cannot be
// resolved.
void VerifyInvocationArgsUnresolvedMethod(const Instruction* inst, MethodType method_type,
bool is_range)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template <class T>
ArtMethod* VerifyInvocationArgsFromIterator(T* it, const Instruction* inst,
MethodType method_type, bool is_range,
ArtMethod* res_method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* VerifyInvokeVirtualQuickArgs(const Instruction* inst, bool is_range)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
/*
* Verify that the target instruction is not "move-exception". It's important that the only way
@@ -665,18 +665,18 @@
* Returns "false" if an error is encountered.
*/
bool UpdateRegisters(uint32_t next_insn, RegisterLine* merge_line, bool update_merge_line)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Return the register type for the method.
- const RegType& GetMethodReturnType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const RegType& GetMethodReturnType() SHARED_REQUIRES(Locks::mutator_lock_);
// Get a type representing the declaring class of the method.
- const RegType& GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const RegType& GetDeclaringClass() SHARED_REQUIRES(Locks::mutator_lock_);
InstructionFlags* CurrentInsnFlags();
const RegType& DetermineCat1Constant(int32_t value, bool precise)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Try to create a register type from the given class. In case a precise type is requested, but
// the class is not instantiable, a soft error (of type NO_CLASS) will be enqueued and a
@@ -684,7 +684,7 @@
// Note: we reuse NO_CLASS as this will throw an exception at runtime, when the failing class is
// actually touched.
const RegType& FromClass(const char* descriptor, mirror::Class* klass, bool precise)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// The thread we're verifying on.
Thread* const self_;
diff --git a/runtime/verifier/method_verifier_test.cc b/runtime/verifier/method_verifier_test.cc
index 3994536..2ab6b4a 100644
--- a/runtime/verifier/method_verifier_test.cc
+++ b/runtime/verifier/method_verifier_test.cc
@@ -30,7 +30,7 @@
class MethodVerifierTest : public CommonRuntimeTest {
protected:
void VerifyClass(const std::string& descriptor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ASSERT_TRUE(descriptor != nullptr);
Thread* self = Thread::Current();
mirror::Class* klass = class_linker_->FindSystemClass(self, descriptor.c_str());
@@ -42,7 +42,7 @@
}
void VerifyDexFile(const DexFile& dex)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// Verify all the classes defined in this file
for (size_t i = 0; i < dex.NumClassDefs(); i++) {
const DexFile::ClassDef& class_def = dex.GetClassDef(i);
diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc
index 6e23234..7fe8bb9 100644
--- a/runtime/verifier/reg_type.cc
+++ b/runtime/verifier/reg_type.cc
@@ -46,19 +46,19 @@
const IntegerType* IntegerType::instance_ = nullptr;
PrimitiveType::PrimitiveType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: RegType(klass, descriptor, cache_id) {
CHECK(klass != nullptr);
CHECK(!descriptor.empty());
}
Cat1Type::Cat1Type(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: PrimitiveType(klass, descriptor, cache_id) {
}
Cat2Type::Cat2Type(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: PrimitiveType(klass, descriptor, cache_id) {
}
@@ -280,7 +280,7 @@
}
}
-std::string UndefinedType::Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+std::string UndefinedType::Dump() const SHARED_REQUIRES(Locks::mutator_lock_) {
return "Undefined";
}
@@ -302,7 +302,9 @@
PreciseReferenceType::PreciseReferenceType(mirror::Class* klass, const std::string& descriptor,
uint16_t cache_id)
: RegType(klass, descriptor, cache_id) {
- DCHECK(klass->IsInstantiable());
+ // Note: no check for IsInstantiable() here. We may produce this in case an InstantiationError
+ // would be thrown at runtime, but we need to continue verification and *not* create a
+ // hard failure or abort.
}
std::string UnresolvedMergedType::Dump() const {
@@ -538,7 +540,7 @@
}
}
-bool RegType::IsObjectArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+bool RegType::IsObjectArrayTypes() const SHARED_REQUIRES(Locks::mutator_lock_) {
if (IsUnresolvedTypes() && !IsUnresolvedMergedReference() && !IsUnresolvedSuperClass()) {
// Primitive arrays will always resolve
DCHECK(descriptor_[1] == 'L' || descriptor_[1] == '[');
@@ -551,11 +553,11 @@
}
}
-bool RegType::IsJavaLangObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+bool RegType::IsJavaLangObject() const SHARED_REQUIRES(Locks::mutator_lock_) {
return IsReference() && GetClass()->IsObjectClass();
}
-bool RegType::IsArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+bool RegType::IsArrayTypes() const SHARED_REQUIRES(Locks::mutator_lock_) {
if (IsUnresolvedTypes() && !IsUnresolvedMergedReference() && !IsUnresolvedSuperClass()) {
return descriptor_[0] == '[';
} else if (HasClass()) {
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index d08c937..4893088 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -112,7 +112,7 @@
}
// The high half that corresponds to this low half
const RegType& HighHalf(RegTypeCache* cache) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool IsConstantBoolean() const;
virtual bool IsConstantChar() const { return false; }
@@ -165,20 +165,20 @@
return result;
}
virtual bool HasClassVirtual() const { return false; }
- bool IsJavaLangObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool IsArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool IsObjectArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsJavaLangObject() const SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsArrayTypes() const SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsObjectArrayTypes() const SHARED_REQUIRES(Locks::mutator_lock_);
Primitive::Type GetPrimitiveType() const;
bool IsJavaLangObjectArray() const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool IsInstantiableTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsInstantiableTypes() const SHARED_REQUIRES(Locks::mutator_lock_);
const std::string& GetDescriptor() const {
DCHECK(HasClass() ||
(IsUnresolvedTypes() && !IsUnresolvedMergedReference() &&
!IsUnresolvedSuperClass()));
return descriptor_;
}
- mirror::Class* GetClass() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Class* GetClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(!IsUnresolvedReference());
DCHECK(!klass_.IsNull()) << Dump();
DCHECK(HasClass());
@@ -186,25 +186,25 @@
}
uint16_t GetId() const { return cache_id_; }
const RegType& GetSuperClass(RegTypeCache* cache) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
virtual std::string Dump() const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+ SHARED_REQUIRES(Locks::mutator_lock_) = 0;
// Can this type access other?
bool CanAccess(const RegType& other) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Can this type access a member with the given properties?
bool CanAccessMember(mirror::Class* klass, uint32_t access_flags) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Can this type be assigned by src?
// Note: Object and interface types may always be assigned to one another, see
// comment on
// ClassJoin.
bool IsAssignableFrom(const RegType& src) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Can this array type potentially be assigned by src.
// This function is necessary as array types are valid even if their components types are not,
@@ -215,13 +215,13 @@
// (both are reference types).
bool CanAssignArray(const RegType& src, RegTypeCache& reg_types,
Handle<mirror::ClassLoader> class_loader, bool* soft_error) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Can this type be assigned by src? Variant of IsAssignableFrom that doesn't
// allow assignment to
// an interface from an Object.
bool IsStrictlyAssignableFrom(const RegType& src) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Are these RegTypes the same?
bool Equals(const RegType& other) const { return GetId() == other.GetId(); }
@@ -229,7 +229,7 @@
// Compute the merge of this register from one edge (path) with incoming_type
// from another.
const RegType& Merge(const RegType& incoming_type, RegTypeCache* reg_types) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
/*
* A basic Join operation on classes. For a pair of types S and T the Join,
@@ -258,23 +258,23 @@
* [1] Java bytecode verification: algorithms and formalizations, Xavier Leroy
*/
static mirror::Class* ClassJoin(mirror::Class* s, mirror::Class* t)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
virtual ~RegType() {}
void VisitRoots(RootVisitor* visitor, const RootInfo& root_info) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
protected:
RegType(mirror::Class* klass, const std::string& descriptor,
- uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: descriptor_(descriptor), klass_(klass), cache_id_(cache_id) {
if (kIsDebugBuild) {
CheckInvariants();
}
}
- void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void CheckInvariants() const SHARED_REQUIRES(Locks::mutator_lock_);
const std::string descriptor_;
mutable GcRoot<mirror::Class>
@@ -285,7 +285,7 @@
private:
static bool AssignableFrom(const RegType& lhs, const RegType& rhs, bool strict)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
DISALLOW_COPY_AND_ASSIGN(RegType);
};
@@ -295,7 +295,7 @@
public:
bool IsConflict() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
// Get the singleton Conflict instance.
static const ConflictType* GetInstance() PURE;
@@ -304,14 +304,14 @@
static const ConflictType* CreateInstance(mirror::Class* klass,
const std::string& descriptor,
uint16_t cache_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Destroy the singleton instance.
static void Destroy();
private:
ConflictType(mirror::Class* klass, const std::string& descriptor,
- uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: RegType(klass, descriptor, cache_id) {}
static const ConflictType* instance_;
@@ -324,7 +324,7 @@
public:
bool IsUndefined() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
// Get the singleton Undefined instance.
static const UndefinedType* GetInstance() PURE;
@@ -333,14 +333,14 @@
static const UndefinedType* CreateInstance(mirror::Class* klass,
const std::string& descriptor,
uint16_t cache_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Destroy the singleton instance.
static void Destroy();
private:
UndefinedType(mirror::Class* klass, const std::string& descriptor,
- uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: RegType(klass, descriptor, cache_id) {}
static const UndefinedType* instance_;
@@ -349,7 +349,7 @@
class PrimitiveType : public RegType {
public:
PrimitiveType(mirror::Class* klass, const std::string& descriptor,
- uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_);
bool HasClassVirtual() const OVERRIDE { return true; }
};
@@ -357,23 +357,23 @@
class Cat1Type : public PrimitiveType {
public:
Cat1Type(mirror::Class* klass, const std::string& descriptor,
- uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_);
};
class IntegerType : public Cat1Type {
public:
bool IsInteger() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
static const IntegerType* CreateInstance(mirror::Class* klass,
const std::string& descriptor,
uint16_t cache_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static const IntegerType* GetInstance() PURE;
static void Destroy();
private:
IntegerType(mirror::Class* klass, const std::string& descriptor,
- uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: Cat1Type(klass, descriptor, cache_id) {}
static const IntegerType* instance_;
};
@@ -381,17 +381,17 @@
class BooleanType FINAL : public Cat1Type {
public:
bool IsBoolean() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
static const BooleanType* CreateInstance(mirror::Class* klass,
const std::string& descriptor,
uint16_t cache_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static const BooleanType* GetInstance() PURE;
static void Destroy();
private:
BooleanType(mirror::Class* klass, const std::string& descriptor,
- uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: Cat1Type(klass, descriptor, cache_id) {}
static const BooleanType* instance_;
@@ -400,17 +400,17 @@
class ByteType FINAL : public Cat1Type {
public:
bool IsByte() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
static const ByteType* CreateInstance(mirror::Class* klass,
const std::string& descriptor,
uint16_t cache_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static const ByteType* GetInstance() PURE;
static void Destroy();
private:
ByteType(mirror::Class* klass, const std::string& descriptor,
- uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: Cat1Type(klass, descriptor, cache_id) {}
static const ByteType* instance_;
};
@@ -418,17 +418,17 @@
class ShortType FINAL : public Cat1Type {
public:
bool IsShort() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
static const ShortType* CreateInstance(mirror::Class* klass,
const std::string& descriptor,
uint16_t cache_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static const ShortType* GetInstance() PURE;
static void Destroy();
private:
ShortType(mirror::Class* klass, const std::string& descriptor,
- uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: Cat1Type(klass, descriptor, cache_id) {}
static const ShortType* instance_;
};
@@ -436,17 +436,17 @@
class CharType FINAL : public Cat1Type {
public:
bool IsChar() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
static const CharType* CreateInstance(mirror::Class* klass,
const std::string& descriptor,
uint16_t cache_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static const CharType* GetInstance() PURE;
static void Destroy();
private:
CharType(mirror::Class* klass, const std::string& descriptor,
- uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: Cat1Type(klass, descriptor, cache_id) {}
static const CharType* instance_;
};
@@ -454,17 +454,17 @@
class FloatType FINAL : public Cat1Type {
public:
bool IsFloat() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
static const FloatType* CreateInstance(mirror::Class* klass,
const std::string& descriptor,
uint16_t cache_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static const FloatType* GetInstance() PURE;
static void Destroy();
private:
FloatType(mirror::Class* klass, const std::string& descriptor,
- uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: Cat1Type(klass, descriptor, cache_id) {}
static const FloatType* instance_;
};
@@ -472,86 +472,86 @@
class Cat2Type : public PrimitiveType {
public:
Cat2Type(mirror::Class* klass, const std::string& descriptor,
- uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_);
};
class LongLoType FINAL : public Cat2Type {
public:
- std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
bool IsLongLo() const OVERRIDE { return true; }
bool IsLong() const OVERRIDE { return true; }
static const LongLoType* CreateInstance(mirror::Class* klass,
const std::string& descriptor,
uint16_t cache_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static const LongLoType* GetInstance() PURE;
static void Destroy();
private:
LongLoType(mirror::Class* klass, const std::string& descriptor,
- uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: Cat2Type(klass, descriptor, cache_id) {}
static const LongLoType* instance_;
};
class LongHiType FINAL : public Cat2Type {
public:
- std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
bool IsLongHi() const OVERRIDE { return true; }
static const LongHiType* CreateInstance(mirror::Class* klass,
const std::string& descriptor,
uint16_t cache_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static const LongHiType* GetInstance() PURE;
static void Destroy();
private:
LongHiType(mirror::Class* klass, const std::string& descriptor,
- uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: Cat2Type(klass, descriptor, cache_id) {}
static const LongHiType* instance_;
};
class DoubleLoType FINAL : public Cat2Type {
public:
- std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
bool IsDoubleLo() const OVERRIDE { return true; }
bool IsDouble() const OVERRIDE { return true; }
static const DoubleLoType* CreateInstance(mirror::Class* klass,
const std::string& descriptor,
uint16_t cache_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static const DoubleLoType* GetInstance() PURE;
static void Destroy();
private:
DoubleLoType(mirror::Class* klass, const std::string& descriptor,
- uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: Cat2Type(klass, descriptor, cache_id) {}
static const DoubleLoType* instance_;
};
class DoubleHiType FINAL : public Cat2Type {
public:
- std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
virtual bool IsDoubleHi() const OVERRIDE { return true; }
static const DoubleHiType* CreateInstance(mirror::Class* klass,
const std::string& descriptor,
uint16_t cache_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static const DoubleHiType* GetInstance() PURE;
static void Destroy();
private:
DoubleHiType(mirror::Class* klass, const std::string& descriptor,
- uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: Cat2Type(klass, descriptor, cache_id) {}
static const DoubleHiType* instance_;
};
class ConstantType : public RegType {
public:
- ConstantType(uint32_t constant, uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ ConstantType(uint32_t constant, uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: RegType(nullptr, "", cache_id), constant_(constant) {
}
@@ -609,58 +609,58 @@
class PreciseConstType FINAL : public ConstantType {
public:
PreciseConstType(uint32_t constant, uint16_t cache_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: ConstantType(constant, cache_id) {}
bool IsPreciseConstant() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
};
class PreciseConstLoType FINAL : public ConstantType {
public:
PreciseConstLoType(uint32_t constant, uint16_t cache_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: ConstantType(constant, cache_id) {}
bool IsPreciseConstantLo() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
};
class PreciseConstHiType FINAL : public ConstantType {
public:
PreciseConstHiType(uint32_t constant, uint16_t cache_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: ConstantType(constant, cache_id) {}
bool IsPreciseConstantHi() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
};
class ImpreciseConstType FINAL : public ConstantType {
public:
ImpreciseConstType(uint32_t constant, uint16_t cache_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: ConstantType(constant, cache_id) {
}
bool IsImpreciseConstant() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
};
class ImpreciseConstLoType FINAL : public ConstantType {
public:
ImpreciseConstLoType(uint32_t constant, uint16_t cache_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: ConstantType(constant, cache_id) {}
bool IsImpreciseConstantLo() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
};
class ImpreciseConstHiType FINAL : public ConstantType {
public:
ImpreciseConstHiType(uint32_t constant, uint16_t cache_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: ConstantType(constant, cache_id) {}
bool IsImpreciseConstantHi() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
};
// Common parent of all uninitialized types. Uninitialized types are created by
@@ -690,14 +690,14 @@
UninitializedReferenceType(mirror::Class* klass,
const std::string& descriptor,
uint32_t allocation_pc, uint16_t cache_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: UninitializedType(klass, descriptor, allocation_pc, cache_id) {}
bool IsUninitializedReference() const OVERRIDE { return true; }
bool HasClassVirtual() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
};
// Similar to UnresolvedReferenceType but not yet having been passed to a
@@ -706,7 +706,7 @@
public:
UnresolvedUninitializedRefType(const std::string& descriptor,
uint32_t allocation_pc, uint16_t cache_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: UninitializedType(nullptr, descriptor, allocation_pc, cache_id) {
if (kIsDebugBuild) {
CheckInvariants();
@@ -717,10 +717,10 @@
bool IsUnresolvedTypes() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
private:
- void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void CheckInvariants() const SHARED_REQUIRES(Locks::mutator_lock_);
};
// Similar to UninitializedReferenceType, but a special case for the 'this' argument
@@ -730,7 +730,7 @@
UninitializedThisReferenceType(mirror::Class* klass,
const std::string& descriptor,
uint16_t cache_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: UninitializedType(klass, descriptor, 0, cache_id) {
if (kIsDebugBuild) {
CheckInvariants();
@@ -741,17 +741,17 @@
bool HasClassVirtual() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
private:
- void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void CheckInvariants() const SHARED_REQUIRES(Locks::mutator_lock_);
};
class UnresolvedUninitializedThisRefType FINAL : public UninitializedType {
public:
UnresolvedUninitializedThisRefType(const std::string& descriptor,
uint16_t cache_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: UninitializedType(nullptr, descriptor, 0, cache_id) {
if (kIsDebugBuild) {
CheckInvariants();
@@ -762,10 +762,10 @@
bool IsUnresolvedTypes() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
private:
- void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void CheckInvariants() const SHARED_REQUIRES(Locks::mutator_lock_);
};
// A type of register holding a reference to an Object of type GetClass or a
@@ -773,7 +773,7 @@
class ReferenceType FINAL : public RegType {
public:
ReferenceType(mirror::Class* klass, const std::string& descriptor,
- uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: RegType(klass, descriptor, cache_id) {}
bool IsReference() const OVERRIDE { return true; }
@@ -782,7 +782,7 @@
bool HasClassVirtual() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
};
// A type of register holding a reference to an Object of type GetClass and only
@@ -792,7 +792,7 @@
public:
PreciseReferenceType(mirror::Class* klass, const std::string& descriptor,
uint16_t cache_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool IsPreciseReference() const OVERRIDE { return true; }
@@ -800,14 +800,14 @@
bool HasClassVirtual() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
};
// Common parent of unresolved types.
class UnresolvedType : public RegType {
public:
UnresolvedType(const std::string& descriptor, uint16_t cache_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: RegType(nullptr, descriptor, cache_id) {}
bool IsNonZeroReferenceTypes() const OVERRIDE;
@@ -819,7 +819,7 @@
class UnresolvedReferenceType FINAL : public UnresolvedType {
public:
UnresolvedReferenceType(const std::string& descriptor, uint16_t cache_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: UnresolvedType(descriptor, cache_id) {
if (kIsDebugBuild) {
CheckInvariants();
@@ -830,10 +830,10 @@
bool IsUnresolvedTypes() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
private:
- void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void CheckInvariants() const SHARED_REQUIRES(Locks::mutator_lock_);
};
// Type representing the super-class of an unresolved type.
@@ -841,7 +841,7 @@
public:
UnresolvedSuperClass(uint16_t child_id, RegTypeCache* reg_type_cache,
uint16_t cache_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: UnresolvedType("", cache_id),
unresolved_child_id_(child_id),
reg_type_cache_(reg_type_cache) {
@@ -859,10 +859,10 @@
return static_cast<uint16_t>(unresolved_child_id_ & 0xFFFF);
}
- std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
private:
- void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void CheckInvariants() const SHARED_REQUIRES(Locks::mutator_lock_);
const uint16_t unresolved_child_id_;
const RegTypeCache* const reg_type_cache_;
@@ -875,7 +875,7 @@
public:
UnresolvedMergedType(uint16_t left_id, uint16_t right_id,
const RegTypeCache* reg_type_cache, uint16_t cache_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: UnresolvedType("", cache_id),
reg_type_cache_(reg_type_cache),
merged_types_(left_id, right_id) {
@@ -897,17 +897,17 @@
bool IsUnresolvedTypes() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
private:
- void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void CheckInvariants() const SHARED_REQUIRES(Locks::mutator_lock_);
const RegTypeCache* const reg_type_cache_;
const std::pair<uint16_t, uint16_t> merged_types_;
};
std::ostream& operator<<(std::ostream& os, const RegType& rhs)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
} // namespace verifier
} // namespace art
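
The rename from SHARED_LOCKS_REQUIRED to SHARED_REQUIRES throughout the verifier headers is mechanical, aligning the macro names with Clang's capability-based thread safety analysis (the same analysis behind the -Wthread-safety flags enabled earlier in this change). As a minimal sketch, assuming nothing about ART's real macro definitions in base/macros.h, such an annotation can be built directly on Clang's shared-capability attributes:

    // Minimal sketch, not ART's actual headers: a SHARED_REQUIRES-style
    // annotation built on Clang's capability attributes.
    #if defined(__clang__)
    #define CAPABILITY(x)        __attribute__((capability(x)))
    #define ACQUIRE_SHARED(...)  __attribute__((acquire_shared_capability(__VA_ARGS__)))
    #define RELEASE_SHARED(...)  __attribute__((release_shared_capability(__VA_ARGS__)))
    #define SHARED_REQUIRES(...) __attribute__((requires_shared_capability(__VA_ARGS__)))
    #else
    #define CAPABILITY(x)
    #define ACQUIRE_SHARED(...)
    #define RELEASE_SHARED(...)
    #define SHARED_REQUIRES(...)
    #endif

    // A toy reader-writer lock carrying the capability.
    class CAPABILITY("mutex") SharedMutex {
     public:
      void ReaderLock() ACQUIRE_SHARED() {}
      void ReaderUnlock() RELEASE_SHARED() {}
    };

    SharedMutex mutator_lock;

    // Clang (-Wthread-safety) warns at any call site that cannot prove the
    // calling thread holds mutator_lock in shared mode.
    int ReadHeapWord(const int* addr) SHARED_REQUIRES(mutator_lock) {
      return *addr;
    }

    int main() {
      int word = 42;
      mutator_lock.ReaderLock();
      int value = ReadHeapWord(&word);  // OK: shared capability held.
      mutator_lock.ReaderUnlock();
      return value == 42 ? 0 : 1;
    }

Compiled with -Wthread-safety, Clang flags any call to ReadHeapWord() where it cannot prove the caller holds mutator_lock in shared mode; the annotations cost nothing at runtime.
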
diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc
index b371d7e..4469e64 100644
--- a/runtime/verifier/reg_type_cache.cc
+++ b/runtime/verifier/reg_type_cache.cc
@@ -31,7 +31,7 @@
const PreciseConstType* RegTypeCache::small_precise_constants_[kMaxSmallConstant - kMinSmallConstant + 1];
static bool MatchingPrecisionForClass(const RegType* entry, bool precise)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (entry->IsPreciseReference() == precise) {
// We were or weren't looking for a precise reference and we found what we need.
return true;
@@ -427,9 +427,18 @@
}
}
entry = new ReferenceType(klass, "", entries_.size());
- } else if (klass->IsInstantiable()) {
+ } else if (!klass->IsPrimitive()) {
// We're uninitialized because of allocation; look up or create a precise type, as allocations
// may only create objects of that type.
+ // Note: beyond excluding primitives, we do not check whether the given klass is actually
+ // instantiable; that is, we allow interfaces and abstract classes here. The reasoning is
+ // twofold:
+ // 1) The "new-instance" instruction that generates the uninitialized type will already
+ // queue an instantiation error. This is a soft error that must be thrown at runtime,
+ // and could potentially change if the class is resolved differently at runtime.
+ // 2) Checking whether the klass is instantiable and using the conflict type otherwise
+ // may produce a hard error when the value is used, which leads to a VerifyError; that
+ // is not the correct semantics.
for (size_t i = primitive_count_; i < entries_.size(); i++) {
const RegType* cur_entry = entries_[i];
if (cur_entry->IsPreciseReference() && cur_entry->GetClass() == klass) {
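
The switch from klass->IsInstantiable() to !klass->IsPrimitive() above deliberately lets interfaces and abstract classes reach the precise-reference path. A compressed sketch of the predicate this encodes, using a hypothetical class model rather than ART's mirror::Class:

    #include <cassert>

    // Hypothetical, simplified class model (not ART's mirror::Class).
    struct Klass {
      bool is_primitive;
      bool is_instantiable;  // False for interfaces and abstract classes.
    };

    // Should the verifier cache a precise reference type for an uninitialized
    // allocation of 'k'? Non-instantiable classes pass on purpose: new-instance
    // on them raises a runtime InstantiationError (a soft error), not a
    // verification-time VerifyError (a hard error).
    bool UsePreciseReferenceType(const Klass& k) {
      return !k.is_primitive;  // Was: k.is_instantiable, which was too strict.
    }

    int main() {
      Klass abstract_class{/*is_primitive=*/false, /*is_instantiable=*/false};
      Klass concrete_class{/*is_primitive=*/false, /*is_instantiable=*/true};
      Klass primitive{/*is_primitive=*/true, /*is_instantiable=*/false};
      assert(UsePreciseReferenceType(abstract_class));  // Soft error at runtime.
      assert(UsePreciseReferenceType(concrete_class));
      assert(!UsePreciseReferenceType(primitive));
      return 0;
    }
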
diff --git a/runtime/verifier/reg_type_cache.h b/runtime/verifier/reg_type_cache.h
index 4b3105c..8319de6 100644
--- a/runtime/verifier/reg_type_cache.h
+++ b/runtime/verifier/reg_type_cache.h
@@ -42,7 +42,7 @@
public:
explicit RegTypeCache(bool can_load_classes);
~RegTypeCache();
- static void Init() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ static void Init() SHARED_REQUIRES(Locks::mutator_lock_) {
if (!RegTypeCache::primitive_initialized_) {
CHECK_EQ(RegTypeCache::primitive_count_, 0);
CreatePrimitiveAndSmallConstantTypes();
@@ -53,110 +53,110 @@
static void ShutDown();
const art::verifier::RegType& GetFromId(uint16_t id) const;
const RegType& From(mirror::ClassLoader* loader, const char* descriptor, bool precise)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
const RegType& FromClass(const char* descriptor, mirror::Class* klass, bool precise)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
const ConstantType& FromCat1Const(int32_t value, bool precise)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
const ConstantType& FromCat2ConstLo(int32_t value, bool precise)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
const ConstantType& FromCat2ConstHi(int32_t value, bool precise)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
const RegType& FromDescriptor(mirror::ClassLoader* loader, const char* descriptor, bool precise)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
const RegType& FromUnresolvedMerge(const RegType& left, const RegType& right)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
const RegType& FromUnresolvedSuperClass(const RegType& child)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const ConstantType& Zero() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ const ConstantType& Zero() SHARED_REQUIRES(Locks::mutator_lock_) {
return FromCat1Const(0, true);
}
- const ConstantType& One() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const ConstantType& One() SHARED_REQUIRES(Locks::mutator_lock_) {
return FromCat1Const(1, true);
}
size_t GetCacheSize() {
return entries_.size();
}
- const BooleanType& Boolean() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const BooleanType& Boolean() SHARED_REQUIRES(Locks::mutator_lock_) {
return *BooleanType::GetInstance();
}
- const ByteType& Byte() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const ByteType& Byte() SHARED_REQUIRES(Locks::mutator_lock_) {
return *ByteType::GetInstance();
}
- const CharType& Char() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const CharType& Char() SHARED_REQUIRES(Locks::mutator_lock_) {
return *CharType::GetInstance();
}
- const ShortType& Short() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const ShortType& Short() SHARED_REQUIRES(Locks::mutator_lock_) {
return *ShortType::GetInstance();
}
- const IntegerType& Integer() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const IntegerType& Integer() SHARED_REQUIRES(Locks::mutator_lock_) {
return *IntegerType::GetInstance();
}
- const FloatType& Float() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const FloatType& Float() SHARED_REQUIRES(Locks::mutator_lock_) {
return *FloatType::GetInstance();
}
- const LongLoType& LongLo() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const LongLoType& LongLo() SHARED_REQUIRES(Locks::mutator_lock_) {
return *LongLoType::GetInstance();
}
- const LongHiType& LongHi() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const LongHiType& LongHi() SHARED_REQUIRES(Locks::mutator_lock_) {
return *LongHiType::GetInstance();
}
- const DoubleLoType& DoubleLo() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const DoubleLoType& DoubleLo() SHARED_REQUIRES(Locks::mutator_lock_) {
return *DoubleLoType::GetInstance();
}
- const DoubleHiType& DoubleHi() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const DoubleHiType& DoubleHi() SHARED_REQUIRES(Locks::mutator_lock_) {
return *DoubleHiType::GetInstance();
}
- const UndefinedType& Undefined() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const UndefinedType& Undefined() SHARED_REQUIRES(Locks::mutator_lock_) {
return *UndefinedType::GetInstance();
}
const ConflictType& Conflict() {
return *ConflictType::GetInstance();
}
- const PreciseReferenceType& JavaLangClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const PreciseReferenceType& JavaLangString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const RegType& JavaLangThrowable(bool precise) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const RegType& JavaLangObject(bool precise) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const PreciseReferenceType& JavaLangClass() SHARED_REQUIRES(Locks::mutator_lock_);
+ const PreciseReferenceType& JavaLangString() SHARED_REQUIRES(Locks::mutator_lock_);
+ const RegType& JavaLangThrowable(bool precise) SHARED_REQUIRES(Locks::mutator_lock_);
+ const RegType& JavaLangObject(bool precise) SHARED_REQUIRES(Locks::mutator_lock_);
const UninitializedType& Uninitialized(const RegType& type, uint32_t allocation_pc)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Create an uninitialized 'this' argument for the given type.
const UninitializedType& UninitializedThisArgument(const RegType& type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
const RegType& FromUninitialized(const RegType& uninit_type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const ImpreciseConstType& ByteConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const ImpreciseConstType& CharConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const ImpreciseConstType& ShortConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const ImpreciseConstType& IntConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const ImpreciseConstType& PosByteConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const ImpreciseConstType& PosShortConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ const ImpreciseConstType& ByteConstant() SHARED_REQUIRES(Locks::mutator_lock_);
+ const ImpreciseConstType& CharConstant() SHARED_REQUIRES(Locks::mutator_lock_);
+ const ImpreciseConstType& ShortConstant() SHARED_REQUIRES(Locks::mutator_lock_);
+ const ImpreciseConstType& IntConstant() SHARED_REQUIRES(Locks::mutator_lock_);
+ const ImpreciseConstType& PosByteConstant() SHARED_REQUIRES(Locks::mutator_lock_);
+ const ImpreciseConstType& PosShortConstant() SHARED_REQUIRES(Locks::mutator_lock_);
const RegType& GetComponentType(const RegType& array, mirror::ClassLoader* loader)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ void Dump(std::ostream& os) SHARED_REQUIRES(Locks::mutator_lock_);
const RegType& RegTypeFromPrimitiveType(Primitive::Type) const;
void VisitRoots(RootVisitor* visitor, const RootInfo& root_info)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static void VisitStaticRoots(RootVisitor* visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
private:
- void FillPrimitiveAndSmallConstantTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void FillPrimitiveAndSmallConstantTypes() SHARED_REQUIRES(Locks::mutator_lock_);
mirror::Class* ResolveClass(const char* descriptor, mirror::ClassLoader* loader)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool MatchDescriptor(size_t idx, const StringPiece& descriptor, bool precise)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
const ConstantType& FromCat1NonSmallConstant(int32_t value, bool precise)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void AddEntry(RegType* new_entry);
template <class Type>
static const Type* CreatePrimitiveTypeInstance(const std::string& descriptor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static void CreatePrimitiveAndSmallConstantTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ static void CreatePrimitiveAndSmallConstantTypes() SHARED_REQUIRES(Locks::mutator_lock_);
// A quick look up for popular small constants.
static constexpr int32_t kMinSmallConstant = -1;
diff --git a/runtime/verifier/register_line.h b/runtime/verifier/register_line.h
index 0de0d9c..4fb3a2c 100644
--- a/runtime/verifier/register_line.h
+++ b/runtime/verifier/register_line.h
@@ -60,54 +60,54 @@
// Implement category-1 "move" instructions. Copy a 32-bit value from "vsrc" to "vdst".
void CopyRegister1(MethodVerifier* verifier, uint32_t vdst, uint32_t vsrc, TypeCategory cat)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Implement category-2 "move" instructions. Copy a 64-bit value from "vsrc" to "vdst". This
// copies both halves of the register.
void CopyRegister2(MethodVerifier* verifier, uint32_t vdst, uint32_t vsrc)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Implement "move-result". Copy the category-1 value from the result register to another
// register, and reset the result register.
void CopyResultRegister1(MethodVerifier* verifier, uint32_t vdst, bool is_reference)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Implement "move-result-wide". Copy the category-2 value from the result register to another
// register, and reset the result register.
void CopyResultRegister2(MethodVerifier* verifier, uint32_t vdst)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Set the invisible result register to unknown
- void SetResultTypeToUnknown(MethodVerifier* verifier) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetResultTypeToUnknown(MethodVerifier* verifier) SHARED_REQUIRES(Locks::mutator_lock_);
// Set the type of register N, verifying that the register is valid. If "newType" is the "Lo"
// part of a 64-bit value, register N+1 will be set to "newType+1".
// The register index was validated during the static pass, so we don't need to check it here.
ALWAYS_INLINE bool SetRegisterType(MethodVerifier* verifier, uint32_t vdst,
const RegType& new_type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool SetRegisterTypeWide(MethodVerifier* verifier, uint32_t vdst, const RegType& new_type1,
const RegType& new_type2)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
/* Set the type of the "result" register. */
void SetResultRegisterType(MethodVerifier* verifier, const RegType& new_type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void SetResultRegisterTypeWide(const RegType& new_type1, const RegType& new_type2)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Get the type of register vsrc.
const RegType& GetRegisterType(MethodVerifier* verifier, uint32_t vsrc) const;
ALWAYS_INLINE bool VerifyRegisterType(MethodVerifier* verifier, uint32_t vsrc,
const RegType& check_type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool VerifyRegisterTypeWide(MethodVerifier* verifier, uint32_t vsrc, const RegType& check_type1,
const RegType& check_type2)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void CopyFromLine(const RegisterLine* src) {
DCHECK_EQ(num_regs_, src->num_regs_);
@@ -116,7 +116,7 @@
reg_to_lock_depths_ = src->reg_to_lock_depths_;
}
- std::string Dump(MethodVerifier* verifier) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump(MethodVerifier* verifier) const SHARED_REQUIRES(Locks::mutator_lock_);
void FillWithGarbage() {
memset(&line_, 0xf1, num_regs_ * sizeof(uint16_t));
@@ -131,7 +131,7 @@
* the new ones at the same time).
*/
void MarkUninitRefsAsInvalid(MethodVerifier* verifier, const RegType& uninit_type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
/*
* Update all registers holding "uninit_type" to instead hold the corresponding initialized
@@ -140,7 +140,7 @@
*/
void MarkRefsAsInitialized(MethodVerifier* verifier, const RegType& uninit_type,
uint32_t this_reg, uint32_t dex_pc)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
/*
* Update all registers to be Conflict except vsrc.
@@ -194,7 +194,7 @@
*/
const RegType& GetInvocationThis(MethodVerifier* verifier, const Instruction* inst,
bool is_range, bool allow_failure = false)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
/*
* Verify types for a simple two-register instruction (e.g. "neg-int").
@@ -202,22 +202,22 @@
*/
void CheckUnaryOp(MethodVerifier* verifier, const Instruction* inst, const RegType& dst_type,
const RegType& src_type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void CheckUnaryOpWide(MethodVerifier* verifier, const Instruction* inst,
const RegType& dst_type1, const RegType& dst_type2,
const RegType& src_type1, const RegType& src_type2)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void CheckUnaryOpToWide(MethodVerifier* verifier, const Instruction* inst,
const RegType& dst_type1, const RegType& dst_type2,
const RegType& src_type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void CheckUnaryOpFromWide(MethodVerifier* verifier, const Instruction* inst,
const RegType& dst_type,
const RegType& src_type1, const RegType& src_type2)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
/*
* Verify types for a simple three-register instruction (e.g. "add-int").
@@ -227,18 +227,18 @@
void CheckBinaryOp(MethodVerifier* verifier, const Instruction* inst,
const RegType& dst_type, const RegType& src_type1, const RegType& src_type2,
bool check_boolean_op)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void CheckBinaryOpWide(MethodVerifier* verifier, const Instruction* inst,
const RegType& dst_type1, const RegType& dst_type2,
const RegType& src_type1_1, const RegType& src_type1_2,
const RegType& src_type2_1, const RegType& src_type2_2)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void CheckBinaryOpWideShift(MethodVerifier* verifier, const Instruction* inst,
const RegType& long_lo_type, const RegType& long_hi_type,
const RegType& int_type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
/*
* Verify types for a binary "2addr" operation. "src_type1"/"src_type2"
@@ -248,18 +248,18 @@
const RegType& dst_type,
const RegType& src_type1, const RegType& src_type2,
bool check_boolean_op)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void CheckBinaryOp2addrWide(MethodVerifier* verifier, const Instruction* inst,
const RegType& dst_type1, const RegType& dst_type2,
const RegType& src_type1_1, const RegType& src_type1_2,
const RegType& src_type2_1, const RegType& src_type2_2)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void CheckBinaryOp2addrWideShift(MethodVerifier* verifier, const Instruction* inst,
const RegType& long_lo_type, const RegType& long_hi_type,
const RegType& int_type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
/*
* Verify types for a two-register instruction with a literal constant (e.g. "add-int/lit8").
@@ -270,15 +270,15 @@
void CheckLiteralOp(MethodVerifier* verifier, const Instruction* inst,
const RegType& dst_type, const RegType& src_type,
bool check_boolean_op, bool is_lit16)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Verify/push monitor onto the monitor stack, locking the value in reg_idx at location insn_idx.
void PushMonitor(MethodVerifier* verifier, uint32_t reg_idx, int32_t insn_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Verify/pop monitor from the monitor stack, ensuring that we believe the monitor is locked.
void PopMonitor(MethodVerifier* verifier, uint32_t reg_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Stack of currently held monitors and where they were locked
size_t MonitorStackDepth() const {
@@ -290,7 +290,7 @@
bool VerifyMonitorStackEmpty(MethodVerifier* verifier) const;
bool MergeRegisters(MethodVerifier* verifier, const RegisterLine* incoming_line)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
size_t GetMaxNonZeroReferenceReg(MethodVerifier* verifier, size_t max_ref_reg) const;
diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h
index 66b9abe..6dd8168 100644
--- a/runtime/well_known_classes.h
+++ b/runtime/well_known_classes.h
@@ -38,7 +38,7 @@
static jmethodID StringInitToStringFactoryMethodID(jmethodID string_init);
static mirror::Class* ToClass(jclass global_jclass)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static jclass com_android_dex_Dex;
static jclass dalvik_system_DexFile;
diff --git a/sigchainlib/sigchain.cc b/sigchainlib/sigchain.cc
index 1391d14..c984b17 100644
--- a/sigchainlib/sigchain.cc
+++ b/sigchainlib/sigchain.cc
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#ifdef HAVE_ANDROID_OS
+#ifdef __ANDROID__
#include <android/log.h>
#else
#include <stdarg.h>
@@ -103,7 +103,7 @@
va_list ap;
va_start(ap, format);
vsnprintf(buf, sizeof(buf), format, ap);
-#ifdef HAVE_ANDROID_OS
+#ifdef __ANDROID__
__android_log_write(ANDROID_LOG_ERROR, "libsigchain", buf);
#else
std::cout << buf << "\n";
@@ -337,14 +337,16 @@
// In case the chain isn't claimed, claim it for ourselves so we can ensure the managed handler
// goes first.
if (!user_sigactions[signal].IsClaimed()) {
- struct sigaction tmp;
- tmp.sa_sigaction = sigchainlib_managed_handler_sigaction;
- sigemptyset(&tmp.sa_mask);
- tmp.sa_flags = SA_SIGINFO | SA_ONSTACK;
+ struct sigaction act, old_act;
+ act.sa_sigaction = sigchainlib_managed_handler_sigaction;
+ sigemptyset(&act.sa_mask);
+ act.sa_flags = SA_SIGINFO | SA_ONSTACK;
#if !defined(__APPLE__) && !defined(__mips__)
- tmp.sa_restorer = nullptr;
+ act.sa_restorer = nullptr;
#endif
- user_sigactions[signal].Claim(tmp);
+ if (sigaction(signal, &act, &old_act) != -1) {
+ user_sigactions[signal].Claim(old_act);
+ }
}
}
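
The rewritten block above no longer fabricates a claimed action: it installs the managed handler with sigaction() and claims whatever disposition was previously installed, so that handler can still be chained to. A standalone sketch of the same claim-and-chain pattern, with hypothetical names that are not part of sigchainlib's API:

    #include <csignal>
    #include <cstdio>

    static struct sigaction old_act;  // Disposition claimed at install time.

    // Runs first, then chains to whatever was installed before us.
    // (printf is not async-signal-safe; acceptable in a sketch only.)
    static void managed_handler(int sig, siginfo_t* info, void* ctx) {
      std::printf("managed handler saw signal %d\n", sig);
      if (old_act.sa_flags & SA_SIGINFO) {
        old_act.sa_sigaction(sig, info, ctx);
      } else if (old_act.sa_handler != SIG_IGN && old_act.sa_handler != SIG_DFL) {
        old_act.sa_handler(sig);
      }
    }

    int main() {
      struct sigaction act = {};
      act.sa_sigaction = managed_handler;
      sigemptyset(&act.sa_mask);
      act.sa_flags = SA_SIGINFO | SA_ONSTACK;
      // sigaction() hands back the previous disposition; keeping it is the
      // equivalent of Claim(old_act) in the patch above.
      if (sigaction(SIGUSR1, &act, &old_act) == -1) {
        return 1;
      }
      raise(SIGUSR1);
      return 0;
    }
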
diff --git a/sigchainlib/sigchain_dummy.cc b/sigchainlib/sigchain_dummy.cc
index 8495a54..dfe0c6f 100644
--- a/sigchainlib/sigchain_dummy.cc
+++ b/sigchainlib/sigchain_dummy.cc
@@ -17,7 +17,7 @@
#include <stdio.h>
#include <stdlib.h>
-#ifdef HAVE_ANDROID_OS
+#ifdef __ANDROID__
#include <android/log.h>
#else
#include <stdarg.h>
@@ -38,7 +38,7 @@
va_list ap;
va_start(ap, format);
vsnprintf(buf, sizeof(buf), format, ap);
-#ifdef HAVE_ANDROID_OS
+#ifdef __ANDROID__
__android_log_write(ANDROID_LOG_ERROR, "libsigchain", buf);
#else
std::cout << buf << "\n";
diff --git a/test/004-ReferenceMap/stack_walk_refmap_jni.cc b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
index e626e48..767e1de 100644
--- a/test/004-ReferenceMap/stack_walk_refmap_jni.cc
+++ b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
@@ -29,10 +29,10 @@
} while (false);
struct ReferenceMap2Visitor : public CheckReferenceMapVisitor {
- explicit ReferenceMap2Visitor(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ explicit ReferenceMap2Visitor(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_)
: CheckReferenceMapVisitor(thread) {}
- bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
if (CheckReferenceMapVisitor::VisitFrame()) {
return true;
}
diff --git a/test/004-StackWalk/stack_walk_jni.cc b/test/004-StackWalk/stack_walk_jni.cc
index 6b15514..3a5854b 100644
--- a/test/004-StackWalk/stack_walk_jni.cc
+++ b/test/004-StackWalk/stack_walk_jni.cc
@@ -29,10 +29,10 @@
class TestReferenceMapVisitor : public CheckReferenceMapVisitor {
public:
- explicit TestReferenceMapVisitor(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ explicit TestReferenceMapVisitor(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_)
: CheckReferenceMapVisitor(thread) {}
- bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
if (CheckReferenceMapVisitor::VisitFrame()) {
return true;
}
diff --git a/test/046-reflect/expected.txt b/test/046-reflect/expected.txt
index fa053fb..d657d44 100644
--- a/test/046-reflect/expected.txt
+++ b/test/046-reflect/expected.txt
@@ -24,7 +24,7 @@
SuperTarget constructor ()V
Target constructor ()V
Before, float is 3.1415925
-myMethod: hi there 3.1415925 Q !
+myMethod: hi there 3.1415925 ✔ !
Result of invoke: 7
Calling no-arg void-return method
myNoargMethod ()V
diff --git a/test/046-reflect/src/Main.java b/test/046-reflect/src/Main.java
index 0d8e576..0c90109 100644
--- a/test/046-reflect/src/Main.java
+++ b/test/046-reflect/src/Main.java
@@ -147,7 +147,7 @@
Object[] argList = new Object[] {
new String[] { "hi there" },
new Float(3.1415926f),
- new Character('Q')
+ new Character('\u2714')
};
System.out.println("Before, float is "
+ ((Float)argList[1]).floatValue());
diff --git a/test/051-thread/thread_test.cc b/test/051-thread/thread_test.cc
index 2b8e675..4215207 100644
--- a/test/051-thread/thread_test.cc
+++ b/test/051-thread/thread_test.cc
@@ -28,7 +28,7 @@
extern "C" JNIEXPORT jboolean JNICALL Java_Main_supportsThreadPriorities(
JNIEnv* env ATTRIBUTE_UNUSED,
jclass clazz ATTRIBUTE_UNUSED) {
-#if defined(HAVE_ANDROID_OS)
+#if defined(__ANDROID__)
return JNI_TRUE;
#else
return JNI_FALSE;
diff --git a/test/082-inline-execute/src/Main.java b/test/082-inline-execute/src/Main.java
index 177c5a4..77c1a99 100644
--- a/test/082-inline-execute/src/Main.java
+++ b/test/082-inline-execute/src/Main.java
@@ -45,6 +45,8 @@
test_Long_reverseBytes();
test_Integer_reverse();
test_Long_reverse();
+ test_Integer_numberOfLeadingZeros();
+ test_Long_numberOfLeadingZeros();
test_StrictMath_abs_I();
test_StrictMath_abs_J();
test_StrictMath_min_I();
@@ -1041,6 +1043,24 @@
return (r1 / i1) + (r2 / i2) + i3 + i4 + i5 + i6 + i7 + i8;
}
+ public static void test_Integer_numberOfLeadingZeros() {
+ Assert.assertEquals(Integer.numberOfLeadingZeros(0), Integer.SIZE);
+ for (int i = 0; i < Integer.SIZE; i++) {
+ Assert.assertEquals(Integer.numberOfLeadingZeros(1 << i), Integer.SIZE - 1 - i);
+ Assert.assertEquals(Integer.numberOfLeadingZeros((1 << i) | 1), Integer.SIZE - 1 - i);
+ Assert.assertEquals(Integer.numberOfLeadingZeros(0xFFFFFFFF >>> i), i);
+ }
+ }
+
+ public static void test_Long_numberOfLeadingZeros() {
+ Assert.assertEquals(Long.numberOfLeadingZeros(0L), Long.SIZE);
+ for (int i = 0; i < Long.SIZE; i++) {
+ Assert.assertEquals(Long.numberOfLeadingZeros(1L << i), Long.SIZE - 1 - i);
+ Assert.assertEquals(Long.numberOfLeadingZeros((1L << i) | 1L), Long.SIZE - 1 - i);
+ Assert.assertEquals(Long.numberOfLeadingZeros(0xFFFFFFFFFFFFFFFFL >>> i), i);
+ }
+ }
+
static Object runtime;
static Method address_of;
static Method new_non_movable_array;
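
The new numberOfLeadingZeros tests assert identities that hold for any fixed-width integer: nlz(1 << i) == SIZE - 1 - i, setting lower bits does not change the count, and shifting an all-ones value right by i leaves exactly i leading zeros. For reference, the 32-bit case checked the same way in C++ (a sketch; __builtin_clz(0) is undefined, hence the guard):

    #include <cassert>
    #include <cstdint>

    // numberOfLeadingZeros for a 32-bit value; __builtin_clz(0) is undefined,
    // so 0 is special-cased, mirroring Integer.numberOfLeadingZeros(0) == 32.
    static int Clz32(uint32_t x) { return x == 0 ? 32 : __builtin_clz(x); }

    int main() {
      assert(Clz32(0u) == 32);
      for (int i = 0; i < 32; ++i) {
        assert(Clz32(1u << i) == 31 - i);
        assert(Clz32((1u << i) | 1u) == 31 - i);
        assert(Clz32(0xFFFFFFFFu >> i) == i);
      }
      return 0;
    }
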
diff --git a/test/100-reflect2/expected.txt b/test/100-reflect2/expected.txt
index 7db61a1..c932761 100644
--- a/test/100-reflect2/expected.txt
+++ b/test/100-reflect2/expected.txt
@@ -1,6 +1,6 @@
true
8
-x
+✔
3.141592653589793
3.14
32
diff --git a/test/100-reflect2/src/Main.java b/test/100-reflect2/src/Main.java
index 72e14b1..bf3a574 100644
--- a/test/100-reflect2/src/Main.java
+++ b/test/100-reflect2/src/Main.java
@@ -20,7 +20,7 @@
class Main {
private static boolean z = true;
private static byte b = 8;
- private static char c = 'x';
+ private static char c = '\u2714';
private static double d = Math.PI;
private static float f = 3.14f;
private static int i = 32;
@@ -144,7 +144,7 @@
/*
private static boolean z = true;
private static byte b = 8;
- private static char c = 'x';
+ private static char c = '\u2714';
private static double d = Math.PI;
private static float f = 3.14f;
private static int i = 32;
@@ -263,7 +263,7 @@
show(ctor.newInstance((Object[]) null));
ctor = String.class.getConstructor(char[].class, int.class, int.class);
- show(ctor.newInstance(new char[] { 'x', 'y', 'z', '!' }, 1, 2));
+ show(ctor.newInstance(new char[] { '\u2714', 'y', 'z', '!' }, 1, 2));
}
private static void testPackagePrivateConstructor() {
diff --git a/test/109-suspend-check/src/Main.java b/test/109-suspend-check/src/Main.java
index 8046d75..3c3353b 100644
--- a/test/109-suspend-check/src/Main.java
+++ b/test/109-suspend-check/src/Main.java
@@ -32,6 +32,8 @@
new InfiniteWhileLoopWithSpecialPutOrNop(new SpecialMethods2()),
new InfiniteWhileLoopWithSpecialConstOrIGet(new SpecialMethods1()),
new InfiniteWhileLoopWithSpecialConstOrIGet(new SpecialMethods2()),
+ new InfiniteWhileLoopWithSpecialConstOrIGetInTryCatch(new SpecialMethods1()),
+ new InfiniteWhileLoopWithSpecialConstOrIGetInTryCatch(new SpecialMethods2()),
};
doWhileLoopWithLong.start();
for (SimpleLoopThread loop : simpleLoops) {
@@ -135,6 +137,21 @@
}
}
+class InfiniteWhileLoopWithSpecialConstOrIGetInTryCatch extends SimpleLoopThread {
+ private SpecialMethodInterface smi;
+ public InfiniteWhileLoopWithSpecialConstOrIGetInTryCatch(SpecialMethodInterface smi) {
+ this.smi = smi;
+ }
+ public void run() {
+ try {
+ long i = 0L;
+ while (keepGoing) {
+ i += smi.ConstOrIGet();
+ }
+ } catch (Throwable ignored) { }
+ }
+}
+
class InfiniteWhileLoopWithIntrinsic extends SimpleLoopThread {
private String[] strings = { "a", "b", "c", "d" };
private int sum = 0;
diff --git a/test/115-native-bridge/expected.txt b/test/115-native-bridge/expected.txt
index 464d2c8..372ecd0 100644
--- a/test/115-native-bridge/expected.txt
+++ b/test/115-native-bridge/expected.txt
@@ -61,3 +61,4 @@
trampoline_Java_Main_testNewStringObject called!
Getting trampoline for Java_Main_testSignal with shorty I.
NB signal handler with signal 11.
+NB signal handler with signal 4.
diff --git a/test/115-native-bridge/nativebridge.cc b/test/115-native-bridge/nativebridge.cc
index c8141a7..a6a6e08 100644
--- a/test/115-native-bridge/nativebridge.cc
+++ b/test/115-native-bridge/nativebridge.cc
@@ -200,8 +200,9 @@
#if !defined(__APPLE__) && !defined(__mips__)
tmp.sa_restorer = nullptr;
#endif
- sigaction(SIGSEGV, &tmp, nullptr);
+ // Test segv
+ sigaction(SIGSEGV, &tmp, nullptr);
#if defined(__arm__) || defined(__i386__) || defined(__x86_64__) || defined(__aarch64__)
// On supported architectures we cause a real SEGV.
*go_away_compiler = 'a';
@@ -209,6 +210,11 @@
// On other architectures we simulate SEGV.
kill(getpid(), SIGSEGV);
#endif
+
+ // Test sigill
+ sigaction(SIGILL, &tmp, nullptr);
+ kill(getpid(), SIGILL);
+
return 1234;
}
@@ -385,27 +391,29 @@
// 004-SignalTest.
static bool nb_signalhandler(int sig, siginfo_t* info ATTRIBUTE_UNUSED, void* context) {
printf("NB signal handler with signal %d.\n", sig);
+ if (sig == SIGSEGV) {
#if defined(__arm__)
- struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
- struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
- sc->arm_pc += 2; // Skip instruction causing segv.
+ struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
+ struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
+ sc->arm_pc += 2; // Skip instruction causing segv & sigill.
#elif defined(__aarch64__)
- struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
- struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
- sc->pc += 4; // Skip instruction causing segv.
+ struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
+ struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
+ sc->pc += 4; // Skip instruction causing segv & sigill.
#elif defined(__i386__) || defined(__x86_64__)
- struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
- uc->CTX_EIP += 3;
+ struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
+ uc->CTX_EIP += 3;
#else
- UNUSED(context);
+ UNUSED(context);
#endif
+ }
// We handled this...
return true;
}
static ::android::NativeBridgeSignalHandlerFn native_bridge_get_signal_handler(int signal) {
- // Only test segfault handler.
- if (signal == SIGSEGV) {
+ // Test SIGSEGV for an already-claimed signal, and SIGILL for an unclaimed signal.
+ if ((signal == SIGSEGV) || (signal == SIGILL)) {
return &nb_signalhandler;
}
return nullptr;
diff --git a/test/441-checker-inliner/src/Main.java b/test/441-checker-inliner/src/Main.java
index 4db116a..c108a90 100644
--- a/test/441-checker-inliner/src/Main.java
+++ b/test/441-checker-inliner/src/Main.java
@@ -157,6 +157,31 @@
return x;
}
+ /// CHECK-START: int Main.returnAbs(int) intrinsics_recognition (before)
+ /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect
+ /// CHECK-DAG: Return [<<Result>>]
+
+ /// CHECK-START: int Main.returnAbs(int) intrinsics_recognition (after)
+ /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:MathAbsInt
+ /// CHECK-DAG: Return [<<Result>>]
+
+ private static int returnAbs(int i) {
+ return Math.abs(i);
+ }
+
+ /// CHECK-START: int Main.InlinedIntrinsicsAreStillIntrinsic() inliner (before)
+ /// CHECK-DAG: <<ConstMinus1:i\d+>> IntConstant -1
+ /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect
+ /// CHECK-DAG: Return [<<Result>>]
+
+ /// CHECK-START: int Main.InlinedIntrinsicsAreStillIntrinsic() inliner (after)
+ /// CHECK-DAG: <<ConstMinus1:i\d+>> IntConstant -1
+ /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:MathAbsInt
+ /// CHECK-DAG: Return [<<Result>>]
+
+ public static int InlinedIntrinsicsAreStillIntrinsic() {
+ return returnAbs(-1);
+ }
private static void returnVoid() {
return;
@@ -238,5 +263,13 @@
if (InlineWithControlFlow(false) != 2) {
throw new Error();
}
+
+ if (InlinedIntrinsicsAreStillIntrinsic() != 1) {
+ throw new Error();
+ }
+
+ if (returnAbs(-1) != 1) {
+ throw new Error();
+ }
}
}
diff --git a/test/449-checker-bce/expected.txt b/test/449-checker-bce/expected.txt
index e69de29..e114c50 100644
--- a/test/449-checker-bce/expected.txt
+++ b/test/449-checker-bce/expected.txt
@@ -0,0 +1 @@
+java.lang.ArrayIndexOutOfBoundsException: length=5; index=82
diff --git a/test/449-checker-bce/src/Main.java b/test/449-checker-bce/src/Main.java
index c4f7ddb..a746664 100644
--- a/test/449-checker-bce/src/Main.java
+++ b/test/449-checker-bce/src/Main.java
@@ -1101,6 +1101,28 @@
}
+ public void testExceptionMessage() {
+ short[] B1 = new short[5];
+ int[] B2 = new int[5];
+ Exception err = null;
+ try {
+ testExceptionMessage1(B1, B2, null, -1, 6);
+ } catch (Exception e) {
+ err = e;
+ }
+ System.out.println(err);
+ }
+
+ void testExceptionMessage1(short[] a1, int[] a2, long[] a3, int start, int finish) {
+ int j = finish + 77;
+ // Bug: 22665511
+ // A deoptimization will be triggered here right before the loop. Need to make
+ // sure the value of j is preserved for the interpreter.
+ for (int i = start; i <= finish; i++) {
+ a2[j - 1] = a1[i + 1];
+ }
+ }
+
// Make sure this method is compiled with optimizing.
/// CHECK-START: void Main.main(java.lang.String[]) register (after)
/// CHECK: ParallelMove
@@ -1141,6 +1163,7 @@
};
testUnknownBounds();
+ new Main().testExceptionMessage();
}
}
diff --git a/test/450-checker-types/src/Main.java b/test/450-checker-types/src/Main.java
index 4e14b90..251a53e 100644
--- a/test/450-checker-types/src/Main.java
+++ b/test/450-checker-types/src/Main.java
@@ -483,6 +483,24 @@
s.$noinline$f();
}
+ /// CHECK-START: java.lang.String Main.checkcastPreserveNullCheck(java.lang.Object) reference_type_propagation_after_inlining (after)
+ /// CHECK: <<This:l\d+>> ParameterValue
+ /// CHECK: <<Param:l\d+>> ParameterValue
+ /// CHECK: <<Clazz:l\d+>> LoadClass
+ /// CHECK: CheckCast [<<Param>>,<<Clazz>>]
+ /// CHECK: BoundType [<<Param>>] can_be_null:true
+
+ /// CHECK-START: java.lang.String Main.checkcastPreserveNullCheck(java.lang.Object) instruction_simplifier_after_types (after)
+ /// CHECK: <<This:l\d+>> ParameterValue
+ /// CHECK: <<Param:l\d+>> ParameterValue
+ /// CHECK: <<Clazz:l\d+>> LoadClass
+ /// CHECK: CheckCast [<<Param>>,<<Clazz>>]
+ /// CHECK: <<Bound:l\d+>> BoundType [<<Param>>]
+ /// CHECK: NullCheck [<<Bound>>]
+ public String checkcastPreserveNullCheck(Object a) {
+ return ((SubclassA)a).toString();
+ }
+
public static void main(String[] args) {
}
}
diff --git a/test/454-get-vreg/get_vreg_jni.cc b/test/454-get-vreg/get_vreg_jni.cc
index 33bdc20..9facfdb 100644
--- a/test/454-get-vreg/get_vreg_jni.cc
+++ b/test/454-get-vreg/get_vreg_jni.cc
@@ -28,12 +28,12 @@
class TestVisitor : public StackVisitor {
public:
TestVisitor(Thread* thread, Context* context, mirror::Object* this_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
this_value_(this_value),
found_method_index_(0) {}
- bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
std::string m_name(m->GetName());
diff --git a/test/455-set-vreg/set_vreg_jni.cc b/test/455-set-vreg/set_vreg_jni.cc
index 7541189..21149f6 100644
--- a/test/455-set-vreg/set_vreg_jni.cc
+++ b/test/455-set-vreg/set_vreg_jni.cc
@@ -28,11 +28,11 @@
class TestVisitor : public StackVisitor {
public:
TestVisitor(Thread* thread, Context* context, mirror::Object* this_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
this_value_(this_value) {}
- bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
std::string m_name(m->GetName());
diff --git a/test/457-regs/regs_jni.cc b/test/457-regs/regs_jni.cc
index 96f0e52..c21168b 100644
--- a/test/457-regs/regs_jni.cc
+++ b/test/457-regs/regs_jni.cc
@@ -28,10 +28,10 @@
class TestVisitor : public StackVisitor {
public:
TestVisitor(Thread* thread, Context* context)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
- bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
std::string m_name(m->GetName());
diff --git a/test/461-get-reference-vreg/get_reference_vreg_jni.cc b/test/461-get-reference-vreg/get_reference_vreg_jni.cc
index 23fe43d..8108c97 100644
--- a/test/461-get-reference-vreg/get_reference_vreg_jni.cc
+++ b/test/461-get-reference-vreg/get_reference_vreg_jni.cc
@@ -28,12 +28,12 @@
class TestVisitor : public StackVisitor {
public:
TestVisitor(Thread* thread, Context* context, mirror::Object* this_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
this_value_(this_value),
found_method_index_(0) {}
- bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
std::string m_name(m->GetName());
diff --git a/test/466-get-live-vreg/get_live_vreg_jni.cc b/test/466-get-live-vreg/get_live_vreg_jni.cc
index c4f415b..9b32fc3 100644
--- a/test/466-get-live-vreg/get_live_vreg_jni.cc
+++ b/test/466-get-live-vreg/get_live_vreg_jni.cc
@@ -27,10 +27,10 @@
class TestVisitor : public StackVisitor {
public:
- TestVisitor(Thread* thread, Context* context) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ TestVisitor(Thread* thread, Context* context) SHARED_REQUIRES(Locks::mutator_lock_)
: StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
- bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
std::string m_name(m->GetName());
diff --git a/test/484-checker-register-hints/src/Main.java b/test/484-checker-register-hints/src/Main.java
index 3715ca2..6e68f7c 100644
--- a/test/484-checker-register-hints/src/Main.java
+++ b/test/484-checker-register-hints/src/Main.java
@@ -16,6 +16,14 @@
public class Main {
+ static class Foo {
+ int field0;
+ int field1;
+ int field2;
+ int field3;
+ int field4;
+ }
+
/// CHECK-START: void Main.test1(boolean, int, int, int, int, int) register (after)
/// CHECK: name "B0"
/// CHECK-NOT: ParallelMove
@@ -25,7 +33,7 @@
/// CHECK-NOT: ParallelMove
/// CHECK: name "B3"
/// CHECK-NOT: end_block
- /// CHECK: ArraySet
+ /// CHECK: InstanceFieldSet
// We could check here that there is a parallel move, but it's only valid
// for some architectures (for example x86), as other architectures may
// not perform a move at all.
@@ -36,19 +44,19 @@
int e = live1;
int f = live2;
int g = live3;
+ int j = live0;
if (z) {
} else {
// Create enough live instructions to force spilling on x86.
int h = live4;
int i = live5;
- array[2] = e + i + h;
- array[3] = f + i + h;
- array[4] = g + i + h;
- array[0] = h;
- array[1] = i + h;
-
+ foo.field2 = e + i + h;
+ foo.field3 = f + i + h;
+ foo.field4 = g + i + h;
+ foo.field0 = h;
+ foo.field1 = i + h;
}
- live1 = e + f + g;
+ live1 = e + f + g + j;
}
/// CHECK-START: void Main.test2(boolean, int, int, int, int, int) register (after)
@@ -60,7 +68,7 @@
/// CHECK-NOT: ParallelMove
/// CHECK: name "B3"
/// CHECK-NOT: end_block
- /// CHECK: ArraySet
+ /// CHECK: InstanceFieldSet
// We could check here that there is a parallel move, but it's only valid
// for some architectures (for example x86), as other architectures may
// not perform a move at all.
@@ -71,18 +79,19 @@
int e = live1;
int f = live2;
int g = live3;
+ int j = live0;
if (z) {
if (y) {
int h = live4;
int i = live5;
- array[2] = e + i + h;
- array[3] = f + i + h;
- array[4] = g + i + h;
- array[0] = h;
- array[1] = i + h;
+ foo.field2 = e + i + h;
+ foo.field3 = f + i + h;
+ foo.field4 = g + i + h;
+ foo.field0 = h;
+ foo.field1 = i + h;
}
}
- live1 = e + f + g;
+ live1 = e + f + g + j;
}
/// CHECK-START: void Main.test3(boolean, int, int, int, int, int) register (after)
@@ -94,7 +103,7 @@
/// CHECK-NOT: ParallelMove
/// CHECK: name "B6"
/// CHECK-NOT: end_block
- /// CHECK: ArraySet
+ /// CHECK: InstanceFieldSet
// We could check here that there is a parallel move, but it's only valid
// for some architectures (for example x86), as other architectures may
// not perform a move at all.
@@ -107,6 +116,7 @@
int e = live1;
int f = live2;
int g = live3;
+ int j = live0;
if (z) {
live1 = e;
} else {
@@ -115,24 +125,25 @@
} else {
int h = live4;
int i = live5;
- array[2] = e + i + h;
- array[3] = f + i + h;
- array[4] = g + i + h;
- array[0] = h;
- array[1] = i + h;
+ foo.field2 = e + i + h;
+ foo.field3 = f + i + h;
+ foo.field4 = g + i + h;
+ foo.field0 = h;
+ foo.field1 = i + h;
}
}
- live1 = e + f + g;
+ live1 = e + f + g + j;
}
public static void main(String[] args) {
}
static boolean y;
+ static int live0;
static int live1;
static int live2;
static int live3;
static int live4;
static int live5;
- static int[] array;
+ static Foo foo;
}
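
Switching the stores from an array to fields of the small Foo class changes the instruction the checker looks for from ArraySet to InstanceFieldSet, presumably to keep the block shape stable (a field store takes only the object and the value, with no index input or separate bounds check), while the new j = live0 adds one more value that stays live across the branch. A condensed sketch, under those assumptions and with hypothetical names, of the pressure pattern the test relies on:

public class PressureDemo {
    static class Foo { int f0, f1, f2; }

    static Foo foo = new Foo();
    static int live0, live1, live2, live3, live4, live5;

    static void test(boolean z) {
        int e = live1;  // e, f, g, j must stay live across the branch
        int f = live2;
        int g = live3;
        int j = live0;
        if (!z) {
            int h = live4;  // enough extra live values to force spilling
            int i = live5;  // on register-poor targets such as x86
            foo.f0 = h;
            foo.f1 = i + h;      // InstanceFieldSet: object + value only
            foo.f2 = e + f + g;  // uses keep e, f, g live inside the branch
        }
        live1 = e + f + g + j;   // the use after the join extends the ranges
    }

    public static void main(String[] args) {
        test(false);
        System.out.println(live1);
    }
}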
diff --git a/test/525-arrays-and-fields/expected.txt b/test/525-checker-arrays-and-fields/expected.txt
similarity index 100%
rename from test/525-arrays-and-fields/expected.txt
rename to test/525-checker-arrays-and-fields/expected.txt
diff --git a/test/525-arrays-and-fields/info.txt b/test/525-checker-arrays-and-fields/info.txt
similarity index 100%
rename from test/525-arrays-and-fields/info.txt
rename to test/525-checker-arrays-and-fields/info.txt
diff --git a/test/525-arrays-and-fields/src/Main.java b/test/525-checker-arrays-and-fields/src/Main.java
similarity index 62%
rename from test/525-arrays-and-fields/src/Main.java
rename to test/525-checker-arrays-and-fields/src/Main.java
index cb1e4af..a635a51 100644
--- a/test/525-arrays-and-fields/src/Main.java
+++ b/test/525-checker-arrays-and-fields/src/Main.java
@@ -80,56 +80,129 @@
//
// Loops on static arrays with invariant static field references.
+ // The checker is used to verify that hoisting occurred.
//
+ /// CHECK-START: void Main.SInvLoopZ() licm (before)
+ /// CHECK-DAG: StaticFieldGet loop:{{B\d+}}
+ /// CHECK-DAG: StaticFieldGet loop:{{B\d+}}
+
+ /// CHECK-START: void Main.SInvLoopZ() licm (after)
+ /// CHECK-DAG: StaticFieldGet loop:none
+ /// CHECK-DAG: StaticFieldGet loop:none
+
private static void SInvLoopZ() {
for (int i = 0; i < sArrZ.length; i++) {
sArrZ[i] = sZ;
}
}
+ /// CHECK-START: void Main.SInvLoopB() licm (before)
+ /// CHECK-DAG: StaticFieldGet loop:{{B\d+}}
+ /// CHECK-DAG: StaticFieldGet loop:{{B\d+}}
+
+ /// CHECK-START: void Main.SInvLoopB() licm (after)
+ /// CHECK-DAG: StaticFieldGet loop:none
+ /// CHECK-DAG: StaticFieldGet loop:none
+
private static void SInvLoopB() {
for (int i = 0; i < sArrB.length; i++) {
sArrB[i] = sB;
}
}
+ /// CHECK-START: void Main.SInvLoopC() licm (before)
+ /// CHECK-DAG: StaticFieldGet loop:{{B\d+}}
+ /// CHECK-DAG: StaticFieldGet loop:{{B\d+}}
+
+ /// CHECK-START: void Main.SInvLoopC() licm (after)
+ /// CHECK-DAG: StaticFieldGet loop:none
+ /// CHECK-DAG: StaticFieldGet loop:none
+
private static void SInvLoopC() {
for (int i = 0; i < sArrC.length; i++) {
sArrC[i] = sC;
}
}
+ /// CHECK-START: void Main.SInvLoopS() licm (before)
+ /// CHECK-DAG: StaticFieldGet loop:{{B\d+}}
+ /// CHECK-DAG: StaticFieldGet loop:{{B\d+}}
+
+ /// CHECK-START: void Main.SInvLoopS() licm (after)
+ /// CHECK-DAG: StaticFieldGet loop:none
+ /// CHECK-DAG: StaticFieldGet loop:none
+
private static void SInvLoopS() {
for (int i = 0; i < sArrS.length; i++) {
sArrS[i] = sS;
}
}
+ /// CHECK-START: void Main.SInvLoopI() licm (before)
+ /// CHECK-DAG: StaticFieldGet loop:{{B\d+}}
+ /// CHECK-DAG: StaticFieldGet loop:{{B\d+}}
+
+ /// CHECK-START: void Main.SInvLoopI() licm (after)
+ /// CHECK-DAG: StaticFieldGet loop:none
+ /// CHECK-DAG: StaticFieldGet loop:none
+
private static void SInvLoopI() {
for (int i = 0; i < sArrI.length; i++) {
sArrI[i] = sI;
}
}
+ /// CHECK-START: void Main.SInvLoopJ() licm (before)
+ /// CHECK-DAG: StaticFieldGet loop:{{B\d+}}
+ /// CHECK-DAG: StaticFieldGet loop:{{B\d+}}
+
+ /// CHECK-START: void Main.SInvLoopJ() licm (after)
+ /// CHECK-DAG: StaticFieldGet loop:none
+ /// CHECK-DAG: StaticFieldGet loop:none
+
private static void SInvLoopJ() {
for (int i = 0; i < sArrJ.length; i++) {
sArrJ[i] = sJ;
}
}
+ /// CHECK-START: void Main.SInvLoopF() licm (before)
+ /// CHECK-DAG: StaticFieldGet loop:{{B\d+}}
+ /// CHECK-DAG: StaticFieldGet loop:{{B\d+}}
+
+ /// CHECK-START: void Main.SInvLoopF() licm (after)
+ /// CHECK-DAG: StaticFieldGet loop:none
+ /// CHECK-DAG: StaticFieldGet loop:none
+
private static void SInvLoopF() {
for (int i = 0; i < sArrF.length; i++) {
sArrF[i] = sF;
}
}
+ /// CHECK-START: void Main.SInvLoopD() licm (before)
+ /// CHECK-DAG: StaticFieldGet loop:{{B\d+}}
+ /// CHECK-DAG: StaticFieldGet loop:{{B\d+}}
+
+ /// CHECK-START: void Main.SInvLoopD() licm (after)
+ /// CHECK-DAG: StaticFieldGet loop:none
+ /// CHECK-DAG: StaticFieldGet loop:none
+
private static void SInvLoopD() {
for (int i = 0; i < sArrD.length; i++) {
sArrD[i] = sD;
}
}
+ /// CHECK-START: void Main.SInvLoopL() licm (before)
+ /// CHECK-DAG: StaticFieldGet loop:{{B\d+}}
+ /// CHECK-DAG: StaticFieldGet loop:{{B\d+}}
+
+ /// CHECK-START: void Main.SInvLoopL() licm (after)
+ /// CHECK-DAG: StaticFieldGet loop:none
+ /// CHECK-DAG: StaticFieldGet loop:none
+
private static void SInvLoopL() {
for (int i = 0; i < sArrL.length; i++) {
sArrL[i] = sL;
@@ -138,6 +211,7 @@
//
// Loops on static arrays with variant static field references.
+ // Incorrect hoisting is detected by an incorrect outcome.
//
private static void SVarLoopZ() {
@@ -214,56 +288,130 @@
//
// Loops on static arrays with a cross-over reference.
+ // Incorrect hoisting is detected by an incorrect outcome.
+ // In addition, the checker is used to verify that no hoisting occurred.
//
+ /// CHECK-START: void Main.SCrossOverLoopZ() licm (before)
+ /// CHECK-DAG: ArrayGet loop:{{B\d+}}
+ /// CHECK-DAG: ArraySet loop:{{B\d+}}
+
+ /// CHECK-START: void Main.SCrossOverLoopZ() licm (after)
+ /// CHECK-DAG: ArrayGet loop:{{B\d+}}
+ /// CHECK-DAG: ArraySet loop:{{B\d+}}
+
private static void SCrossOverLoopZ() {
for (int i = 0; i < sArrZ.length; i++) {
sArrZ[i] = !sArrZ[20];
}
}
+ /// CHECK-START: void Main.SCrossOverLoopB() licm (before)
+ /// CHECK-DAG: ArrayGet loop:{{B\d+}}
+ /// CHECK-DAG: ArraySet loop:{{B\d+}}
+
+ /// CHECK-START: void Main.SCrossOverLoopB() licm (after)
+ /// CHECK-DAG: ArrayGet loop:{{B\d+}}
+ /// CHECK-DAG: ArraySet loop:{{B\d+}}
+
private static void SCrossOverLoopB() {
for (int i = 0; i < sArrB.length; i++) {
sArrB[i] = (byte)(sArrB[20] + 2);
}
}
+ /// CHECK-START: void Main.SCrossOverLoopC() licm (before)
+ /// CHECK-DAG: ArrayGet loop:{{B\d+}}
+ /// CHECK-DAG: ArraySet loop:{{B\d+}}
+
+ /// CHECK-START: void Main.SCrossOverLoopC() licm (after)
+ /// CHECK-DAG: ArrayGet loop:{{B\d+}}
+ /// CHECK-DAG: ArraySet loop:{{B\d+}}
+
private static void SCrossOverLoopC() {
for (int i = 0; i < sArrC.length; i++) {
sArrC[i] = (char)(sArrC[20] + 2);
}
}
+ /// CHECK-START: void Main.SCrossOverLoopS() licm (before)
+ /// CHECK-DAG: ArrayGet loop:{{B\d+}}
+ /// CHECK-DAG: ArraySet loop:{{B\d+}}
+
+ /// CHECK-START: void Main.SCrossOverLoopS() licm (after)
+ /// CHECK-DAG: ArrayGet loop:{{B\d+}}
+ /// CHECK-DAG: ArraySet loop:{{B\d+}}
+
private static void SCrossOverLoopS() {
for (int i = 0; i < sArrS.length; i++) {
sArrS[i] = (short)(sArrS[20] + 2);
}
}
+ /// CHECK-START: void Main.SCrossOverLoopI() licm (before)
+ /// CHECK-DAG: ArrayGet loop:{{B\d+}}
+ /// CHECK-DAG: ArraySet loop:{{B\d+}}
+
+ /// CHECK-START: void Main.SCrossOverLoopI() licm (after)
+ /// CHECK-DAG: ArrayGet loop:{{B\d+}}
+ /// CHECK-DAG: ArraySet loop:{{B\d+}}
+
private static void SCrossOverLoopI() {
for (int i = 0; i < sArrI.length; i++) {
sArrI[i] = sArrI[20] + 2;
}
}
+ /// CHECK-START: void Main.SCrossOverLoopJ() licm (before)
+ /// CHECK-DAG: ArrayGet loop:{{B\d+}}
+ /// CHECK-DAG: ArraySet loop:{{B\d+}}
+
+ /// CHECK-START: void Main.SCrossOverLoopJ() licm (after)
+ /// CHECK-DAG: ArrayGet loop:{{B\d+}}
+ /// CHECK-DAG: ArraySet loop:{{B\d+}}
+
private static void SCrossOverLoopJ() {
for (int i = 0; i < sArrJ.length; i++) {
sArrJ[i] = sArrJ[20] + 2;
}
}
+ /// CHECK-START: void Main.SCrossOverLoopF() licm (before)
+ /// CHECK-DAG: ArrayGet loop:{{B\d+}}
+ /// CHECK-DAG: ArraySet loop:{{B\d+}}
+
+ /// CHECK-START: void Main.SCrossOverLoopF() licm (after)
+ /// CHECK-DAG: ArrayGet loop:{{B\d+}}
+ /// CHECK-DAG: ArraySet loop:{{B\d+}}
+
private static void SCrossOverLoopF() {
for (int i = 0; i < sArrF.length; i++) {
sArrF[i] = sArrF[20] + 2;
}
}
+ /// CHECK-START: void Main.SCrossOverLoopD() licm (before)
+ /// CHECK-DAG: ArrayGet loop:{{B\d+}}
+ /// CHECK-DAG: ArraySet loop:{{B\d+}}
+
+ /// CHECK-START: void Main.SCrossOverLoopD() licm (after)
+ /// CHECK-DAG: ArrayGet loop:{{B\d+}}
+ /// CHECK-DAG: ArraySet loop:{{B\d+}}
+
private static void SCrossOverLoopD() {
for (int i = 0; i < sArrD.length; i++) {
sArrD[i] = sArrD[20] + 2;
}
}
+ /// CHECK-START: void Main.SCrossOverLoopL() licm (before)
+ /// CHECK-DAG: ArrayGet loop:{{B\d+}}
+ /// CHECK-DAG: ArraySet loop:{{B\d+}}
+
+ /// CHECK-START: void Main.SCrossOverLoopL() licm (after)
+ /// CHECK-DAG: ArrayGet loop:{{B\d+}}
+ /// CHECK-DAG: ArraySet loop:{{B\d+}}
+
private static void SCrossOverLoopL() {
for (int i = 0; i < sArrL.length; i++) {
sArrL[i] = (sArrL[20] == anObject) ? anotherObject : anObject;
@@ -272,56 +420,129 @@
//
// Loops on instance arrays with invariant instance field references.
+ // The checker is used to verify that hoisting occurred.
//
+ /// CHECK-START: void Main.InvLoopZ() licm (before)
+ /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}}
+ /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}}
+
+ /// CHECK-START: void Main.InvLoopZ() licm (after)
+ /// CHECK-DAG: InstanceFieldGet loop:none
+ /// CHECK-DAG: InstanceFieldGet loop:none
+
private void InvLoopZ() {
for (int i = 0; i < mArrZ.length; i++) {
mArrZ[i] = mZ;
}
}
+ /// CHECK-START: void Main.InvLoopB() licm (before)
+ /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}}
+ /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}}
+
+ /// CHECK-START: void Main.InvLoopB() licm (after)
+ /// CHECK-DAG: InstanceFieldGet loop:none
+ /// CHECK-DAG: InstanceFieldGet loop:none
+
private void InvLoopB() {
for (int i = 0; i < mArrB.length; i++) {
mArrB[i] = mB;
}
}
+ /// CHECK-START: void Main.InvLoopC() licm (before)
+ /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}}
+ /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}}
+
+ /// CHECK-START: void Main.InvLoopC() licm (after)
+ /// CHECK-DAG: InstanceFieldGet loop:none
+ /// CHECK-DAG: InstanceFieldGet loop:none
+
private void InvLoopC() {
for (int i = 0; i < mArrC.length; i++) {
mArrC[i] = mC;
}
}
+ /// CHECK-START: void Main.InvLoopS() licm (before)
+ /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}}
+ /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}}
+
+ /// CHECK-START: void Main.InvLoopS() licm (after)
+ /// CHECK-DAG: InstanceFieldGet loop:none
+ /// CHECK-DAG: InstanceFieldGet loop:none
+
private void InvLoopS() {
for (int i = 0; i < mArrS.length; i++) {
mArrS[i] = mS;
}
}
+ /// CHECK-START: void Main.InvLoopI() licm (before)
+ /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}}
+ /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}}
+
+ /// CHECK-START: void Main.InvLoopI() licm (after)
+ /// CHECK-DAG: InstanceFieldGet loop:none
+ /// CHECK-DAG: InstanceFieldGet loop:none
+
private void InvLoopI() {
for (int i = 0; i < mArrI.length; i++) {
mArrI[i] = mI;
}
}
+ /// CHECK-START: void Main.InvLoopJ() licm (before)
+ /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}}
+ /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}}
+
+ /// CHECK-START: void Main.InvLoopJ() licm (after)
+ /// CHECK-DAG: InstanceFieldGet loop:none
+ /// CHECK-DAG: InstanceFieldGet loop:none
+
private void InvLoopJ() {
for (int i = 0; i < mArrJ.length; i++) {
mArrJ[i] = mJ;
}
}
+ /// CHECK-START: void Main.InvLoopF() licm (before)
+ /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}}
+ /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}}
+
+ /// CHECK-START: void Main.InvLoopF() licm (after)
+ /// CHECK-DAG: InstanceFieldGet loop:none
+ /// CHECK-DAG: InstanceFieldGet loop:none
+
private void InvLoopF() {
for (int i = 0; i < mArrF.length; i++) {
mArrF[i] = mF;
}
}
+ /// CHECK-START: void Main.InvLoopD() licm (before)
+ /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}}
+ /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}}
+
+ /// CHECK-START: void Main.InvLoopD() licm (after)
+ /// CHECK-DAG: InstanceFieldGet loop:none
+ /// CHECK-DAG: InstanceFieldGet loop:none
+
private void InvLoopD() {
for (int i = 0; i < mArrD.length; i++) {
mArrD[i] = mD;
}
}
+ /// CHECK-START: void Main.InvLoopL() licm (before)
+ /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}}
+ /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}}
+
+ /// CHECK-START: void Main.InvLoopL() licm (after)
+ /// CHECK-DAG: InstanceFieldGet loop:none
+ /// CHECK-DAG: InstanceFieldGet loop:none
+
private void InvLoopL() {
for (int i = 0; i < mArrL.length; i++) {
mArrL[i] = mL;
@@ -330,6 +551,7 @@
//
// Loops on instance arrays with variant instance field references.
+ // Incorrect hoisting is detected by an incorrect outcome.
//
private void VarLoopZ() {
@@ -406,56 +628,130 @@
//
// Loops on instance arrays with a cross-over reference.
+ // Incorrect hoisting is detected by an incorrect outcome.
+ // In addition, the checker is used to verify that no hoisting occurred.
//
+ /// CHECK-START: void Main.CrossOverLoopZ() licm (before)
+ /// CHECK-DAG: ArrayGet loop:{{B\d+}}
+ /// CHECK-DAG: ArraySet loop:{{B\d+}}
+
+ /// CHECK-START: void Main.CrossOverLoopZ() licm (after)
+ /// CHECK-DAG: ArrayGet loop:{{B\d+}}
+ /// CHECK-DAG: ArraySet loop:{{B\d+}}
+
private void CrossOverLoopZ() {
for (int i = 0; i < mArrZ.length; i++) {
mArrZ[i] = !mArrZ[20];
}
}
+ /// CHECK-START: void Main.CrossOverLoopB() licm (before)
+ /// CHECK-DAG: ArrayGet loop:{{B\d+}}
+ /// CHECK-DAG: ArraySet loop:{{B\d+}}
+
+ /// CHECK-START: void Main.CrossOverLoopB() licm (after)
+ /// CHECK-DAG: ArrayGet loop:{{B\d+}}
+ /// CHECK-DAG: ArraySet loop:{{B\d+}}
+
private void CrossOverLoopB() {
for (int i = 0; i < mArrB.length; i++) {
mArrB[i] = (byte)(mArrB[20] + 2);
}
}
+ /// CHECK-START: void Main.CrossOverLoopC() licm (before)
+ /// CHECK-DAG: ArrayGet loop:{{B\d+}}
+ /// CHECK-DAG: ArraySet loop:{{B\d+}}
+
+ /// CHECK-START: void Main.CrossOverLoopC() licm (after)
+ /// CHECK-DAG: ArrayGet loop:{{B\d+}}
+ /// CHECK-DAG: ArraySet loop:{{B\d+}}
+
private void CrossOverLoopC() {
for (int i = 0; i < mArrC.length; i++) {
mArrC[i] = (char)(mArrC[20] + 2);
}
}
+ /// CHECK-START: void Main.CrossOverLoopS() licm (before)
+ /// CHECK-DAG: ArrayGet loop:{{B\d+}}
+ /// CHECK-DAG: ArraySet loop:{{B\d+}}
+
+ /// CHECK-START: void Main.CrossOverLoopS() licm (after)
+ /// CHECK-DAG: ArrayGet loop:{{B\d+}}
+ /// CHECK-DAG: ArraySet loop:{{B\d+}}
+
private void CrossOverLoopS() {
for (int i = 0; i < mArrS.length; i++) {
mArrS[i] = (short)(mArrS[20] + 2);
}
}
+ /// CHECK-START: void Main.CrossOverLoopI() licm (before)
+ /// CHECK-DAG: ArrayGet loop:{{B\d+}}
+ /// CHECK-DAG: ArraySet loop:{{B\d+}}
+
+ /// CHECK-START: void Main.CrossOverLoopI() licm (after)
+ /// CHECK-DAG: ArrayGet loop:{{B\d+}}
+ /// CHECK-DAG: ArraySet loop:{{B\d+}}
+
private void CrossOverLoopI() {
for (int i = 0; i < mArrI.length; i++) {
mArrI[i] = mArrI[20] + 2;
}
}
+ /// CHECK-START: void Main.CrossOverLoopJ() licm (before)
+ /// CHECK-DAG: ArrayGet loop:{{B\d+}}
+ /// CHECK-DAG: ArraySet loop:{{B\d+}}
+
+ /// CHECK-START: void Main.CrossOverLoopJ() licm (after)
+ /// CHECK-DAG: ArrayGet loop:{{B\d+}}
+ /// CHECK-DAG: ArraySet loop:{{B\d+}}
+
private void CrossOverLoopJ() {
for (int i = 0; i < mArrJ.length; i++) {
mArrJ[i] = mArrJ[20] + 2;
}
}
+ /// CHECK-START: void Main.CrossOverLoopF() licm (before)
+ /// CHECK-DAG: ArrayGet loop:{{B\d+}}
+ /// CHECK-DAG: ArraySet loop:{{B\d+}}
+
+ /// CHECK-START: void Main.CrossOverLoopF() licm (after)
+ /// CHECK-DAG: ArrayGet loop:{{B\d+}}
+ /// CHECK-DAG: ArraySet loop:{{B\d+}}
+
private void CrossOverLoopF() {
for (int i = 0; i < mArrF.length; i++) {
mArrF[i] = mArrF[20] + 2;
}
}
+ /// CHECK-START: void Main.CrossOverLoopD() licm (before)
+ /// CHECK-DAG: ArrayGet loop:{{B\d+}}
+ /// CHECK-DAG: ArraySet loop:{{B\d+}}
+
+ /// CHECK-START: void Main.CrossOverLoopD() licm (after)
+ /// CHECK-DAG: ArrayGet loop:{{B\d+}}
+ /// CHECK-DAG: ArraySet loop:{{B\d+}}
+
private void CrossOverLoopD() {
for (int i = 0; i < mArrD.length; i++) {
mArrD[i] = mArrD[20] + 2;
}
}
+ /// CHECK-START: void Main.CrossOverLoopL() licm (before)
+ /// CHECK-DAG: ArrayGet loop:{{B\d+}}
+ /// CHECK-DAG: ArraySet loop:{{B\d+}}
+
+ /// CHECK-START: void Main.CrossOverLoopL() licm (after)
+ /// CHECK-DAG: ArrayGet loop:{{B\d+}}
+ /// CHECK-DAG: ArraySet loop:{{B\d+}}
+
private void CrossOverLoopL() {
for (int i = 0; i < mArrL.length; i++) {
mArrL[i] = (mArrL[20] == anObject) ? anotherObject : anObject;
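
The before/after pairs above encode the contract of the licm pass: in the invariant loops both field gets (the field value and the array reference) move to loop:none, while the cross-over loops must keep their ArrayGet inside the loop because element 20 may be the very element being written. At the source level the transformation, and the aliasing hazard that blocks it, look roughly like this (a sketch, not compiler output):

public class LicmDemo {
    static int sI;
    static int[] sArrI = new int[42];

    static void invariant() {
        // Both the read of sI and the read of sArrI are loop-invariant,
        // so after licm this behaves like:
        //   int[] a = sArrI; int t = sI;
        //   for (int i = 0; i < a.length; i++) a[i] = t;
        for (int i = 0; i < sArrI.length; i++) {
            sArrI[i] = sI;
        }
    }

    static void crossOver() {
        // sArrI[20] is overwritten on the iteration where i == 20, so
        // hoisting the read would change every store after that point;
        // the ArrayGet must stay inside the loop.
        for (int i = 0; i < sArrI.length; i++) {
            sArrI[i] = sArrI[20] + 2;
        }
    }

    public static void main(String[] args) {
        invariant();
        crossOver();
        System.out.println(sArrI[21]);
    }
}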
diff --git a/test/800-smali/expected.txt b/test/800-smali/expected.txt
index fd9fcaf..728ccea 100644
--- a/test/800-smali/expected.txt
+++ b/test/800-smali/expected.txt
@@ -36,4 +36,5 @@
b/22411633 (3)
b/22411633 (4)
b/22411633 (5)
+b/22777307
Done!
diff --git a/test/800-smali/smali/b_22777307.smali b/test/800-smali/smali/b_22777307.smali
new file mode 100644
index 0000000..6de3c70
--- /dev/null
+++ b/test/800-smali/smali/b_22777307.smali
@@ -0,0 +1,18 @@
+.class public LB22777307;
+.super Ljava/lang/Object;
+
+# A static field. That way the created reference has a use.
+.field private static sTest:Ljava/lang/Object;
+
+.method public static run()V
+.registers 2
+ # This is a broken new-instance: it must throw at runtime rather than be rejected by the
+ # verifier. This test is here to ensure we won't produce a VerifyError.
+ # Cloneable was chosen because it's an already existing interface.
+ new-instance v0, Ljava/lang/Cloneable;
+ invoke-direct {v0}, Ljava/lang/Cloneable;-><init>()V
+ sput-object v0, LB22777307;->sTest:Ljava/lang/Object;
+
+ return-void
+
+.end method
diff --git a/test/800-smali/src/Main.java b/test/800-smali/src/Main.java
index 8da2af4..438e214 100644
--- a/test/800-smali/src/Main.java
+++ b/test/800-smali/src/Main.java
@@ -119,6 +119,8 @@
new VerifyError(), null));
testCases.add(new TestCase("b/22411633 (5)", "B22411633_5", "run", new Object[] { false },
null, null));
+ testCases.add(new TestCase("b/22777307", "B22777307", "run", null, new InstantiationError(),
+ null));
}
public void runTests() {
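
javac refuses to compile new Cloneable(), which is why this regression test has to be written directly in smali; the expected behavior is a runtime InstantiationError rather than a verify-time rejection. The nearest expressible Java analogue (a sketch using reflection, where the same check surfaces as InstantiationException instead) is:

public class InstantiationDemo {
    public static void main(String[] args) throws Exception {
        try {
            // Interfaces cannot be instantiated; reflection reports this
            // as InstantiationException, while the raw new-instance
            // bytecode in the smali test must raise InstantiationError.
            Object o = Cloneable.class.newInstance();
            System.out.println("instantiated (wrong): " + o);
        } catch (InstantiationException expected) {
            System.out.println("InstantiationException, as expected");
        }
    }
}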
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 3d5c483..3698bc8 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -113,7 +113,7 @@
COMPILER_TYPES += default
endif
ifeq ($(ART_TEST_INTERPRETER_ACCESS_CHECKS),true)
- COMPILER_TYPES += interpreter-access-checks
+ COMPILER_TYPES += interp-ac
endif
ifeq ($(ART_TEST_INTERPRETER),true)
COMPILER_TYPES += interpreter
@@ -277,9 +277,9 @@
506-verify-aput \
800-smali
-ifneq (,$(filter interpreter-access-checks,$(COMPILER_TYPES)))
+ifneq (,$(filter interp-ac,$(COMPILER_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
- interpreter-access-checks,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
+ interp-ac,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
$(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_INTERPRETER_ACCESS_CHECK_TESTS), $(ALL_ADDRESS_SIZES))
endif
@@ -629,7 +629,7 @@
# Create a rule to build and run tests following the form:
# test-art-{1: host or target}-run-test-{2: debug ndebug}-{3: prebuild no-prebuild no-dex2oat}-
-# {4: interpreter default optimizing jit interpreter-access-checks}-
+# {4: interpreter default optimizing jit interp-ac}-
# {5: relocate nrelocate relocate-npatchoat}-
# {6: trace or ntrace}-{7: gcstress gcverify cms}-{8: forcecopy checkjni jni}-
# {9: no-image image picimage}-{10: pictest npictest}-
@@ -700,7 +700,7 @@
ifeq ($(4),interpreter)
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_INTERPRETER_RULES
run_test_options += --interpreter
- else ifeq ($(4),interpreter-access-checks)
+ else ifeq ($(4),interp-ac)
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_INTERPRETER_ACCESS_CHECKS_RULES
run_test_options += --interpreter --verify-soft-fail
else
diff --git a/test/run-test b/test/run-test
index eabbab3..934329f 100755
--- a/test/run-test
+++ b/test/run-test
@@ -264,7 +264,7 @@
shift
elif [ "x$1" = "x--verify-soft-fail" ]; then
run_args="${run_args} --verify-soft-fail"
- image_suffix="-interpreter-access-checks"
+ image_suffix="-interp-ac"
shift
elif [ "x$1" = "x--no-optimize" ]; then
run_args="${run_args} --no-optimize"
@@ -621,9 +621,13 @@
USE_JACK="false"
if [ "$runtime" = "art" -a "$image_suffix" = "-optimizing" -a "$target_mode" = "no" -a "$debuggable" = "no" ]; then
- run_checker="yes"
- run_args="${run_args} -Xcompiler-option --dump-cfg=$tmp_dir/$cfg_output \
- -Xcompiler-option -j1"
+ # In no-prebuild mode, the compiler is only invoked if both dex2oat and
+ # patchoat are available. Disable Checker otherwise (b/22552692).
+ if [ "$prebuild_mode" = "yes" ] || [ "$have_patchoat" = "yes" -a "$have_dex2oat" = "yes" ]; then
+ run_checker="yes"
+ run_args="${run_args} -Xcompiler-option --dump-cfg=$tmp_dir/$cfg_output \
+ -Xcompiler-option -j1"
+ fi
fi
fi
diff --git a/tools/checker/checker.py b/tools/checker/checker.py
index ed630e3..4e516de 100755
--- a/tools/checker/checker.py
+++ b/tools/checker/checker.py
@@ -62,7 +62,7 @@
def FindCheckerFiles(path):
""" Returns a list of files to scan for check annotations in the given path.
Path to a file is returned as a single-element list; directories are
- recursively traversed and all '.java' files returned.
+ recursively traversed and all '.java' and '.smali' files are returned.
"""
if not path:
Logger.fail("No source path provided")
diff --git a/tools/checker/file_format/checker/parser.py b/tools/checker/file_format/checker/parser.py
index 33735cb..f354395 100644
--- a/tools/checker/file_format/checker/parser.py
+++ b/tools/checker/file_format/checker/parser.py
@@ -76,7 +76,7 @@
if notLine is not None:
return (notLine, TestAssertion.Variant.Not, lineNo), None
- Logger.fail("Checker assertion could not be parsed", fileName, lineNo)
+ Logger.fail("Checker assertion could not be parsed: '" + line + "'", fileName, lineNo)
def __isMatchAtStart(match):
""" Tests if the given Match occurred at the beginning of the line. """
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index 992a8a6..7ada189 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -150,5 +150,12 @@
result: EXEC_FAILED,
modes: [device],
names: ["org.apache.harmony.tests.java.lang.ClassTest#test_forNameLjava_lang_String"]
+},
+{
+ description: "TimeZoneTest.testAllDisplayNames times out, needs investigation",
+ result: EXEC_TIMEOUT,
+ modes: [device],
+ names: ["libcore.java.util.TimeZoneTest#testAllDisplayNames"],
+ bug: 22786792
}
]