Move to newer clang annotations

Also enable -Wthread-safety-negative.

Changes:
Switch from the old lock-based annotations (e.g. SHARED_LOCKS_REQUIRED)
to clang's capability-based ones (e.g. SHARED_REQUIRES) and to negative
capabilities.

Future work:
Use capabilities to implement uninterruptible annotations that work
with AssertNoThreadSuspension.
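
For reference, a minimal, self-contained sketch of how the new
spellings map onto clang's capability attributes and what
-Wthread-safety-negative adds. The macro bodies, the lock, and the
function names below are illustrative stand-ins, not ART's real
definitions (those live in base/macros.h and base/mutex.h):

  // Check with:
  //   clang++ -std=c++11 -fsyntax-only -Wthread-safety -Wthread-safety-negative sketch.cc
  #define CAPABILITY(x)        __attribute__((capability(x)))
  #define REQUIRES(...)        __attribute__((requires_capability(__VA_ARGS__)))
  #define SHARED_REQUIRES(...) __attribute__((requires_shared_capability(__VA_ARGS__)))
  #define ACQUIRE(...)         __attribute__((acquire_capability(__VA_ARGS__)))
  #define RELEASE(...)         __attribute__((release_capability(__VA_ARGS__)))

  // A reader-writer lock modeled as a capability.
  class CAPABILITY("mutex") ReaderWriterMutex {
   public:
    void ExclusiveLock() ACQUIRE() {}
    void ExclusiveUnlock() RELEASE() {}
  };

  ReaderWriterMutex mutator_lock;

  // Old spelling: SHARED_LOCKS_REQUIRED(mutator_lock).
  int ReadHeapWord() SHARED_REQUIRES(mutator_lock) { return 42; }

  // Negative capability: callers must provably NOT hold the lock, which
  // rejects re-entrant acquisition at compile time.
  void SuspendAll() REQUIRES(!mutator_lock) {
    mutator_lock.ExclusiveLock();
    mutator_lock.ExclusiveUnlock();
  }

  void Caller() REQUIRES(!mutator_lock) {  // Without this annotation,
    SuspendAll();                          // -Wthread-safety-negative warns here.
  }

The future-work item could take a similar shape: model "no thread
suspension" as a capability so that AssertNoThreadSuspension becomes
statically checkable. Again, purely a hypothetical sketch:

  #define ASSERT_CAPABILITY(x) __attribute__((assert_capability(x)))

  class CAPABILITY("role") UninterruptibleRole {};
  UninterruptibleRole uninterruptible;

  // Code that must not suspend declares the role as a precondition...
  void VisitRootsNoSuspend() REQUIRES(uninterruptible) {}
  // ...and the runtime check doubles as a static assertion of the role.
  void AssertNoThreadSuspension() ASSERT_CAPABILITY(uninterruptible) {}
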
Bug: 20072211
Change-Id: I42fcbe0300d98a831c89d1eff3ecd5a7e99ebf33
diff --git a/runtime/mirror/abstract_method.h b/runtime/mirror/abstract_method.h
index 6240b3b..845d156 100644
--- a/runtime/mirror/abstract_method.h
+++ b/runtime/mirror/abstract_method.h
@@ -34,12 +34,12 @@
class MANAGED AbstractMethod : public AccessibleObject {
public:
// Called from Constructor::CreateFromArtMethod, Method::CreateFromArtMethod.
- bool CreateFromArtMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool CreateFromArtMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
- ArtMethod* GetArtMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtMethod* GetArtMethod() SHARED_REQUIRES(Locks::mutator_lock_);
// Only used by the image writer.
- void SetArtMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::Class* GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetArtMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::Class* GetDeclaringClass() SHARED_REQUIRES(Locks::mutator_lock_);
private:
static MemberOffset ArtMethodOffset() {
diff --git a/runtime/mirror/accessible_object.h b/runtime/mirror/accessible_object.h
index 6d4c0f6..dcf5118 100644
--- a/runtime/mirror/accessible_object.h
+++ b/runtime/mirror/accessible_object.h
@@ -36,12 +36,12 @@
}
template<bool kTransactionActive>
- void SetAccessible(bool value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetAccessible(bool value) SHARED_REQUIRES(Locks::mutator_lock_) {
UNUSED(padding_);
return SetFieldBoolean<kTransactionActive>(FlagOffset(), value ? 1u : 0u);
}
- bool IsAccessible() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsAccessible() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetFieldBoolean(FlagOffset());
}
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index 88d75ab..3d54029 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -101,7 +101,7 @@
}
void operator()(Object* obj, size_t usable_size) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
UNUSED(usable_size);
// Avoid AsArray as object is not yet in live bitmap or allocation stack.
Array* array = down_cast<Array*>(obj);
@@ -126,7 +126,7 @@
}
void operator()(Object* obj, size_t usable_size) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// Avoid AsArray as object is not yet in live bitmap or allocation stack.
Array* array = down_cast<Array*>(obj);
// DCHECK(array->IsArrayInstance());
diff --git a/runtime/mirror/array.cc b/runtime/mirror/array.cc
index d72c03f..4128689 100644
--- a/runtime/mirror/array.cc
+++ b/runtime/mirror/array.cc
@@ -43,7 +43,7 @@
static Array* RecursiveCreateMultiArray(Thread* self,
Handle<Class> array_class, int current_dimension,
Handle<mirror::IntArray> dimensions)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
int32_t array_length = dimensions->Get(current_dimension);
StackHandleScope<1> hs(self);
Handle<Array> new_array(
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index e65611d..08c12e9 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -39,21 +39,21 @@
template <bool kIsInstrumented, bool kFillUsable = false>
ALWAYS_INLINE static Array* Alloc(Thread* self, Class* array_class, int32_t component_count,
size_t component_size_shift, gc::AllocatorType allocator_type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static Array* CreateMultiArray(Thread* self, Handle<Class> element_class,
Handle<IntArray> dimensions)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- size_t SizeOf() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ size_t SizeOf() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE int32_t GetLength() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE int32_t GetLength() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Array, length_));
}
- void SetLength(int32_t length) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetLength(int32_t length) SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK_GE(length, 0);
// We use the non-transactional version since we can't undo this write. We also disable checking
// since it would fail during a transaction.
@@ -67,7 +67,7 @@
static MemberOffset DataOffset(size_t component_size);
void* GetRawData(size_t component_size, int32_t index)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
intptr_t data = reinterpret_cast<intptr_t>(this) + DataOffset(component_size).Int32Value() +
+ (index * component_size);
return reinterpret_cast<void*>(data);
@@ -82,16 +82,16 @@
// Returns true if the index is valid. If not, throws an ArrayIndexOutOfBoundsException and
// returns false.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE bool CheckIsValidIndex(int32_t index) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE bool CheckIsValidIndex(int32_t index) SHARED_REQUIRES(Locks::mutator_lock_);
- Array* CopyOf(Thread* self, int32_t new_length) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ Array* CopyOf(Thread* self, int32_t new_length) SHARED_REQUIRES(Locks::mutator_lock_);
protected:
- void ThrowArrayStoreException(Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void ThrowArrayStoreException(Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
private:
void ThrowArrayIndexOutOfBoundsException(int32_t index)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// The number of array elements.
int32_t length_;
@@ -107,32 +107,32 @@
typedef T ElementType;
static PrimitiveArray<T>* Alloc(Thread* self, size_t length)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- const T* GetData() const ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const T* GetData() const ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
return reinterpret_cast<const T*>(GetRawData(sizeof(T), 0));
}
- T* GetData() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ T* GetData() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
return reinterpret_cast<T*>(GetRawData(sizeof(T), 0));
}
- T Get(int32_t i) ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ T Get(int32_t i) ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_);
- T GetWithoutChecks(int32_t i) ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ T GetWithoutChecks(int32_t i) ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(CheckIsValidIndex(i));
return GetData()[i];
}
- void Set(int32_t i, T value) ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void Set(int32_t i, T value) ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_);
// TODO fix thread safety analysis broken by the use of template. This should be
- // SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
+ // SHARED_REQUIRES(Locks::mutator_lock_).
template<bool kTransactionActive, bool kCheckTransaction = true>
void Set(int32_t i, T value) ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS;
// TODO fix thread safety analysis broken by the use of template. This should be
- // SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
+ // SHARED_REQUIRES(Locks::mutator_lock_).
template<bool kTransactionActive,
bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
@@ -144,7 +144,7 @@
* and the arrays non-null.
*/
void Memmove(int32_t dst_pos, PrimitiveArray<T>* src, int32_t src_pos, int32_t count)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
/*
* Works like memcpy(), except we guarantee not to allow tearing of array values (ie using
@@ -152,7 +152,7 @@
* and the arrays non-null.
*/
void Memcpy(int32_t dst_pos, PrimitiveArray<T>* src, int32_t src_pos, int32_t count)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static void SetArrayClass(Class* array_class) {
CHECK(array_class_.IsNull());
@@ -160,7 +160,7 @@
array_class_ = GcRoot<Class>(array_class);
}
- static Class* GetArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ static Class* GetArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(!array_class_.IsNull());
return array_class_.Read();
}
@@ -170,7 +170,7 @@
array_class_ = GcRoot<Class>(nullptr);
}
- static void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
private:
static GcRoot<Class> array_class_;
@@ -183,11 +183,11 @@
public:
template<typename T>
T GetElementPtrSize(uint32_t idx, size_t ptr_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive = false, bool kUnchecked = false, typename T>
void SetElementPtrSize(uint32_t idx, T element, size_t ptr_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
};
} // namespace mirror
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 5bd6583..701ba4a 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -835,7 +835,7 @@
}
void operator()(mirror::Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
StackHandleScope<1> hs(self_);
Handle<mirror::Class> h_new_class_obj(hs.NewHandle(obj->AsClass()));
mirror::Object::CopyObject(self_, h_new_class_obj.Get(), orig_->Get(), copy_bytes_);
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index ba0a9fc..be87dee 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -127,7 +127,7 @@
};
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- Status GetStatus() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Status GetStatus() SHARED_REQUIRES(Locks::mutator_lock_) {
static_assert(sizeof(Status) == sizeof(uint32_t), "Size of status not equal to uint32");
return static_cast<Status>(
GetField32Volatile<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, status_)));
@@ -135,7 +135,7 @@
// This is static because 'this' may be moved by GC.
static void SetStatus(Handle<Class> h_this, Status new_status, Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static MemberOffset StatusOffset() {
return OFFSET_OF_OBJECT_MEMBER(Class, status_);
@@ -143,146 +143,146 @@
// Returns true if the class has been retired.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsRetired() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsRetired() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetStatus<kVerifyFlags>() == kStatusRetired;
}
// Returns true if the class has failed to link.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsErroneous() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsErroneous() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetStatus<kVerifyFlags>() == kStatusError;
}
// Returns true if the class's dex type indexes have been loaded.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsIdxLoaded() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsIdxLoaded() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetStatus<kVerifyFlags>() >= kStatusIdx;
}
// Returns true if the class has been loaded.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsLoaded() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsLoaded() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetStatus<kVerifyFlags>() >= kStatusLoaded;
}
// Returns true if the class has been linked.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsResolved() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsResolved() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetStatus<kVerifyFlags>() >= kStatusResolved;
}
// Returns true if the class was compile-time verified.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsCompileTimeVerified() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsCompileTimeVerified() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetStatus<kVerifyFlags>() >= kStatusRetryVerificationAtRuntime;
}
// Returns true if the class has been verified.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsVerified() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsVerified() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetStatus<kVerifyFlags>() >= kStatusVerified;
}
// Returns true if the class is initializing.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsInitializing() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsInitializing() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetStatus<kVerifyFlags>() >= kStatusInitializing;
}
// Returns true if the class is initialized.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsInitialized() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsInitialized() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetStatus<kVerifyFlags>() == kStatusInitialized;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE uint32_t GetAccessFlags() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE uint32_t GetAccessFlags() SHARED_REQUIRES(Locks::mutator_lock_);
static MemberOffset AccessFlagsOffset() {
return OFFSET_OF_OBJECT_MEMBER(Class, access_flags_);
}
- void SetAccessFlags(uint32_t new_access_flags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetAccessFlags(uint32_t new_access_flags) SHARED_REQUIRES(Locks::mutator_lock_);
// Returns true if the class is an interface.
- ALWAYS_INLINE bool IsInterface() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE bool IsInterface() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccInterface) != 0;
}
// Returns true if the class is declared public.
- ALWAYS_INLINE bool IsPublic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE bool IsPublic() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccPublic) != 0;
}
// Returns true if the class is declared final.
- ALWAYS_INLINE bool IsFinal() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE bool IsFinal() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccFinal) != 0;
}
- ALWAYS_INLINE bool IsFinalizable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE bool IsFinalizable() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccClassIsFinalizable) != 0;
}
- ALWAYS_INLINE void SetFinalizable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE void SetFinalizable() SHARED_REQUIRES(Locks::mutator_lock_) {
uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
SetAccessFlags(flags | kAccClassIsFinalizable);
}
- ALWAYS_INLINE bool IsStringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE bool IsStringClass() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetField32(AccessFlagsOffset()) & kAccClassIsStringClass) != 0;
}
- ALWAYS_INLINE void SetStringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE void SetStringClass() SHARED_REQUIRES(Locks::mutator_lock_) {
uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
SetAccessFlags(flags | kAccClassIsStringClass);
}
// Returns true if the class is abstract.
- ALWAYS_INLINE bool IsAbstract() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE bool IsAbstract() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccAbstract) != 0;
}
// Returns true if the class is an annotation.
- ALWAYS_INLINE bool IsAnnotation() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE bool IsAnnotation() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccAnnotation) != 0;
}
// Returns true if the class is synthetic.
- ALWAYS_INLINE bool IsSynthetic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE bool IsSynthetic() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccSynthetic) != 0;
}
// Returns true if the class can avoid access checks.
- bool IsPreverified() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsPreverified() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccPreverified) != 0;
}
- void SetPreverified() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetPreverified() SHARED_REQUIRES(Locks::mutator_lock_) {
uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
SetAccessFlags(flags | kAccPreverified);
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsTypeOfReferenceClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsTypeOfReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags<kVerifyFlags>() & kAccClassIsReference) != 0;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsWeakReferenceClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsWeakReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags<kVerifyFlags>() & kAccClassIsWeakReference) != 0;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsSoftReferenceClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsSoftReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags<kVerifyFlags>() & kAccReferenceFlagsMask) == kAccClassIsReference;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsFinalizerReferenceClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsFinalizerReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags<kVerifyFlags>() & kAccClassIsFinalizerReference) != 0;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPhantomReferenceClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsPhantomReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags<kVerifyFlags>() & kAccClassIsPhantomReference) != 0;
}
@@ -291,7 +291,7 @@
// For array classes, where all the classes are final due to there being no sub-classes, an
// Object[] may be assigned to by a String[] but a String[] may not be assigned to by other
// types as the component is final.
- bool CannotBeAssignedFromOtherTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool CannotBeAssignedFromOtherTypes() SHARED_REQUIRES(Locks::mutator_lock_) {
if (!IsArrayClass()) {
return IsFinal();
} else {
@@ -306,18 +306,18 @@
// Returns true if this class is the placeholder and should retire and
// be replaced with a class with the right size for embedded imt/vtable.
- bool IsTemp() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsTemp() SHARED_REQUIRES(Locks::mutator_lock_) {
Status s = GetStatus();
return s < Status::kStatusResolving && ShouldHaveEmbeddedImtAndVTable();
}
- String* GetName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Returns the cached name.
- void SetName(String* name) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Sets the cached name.
+ String* GetName() SHARED_REQUIRES(Locks::mutator_lock_); // Returns the cached name.
+ void SetName(String* name) SHARED_REQUIRES(Locks::mutator_lock_); // Sets the cached name.
// Computes the name, then sets the cached value.
- static String* ComputeName(Handle<Class> h_this) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static String* ComputeName(Handle<Class> h_this) SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsProxyClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsProxyClass() SHARED_REQUIRES(Locks::mutator_lock_) {
// Read access flags without using the getter, as whether something is a proxy can be checked
// in any loaded state.
// TODO: switch to a check if the super class is java.lang.reflect.Proxy?
@@ -326,9 +326,9 @@
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- Primitive::Type GetPrimitiveType() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ Primitive::Type GetPrimitiveType() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_);
- void SetPrimitiveType(Primitive::Type new_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetPrimitiveType(Primitive::Type new_type) SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK_EQ(sizeof(Primitive::Type), sizeof(int32_t));
int32_t v32 = static_cast<int32_t>(new_type);
DCHECK_EQ(v32 & 0xFFFF, v32) << "upper 16 bits aren't zero";
@@ -338,83 +338,83 @@
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- size_t GetPrimitiveTypeSizeShift() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ size_t GetPrimitiveTypeSizeShift() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_);
// Returns true if the class is a primitive type.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPrimitive() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsPrimitive() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetPrimitiveType<kVerifyFlags>() != Primitive::kPrimNot;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPrimitiveBoolean() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsPrimitiveBoolean() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimBoolean;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPrimitiveByte() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsPrimitiveByte() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimByte;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPrimitiveChar() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsPrimitiveChar() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimChar;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPrimitiveShort() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsPrimitiveShort() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimShort;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPrimitiveInt() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsPrimitiveInt() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetPrimitiveType() == Primitive::kPrimInt;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPrimitiveLong() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsPrimitiveLong() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimLong;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPrimitiveFloat() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsPrimitiveFloat() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimFloat;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPrimitiveDouble() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsPrimitiveDouble() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimDouble;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPrimitiveVoid() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsPrimitiveVoid() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimVoid;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPrimitiveArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsPrimitiveArray() SHARED_REQUIRES(Locks::mutator_lock_) {
return IsArrayClass<kVerifyFlags>() &&
GetComponentType<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>()->
IsPrimitive();
}
// Depth of class from java.lang.Object
- uint32_t Depth() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ uint32_t Depth() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetComponentType<kVerifyFlags, kReadBarrierOption>() != nullptr;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsClassClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsClassClass() SHARED_REQUIRES(Locks::mutator_lock_);
- bool IsThrowableClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsThrowableClass() SHARED_REQUIRES(Locks::mutator_lock_);
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsReferenceClass() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsReferenceClass() const SHARED_REQUIRES(Locks::mutator_lock_);
static MemberOffset ComponentTypeOffset() {
return OFFSET_OF_OBJECT_MEMBER(Class, component_type_);
@@ -422,11 +422,11 @@
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- Class* GetComponentType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Class* GetComponentType() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetFieldObject<Class, kVerifyFlags, kReadBarrierOption>(ComponentTypeOffset());
}
- void SetComponentType(Class* new_component_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetComponentType(Class* new_component_type) SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(GetComponentType() == nullptr);
DCHECK(new_component_type != nullptr);
// Component type is invariant: use non-transactional mode without check.
@@ -434,43 +434,43 @@
}
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- size_t GetComponentSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ size_t GetComponentSize() SHARED_REQUIRES(Locks::mutator_lock_) {
return 1U << GetComponentSizeShift();
}
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- size_t GetComponentSizeShift() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ size_t GetComponentSizeShift() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetComponentType<kDefaultVerifyFlags, kReadBarrierOption>()->GetPrimitiveTypeSizeShift();
}
- bool IsObjectClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsObjectClass() SHARED_REQUIRES(Locks::mutator_lock_) {
return !IsPrimitive() && GetSuperClass() == nullptr;
}
- bool IsInstantiableNonArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsInstantiableNonArray() SHARED_REQUIRES(Locks::mutator_lock_) {
return !IsPrimitive() && !IsInterface() && !IsAbstract() && !IsArrayClass();
}
- bool IsInstantiable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsInstantiable() SHARED_REQUIRES(Locks::mutator_lock_) {
return (!IsPrimitive() && !IsInterface() && !IsAbstract()) ||
(IsAbstract() && IsArrayClass());
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsObjectArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsObjectArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetComponentType<kVerifyFlags>() != nullptr &&
!GetComponentType<kVerifyFlags>()->IsPrimitive();
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsIntArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsIntArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) {
constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
auto* component_type = GetComponentType<kVerifyFlags>();
return component_type != nullptr && component_type->template IsPrimitiveInt<kNewFlags>();
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsLongArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsLongArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) {
constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
auto* component_type = GetComponentType<kVerifyFlags>();
return component_type != nullptr && component_type->template IsPrimitiveLong<kNewFlags>();
@@ -479,16 +479,16 @@
// Creates a raw object instance but does not invoke the default constructor.
template<bool kIsInstrumented, bool kCheckAddFinalizer = true>
ALWAYS_INLINE Object* Alloc(Thread* self, gc::AllocatorType allocator_type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
Object* AllocObject(Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
Object* AllocNonMovableObject(Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsVariableSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsVariableSize() SHARED_REQUIRES(Locks::mutator_lock_) {
// Classes, arrays, and strings vary in size, and so the object_size_ field cannot
// be used to Get their instance size
return IsClassClass<kVerifyFlags, kReadBarrierOption>() ||
@@ -497,17 +497,17 @@
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- uint32_t SizeOf() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uint32_t SizeOf() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, class_size_));
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- uint32_t GetClassSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uint32_t GetClassSize() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, class_size_));
}
void SetClassSize(uint32_t new_class_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Compute how many bytes would be used by a class with the given elements.
static uint32_t ComputeClassSize(bool has_embedded_tables,
@@ -533,31 +533,31 @@
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- uint32_t GetObjectSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ uint32_t GetObjectSize() SHARED_REQUIRES(Locks::mutator_lock_);
static MemberOffset ObjectSizeOffset() {
return OFFSET_OF_OBJECT_MEMBER(Class, object_size_);
}
- void SetObjectSize(uint32_t new_object_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetObjectSize(uint32_t new_object_size) SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(!IsVariableSize());
// Not called within a transaction.
return SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, object_size_), new_object_size);
}
void SetObjectSizeWithoutChecks(uint32_t new_object_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// Not called within a transaction.
return SetField32<false, false, kVerifyNone>(
OFFSET_OF_OBJECT_MEMBER(Class, object_size_), new_object_size);
}
// Returns true if this class is in the same package as that class.
- bool IsInSamePackage(Class* that) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsInSamePackage(Class* that) SHARED_REQUIRES(Locks::mutator_lock_);
static bool IsInSamePackage(const StringPiece& descriptor1, const StringPiece& descriptor2);
// Returns true if this class can access that class.
- bool CanAccess(Class* that) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool CanAccess(Class* that) SHARED_REQUIRES(Locks::mutator_lock_) {
return that->IsPublic() || this->IsInSamePackage(that);
}
@@ -565,7 +565,7 @@
// Note that access to the class isn't checked in case the declaring class is protected and the
// method has been exposed by a public sub-class
bool CanAccessMember(Class* access_to, uint32_t member_flags)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// Classes can access all of their own members
if (this == access_to) {
return true;
@@ -593,30 +593,30 @@
// referenced by the FieldId in the DexFile in case the declaring class is inaccessible.
bool CanAccessResolvedField(Class* access_to, ArtField* field,
DexCache* dex_cache, uint32_t field_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool CheckResolvedFieldAccess(Class* access_to, ArtField* field,
uint32_t field_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Can this class access a resolved method?
// Note that access to the method's class is checked, and this may require looking up the class
// referenced by the MethodId in the DexFile in case the declaring class is inaccessible.
bool CanAccessResolvedMethod(Class* access_to, ArtMethod* resolved_method,
DexCache* dex_cache, uint32_t method_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template <InvokeType throw_invoke_type>
bool CheckResolvedMethodAccess(Class* access_to, ArtMethod* resolved_method,
uint32_t method_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- bool IsSubClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsSubClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
// Can src be assigned to this class? For example, String can be assigned to Object (by an
// upcast), however, an Object cannot be assigned to a String as a potentially exception throwing
// downcast would be necessary. Similarly for interfaces, a class that implements (or an interface
// that extends) another can be assigned to its parent, but not vice-versa. All Classes may assign
// to themselves. Classes for primitive types may not assign to each other.
- ALWAYS_INLINE bool IsAssignableFrom(Class* src) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE bool IsAssignableFrom(Class* src) SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(src != nullptr);
if (this == src) {
// Can always assign to things of the same type.
@@ -633,9 +633,9 @@
}
}
- ALWAYS_INLINE Class* GetSuperClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE Class* GetSuperClass() SHARED_REQUIRES(Locks::mutator_lock_);
- void SetSuperClass(Class *new_super_class) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetSuperClass(Class *new_super_class) SHARED_REQUIRES(Locks::mutator_lock_) {
// Super class is assigned once, except during class linker initialization.
Class* old_super_class = GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(Class, super_class_));
DCHECK(old_super_class == nullptr || old_super_class == new_super_class);
@@ -643,7 +643,7 @@
SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, super_class_), new_super_class);
}
- bool HasSuperClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool HasSuperClass() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetSuperClass() != nullptr;
}
@@ -651,9 +651,9 @@
return MemberOffset(OFFSETOF_MEMBER(Class, super_class_));
}
- ClassLoader* GetClassLoader() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ClassLoader* GetClassLoader() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_);
- void SetClassLoader(ClassLoader* new_cl) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetClassLoader(ClassLoader* new_cl) SHARED_REQUIRES(Locks::mutator_lock_);
static MemberOffset DexCacheOffset() {
return MemberOffset(OFFSETOF_MEMBER(Class, dex_cache_));
@@ -665,83 +665,83 @@
kDumpClassInitialized = (1 << 2),
};
- void DumpClass(std::ostream& os, int flags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void DumpClass(std::ostream& os, int flags) SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ DexCache* GetDexCache() SHARED_REQUIRES(Locks::mutator_lock_);
// Also updates the dex_cache_strings_ variable from new_dex_cache.
- void SetDexCache(DexCache* new_dex_cache) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetDexCache(DexCache* new_dex_cache) SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE StrideIterator<ArtMethod> DirectMethodsBegin(size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE StrideIterator<ArtMethod> DirectMethodsEnd(size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetDirectMethods(size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- ArtMethod* GetDirectMethodsPtr() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtMethod* GetDirectMethodsPtr() SHARED_REQUIRES(Locks::mutator_lock_);
void SetDirectMethodsPtr(ArtMethod* new_direct_methods)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Used by image writer.
void SetDirectMethodsPtrUnchecked(ArtMethod* new_direct_methods)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE ArtMethod* GetDirectMethod(size_t i, size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Use only when we are allocating or populating the method arrays.
ALWAYS_INLINE ArtMethod* GetDirectMethodUnchecked(size_t i, size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE ArtMethod* GetVirtualMethodUnchecked(size_t i, size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Returns the number of static, private, and constructor methods.
- ALWAYS_INLINE uint32_t NumDirectMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE uint32_t NumDirectMethods() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_direct_methods_));
}
- void SetNumDirectMethods(uint32_t num) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetNumDirectMethods(uint32_t num) SHARED_REQUIRES(Locks::mutator_lock_) {
return SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, num_direct_methods_), num);
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE ArtMethod* GetVirtualMethodsPtr() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE ArtMethod* GetVirtualMethodsPtr() SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE StrideIterator<ArtMethod> VirtualMethodsBegin(size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE StrideIterator<ArtMethod> VirtualMethodsEnd(size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetVirtualMethods(size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void SetVirtualMethodsPtr(ArtMethod* new_virtual_methods)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Returns the number of non-inherited virtual methods.
- ALWAYS_INLINE uint32_t NumVirtualMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE uint32_t NumVirtualMethods() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_virtual_methods_));
}
- void SetNumVirtualMethods(uint32_t num) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetNumVirtualMethods(uint32_t num) SHARED_REQUIRES(Locks::mutator_lock_) {
return SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, num_virtual_methods_), num);
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ArtMethod* GetVirtualMethod(size_t i, size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* GetVirtualMethodDuringLinking(size_t i, size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- ALWAYS_INLINE PointerArray* GetVTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE PointerArray* GetVTable() SHARED_REQUIRES(Locks::mutator_lock_);
- ALWAYS_INLINE PointerArray* GetVTableDuringLinking() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE PointerArray* GetVTableDuringLinking() SHARED_REQUIRES(Locks::mutator_lock_);
- void SetVTable(PointerArray* new_vtable) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetVTable(PointerArray* new_vtable) SHARED_REQUIRES(Locks::mutator_lock_);
static MemberOffset VTableOffset() {
return OFFSET_OF_OBJECT_MEMBER(Class, vtable_);
@@ -751,362 +751,362 @@
return MemberOffset(sizeof(Class));
}
- bool ShouldHaveEmbeddedImtAndVTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool ShouldHaveEmbeddedImtAndVTable() SHARED_REQUIRES(Locks::mutator_lock_) {
return IsInstantiable();
}
- bool HasVTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool HasVTable() SHARED_REQUIRES(Locks::mutator_lock_);
static MemberOffset EmbeddedImTableEntryOffset(uint32_t i, size_t pointer_size);
static MemberOffset EmbeddedVTableEntryOffset(uint32_t i, size_t pointer_size);
ArtMethod* GetEmbeddedImTableEntry(uint32_t i, size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void SetEmbeddedImTableEntry(uint32_t i, ArtMethod* method, size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- int32_t GetVTableLength() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ int32_t GetVTableLength() SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* GetVTableEntry(uint32_t i, size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- int32_t GetEmbeddedVTableLength() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ int32_t GetEmbeddedVTableLength() SHARED_REQUIRES(Locks::mutator_lock_);
- void SetEmbeddedVTableLength(int32_t len) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetEmbeddedVTableLength(int32_t len) SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* GetEmbeddedVTableEntry(uint32_t i, size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void SetEmbeddedVTableEntry(uint32_t i, ArtMethod* method, size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
inline void SetEmbeddedVTableEntryUnchecked(uint32_t i, ArtMethod* method, size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void PopulateEmbeddedImtAndVTable(ArtMethod* const (&methods)[kImtSize], size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Given a method implemented by this class but potentially from a super class, return the
// specific implementation method for this class.
ArtMethod* FindVirtualMethodForVirtual(ArtMethod* method, size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Given a method implemented by this class' super class, return the specific implementation
// method for this class.
ArtMethod* FindVirtualMethodForSuper(ArtMethod* method, size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Given a method implemented by this class, but potentially from a
// super class or interface, return the specific implementation
// method for this class.
ArtMethod* FindVirtualMethodForInterface(ArtMethod* method, size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE;
+ SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE;
ArtMethod* FindVirtualMethodForVirtualOrInterface(ArtMethod* method, size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindInterfaceMethod(const StringPiece& name, const StringPiece& signature,
size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindInterfaceMethod(const StringPiece& name, const Signature& signature,
size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindInterfaceMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindDeclaredDirectMethod(const StringPiece& name, const StringPiece& signature,
size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindDeclaredDirectMethod(const StringPiece& name, const Signature& signature,
size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindDeclaredDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindDirectMethod(const StringPiece& name, const StringPiece& signature,
size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindDirectMethod(const StringPiece& name, const Signature& signature,
size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindDeclaredVirtualMethod(const StringPiece& name, const StringPiece& signature,
size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindDeclaredVirtualMethod(const StringPiece& name, const Signature& signature,
size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindDeclaredVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindVirtualMethod(const StringPiece& name, const StringPiece& signature,
size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindVirtualMethod(const StringPiece& name, const Signature& signature,
size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- ArtMethod* FindClassInitializer(size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtMethod* FindClassInitializer(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
- ALWAYS_INLINE int32_t GetIfTableCount() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE int32_t GetIfTableCount() SHARED_REQUIRES(Locks::mutator_lock_);
- ALWAYS_INLINE IfTable* GetIfTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE IfTable* GetIfTable() SHARED_REQUIRES(Locks::mutator_lock_);
- ALWAYS_INLINE void SetIfTable(IfTable* new_iftable) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE void SetIfTable(IfTable* new_iftable) SHARED_REQUIRES(Locks::mutator_lock_);
// Get instance fields of the class (See also GetSFields).
- ArtField* GetIFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtField* GetIFields() SHARED_REQUIRES(Locks::mutator_lock_);
- void SetIFields(ArtField* new_ifields) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetIFields(ArtField* new_ifields) SHARED_REQUIRES(Locks::mutator_lock_);
// Unchecked edition has no verification flags.
- void SetIFieldsUnchecked(ArtField* new_sfields) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetIFieldsUnchecked(ArtField* new_sfields) SHARED_REQUIRES(Locks::mutator_lock_);
- uint32_t NumInstanceFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uint32_t NumInstanceFields() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_instance_fields_));
}
- void SetNumInstanceFields(uint32_t num) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetNumInstanceFields(uint32_t num) SHARED_REQUIRES(Locks::mutator_lock_) {
return SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, num_instance_fields_), num);
}
- ArtField* GetInstanceField(uint32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtField* GetInstanceField(uint32_t i) SHARED_REQUIRES(Locks::mutator_lock_);
// Returns the number of instance fields containing reference types.
- uint32_t NumReferenceInstanceFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uint32_t NumReferenceInstanceFields() SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(IsResolved() || IsErroneous());
return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_instance_fields_));
}
- uint32_t NumReferenceInstanceFieldsDuringLinking() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uint32_t NumReferenceInstanceFieldsDuringLinking() SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(IsLoaded() || IsErroneous());
return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_instance_fields_));
}
- void SetNumReferenceInstanceFields(uint32_t new_num) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetNumReferenceInstanceFields(uint32_t new_num) SHARED_REQUIRES(Locks::mutator_lock_) {
// Not called within a transaction.
SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_instance_fields_), new_num);
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- uint32_t GetReferenceInstanceOffsets() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ uint32_t GetReferenceInstanceOffsets() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_);
void SetReferenceInstanceOffsets(uint32_t new_reference_offsets)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Get the offset of the first reference instance field. Other reference instance fields follow.
MemberOffset GetFirstReferenceInstanceFieldOffset()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Returns the number of static fields containing reference types.
- uint32_t NumReferenceStaticFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uint32_t NumReferenceStaticFields() SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(IsResolved() || IsErroneous());
return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_static_fields_));
}
- uint32_t NumReferenceStaticFieldsDuringLinking() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uint32_t NumReferenceStaticFieldsDuringLinking() SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(IsLoaded() || IsErroneous() || IsRetired());
return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_static_fields_));
}
- void SetNumReferenceStaticFields(uint32_t new_num) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetNumReferenceStaticFields(uint32_t new_num) SHARED_REQUIRES(Locks::mutator_lock_) {
// Not called within a transaction.
SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_static_fields_), new_num);
}
// Get the offset of the first reference static field. Other reference static fields follow.
MemberOffset GetFirstReferenceStaticFieldOffset(size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Get the offset of the first reference static field. Other reference static fields follow.
MemberOffset GetFirstReferenceStaticFieldOffsetDuringLinking(size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Gets the static fields of the class.
- ArtField* GetSFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtField* GetSFields() SHARED_REQUIRES(Locks::mutator_lock_);
- void SetSFields(ArtField* new_sfields) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetSFields(ArtField* new_sfields) SHARED_REQUIRES(Locks::mutator_lock_);
// Unchecked edition has no verification flags.
- void SetSFieldsUnchecked(ArtField* new_sfields) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetSFieldsUnchecked(ArtField* new_sfields) SHARED_REQUIRES(Locks::mutator_lock_);
- uint32_t NumStaticFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uint32_t NumStaticFields() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_static_fields_));
}
- void SetNumStaticFields(uint32_t num) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetNumStaticFields(uint32_t num) SHARED_REQUIRES(Locks::mutator_lock_) {
return SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, num_static_fields_), num);
}
// TODO: uint16_t
- ArtField* GetStaticField(uint32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtField* GetStaticField(uint32_t i) SHARED_REQUIRES(Locks::mutator_lock_);
// Find a static or instance field using the JLS resolution order
static ArtField* FindField(Thread* self, Handle<Class> klass, const StringPiece& name,
const StringPiece& type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Finds the given instance field in this class or a superclass.
ArtField* FindInstanceField(const StringPiece& name, const StringPiece& type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Finds the given instance field in this class or a superclass, only searches classes that
// have the same dex cache.
ArtField* FindInstanceField(const DexCache* dex_cache, uint32_t dex_field_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ArtField* FindDeclaredInstanceField(const StringPiece& name, const StringPiece& type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ArtField* FindDeclaredInstanceField(const DexCache* dex_cache, uint32_t dex_field_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Finds the given static field in this class or a superclass.
static ArtField* FindStaticField(Thread* self, Handle<Class> klass, const StringPiece& name,
const StringPiece& type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Finds the given static field in this class or superclass, only searches classes that
// have the same dex cache.
static ArtField* FindStaticField(Thread* self, Handle<Class> klass, const DexCache* dex_cache,
uint32_t dex_field_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ArtField* FindDeclaredStaticField(const StringPiece& name, const StringPiece& type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ArtField* FindDeclaredStaticField(const DexCache* dex_cache, uint32_t dex_field_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- pid_t GetClinitThreadId() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ pid_t GetClinitThreadId() SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(IsIdxLoaded() || IsErroneous());
return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, clinit_thread_id_));
}
- void SetClinitThreadId(pid_t new_clinit_thread_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetClinitThreadId(pid_t new_clinit_thread_id) SHARED_REQUIRES(Locks::mutator_lock_);
- Class* GetVerifyErrorClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Class* GetVerifyErrorClass() SHARED_REQUIRES(Locks::mutator_lock_) {
// DCHECK(IsErroneous());
return GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(Class, verify_error_class_));
}
- uint16_t GetDexClassDefIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uint16_t GetDexClassDefIndex() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, dex_class_def_idx_));
}
- void SetDexClassDefIndex(uint16_t class_def_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetDexClassDefIndex(uint16_t class_def_idx) SHARED_REQUIRES(Locks::mutator_lock_) {
// Not called within a transaction.
SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, dex_class_def_idx_), class_def_idx);
}
- uint16_t GetDexTypeIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uint16_t GetDexTypeIndex() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, dex_type_idx_));
}
- void SetDexTypeIndex(uint16_t type_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetDexTypeIndex(uint16_t type_idx) SHARED_REQUIRES(Locks::mutator_lock_) {
// Not called within a transaction.
SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, dex_type_idx_), type_idx);
}
- static Class* GetJavaLangClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ static Class* GetJavaLangClass() SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(HasJavaLangClass());
return java_lang_Class_.Read();
}
- static bool HasJavaLangClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ static bool HasJavaLangClass() SHARED_REQUIRES(Locks::mutator_lock_) {
return !java_lang_Class_.IsNull();
}
// Can't call this SetClass, or else it gets called instead of Object::SetClass in places.
- static void SetClassClass(Class* java_lang_Class) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void SetClassClass(Class* java_lang_Class) SHARED_REQUIRES(Locks::mutator_lock_);
static void ResetClass();
static void VisitRoots(RootVisitor* visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Visit native roots visits roots which are keyed off the native pointers such as ArtFields and
// ArtMethods.
template<class Visitor>
void VisitNativeRoots(Visitor& visitor, size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// When a class is verified, set the kAccPreverified flag on each method.
void SetPreverifiedFlagOnAllMethods(size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template <bool kVisitClass, typename Visitor>
void VisitReferences(mirror::Class* klass, const Visitor& visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Get the descriptor of the class. In a few cases a std::string is required; rather than
// always creating one, the storage argument is populated and its internal c_str() is
// returned. We do this to avoid memory allocation in the common case.
- const char* GetDescriptor(std::string* storage) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const char* GetDescriptor(std::string* storage) SHARED_REQUIRES(Locks::mutator_lock_);
- const char* GetArrayDescriptor(std::string* storage) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const char* GetArrayDescriptor(std::string* storage) SHARED_REQUIRES(Locks::mutator_lock_);
- bool DescriptorEquals(const char* match) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool DescriptorEquals(const char* match) SHARED_REQUIRES(Locks::mutator_lock_);
- const DexFile::ClassDef* GetClassDef() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const DexFile::ClassDef* GetClassDef() SHARED_REQUIRES(Locks::mutator_lock_);
- ALWAYS_INLINE uint32_t NumDirectInterfaces() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE uint32_t NumDirectInterfaces() SHARED_REQUIRES(Locks::mutator_lock_);
- uint16_t GetDirectInterfaceTypeIdx(uint32_t idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ uint16_t GetDirectInterfaceTypeIdx(uint32_t idx) SHARED_REQUIRES(Locks::mutator_lock_);
static mirror::Class* GetDirectInterface(Thread* self, Handle<mirror::Class> klass,
uint32_t idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- const char* GetSourceFile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const char* GetSourceFile() SHARED_REQUIRES(Locks::mutator_lock_);
- std::string GetLocation() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string GetLocation() SHARED_REQUIRES(Locks::mutator_lock_);
- const DexFile& GetDexFile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const DexFile& GetDexFile() SHARED_REQUIRES(Locks::mutator_lock_);
- const DexFile::TypeList* GetInterfaceTypeList() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const DexFile::TypeList* GetInterfaceTypeList() SHARED_REQUIRES(Locks::mutator_lock_);
// Asserts we are initialized or initializing in the given thread.
void AssertInitializedOrInitializingInThread(Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
Class* CopyOf(Thread* self, int32_t new_length, ArtMethod* const (&imt)[mirror::Class::kImtSize],
- size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
// For proxy class only.
- ObjectArray<Class>* GetInterfaces() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ObjectArray<Class>* GetInterfaces() SHARED_REQUIRES(Locks::mutator_lock_);
// For proxy class only.
- ObjectArray<ObjectArray<Class>>* GetThrows() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ObjectArray<ObjectArray<Class>>* GetThrows() SHARED_REQUIRES(Locks::mutator_lock_);
// For reference class only.
- MemberOffset GetDisableIntrinsicFlagOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- MemberOffset GetSlowPathFlagOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool GetSlowPathEnabled() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetSlowPath(bool enabled) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ MemberOffset GetDisableIntrinsicFlagOffset() SHARED_REQUIRES(Locks::mutator_lock_);
+ MemberOffset GetSlowPathFlagOffset() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool GetSlowPathEnabled() SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetSlowPath(bool enabled) SHARED_REQUIRES(Locks::mutator_lock_);
- ObjectArray<String>* GetDexCacheStrings() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ObjectArray<String>* GetDexCacheStrings() SHARED_REQUIRES(Locks::mutator_lock_);
void SetDexCacheStrings(ObjectArray<String>* new_dex_cache_strings)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static MemberOffset DexCacheStringsOffset() {
return OFFSET_OF_OBJECT_MEMBER(Class, dex_cache_strings_);
}
@@ -1114,7 +1114,7 @@
// May cause thread suspension due to EqualParameters.
ArtMethod* GetDeclaredConstructor(
Thread* self, Handle<mirror::ObjectArray<mirror::Class>> args)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Used to initialize a class in the allocation code path to ensure it is guarded by a StoreStore
// fence.
@@ -1124,7 +1124,7 @@
}
void operator()(mirror::Object* obj, size_t usable_size) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
private:
const uint32_t class_size_;
@@ -1133,7 +1133,7 @@
};
// Returns true if the class loader is null, i.e. the class loader is the bootstrap class loader.
- bool IsBootStrapClassLoaded() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsBootStrapClassLoaded() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetClassLoader() == nullptr;
}
@@ -1146,34 +1146,34 @@
}
ALWAYS_INLINE ArtMethod* GetDirectMethodsPtrUnchecked()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE ArtMethod* GetVirtualMethodsPtrUnchecked()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
private:
- void SetVerifyErrorClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetVerifyErrorClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
template <bool throw_on_failure, bool use_referrers_cache>
bool ResolvedFieldAccessTest(Class* access_to, ArtField* field,
uint32_t field_idx, DexCache* dex_cache)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template <bool throw_on_failure, bool use_referrers_cache, InvokeType throw_invoke_type>
bool ResolvedMethodAccessTest(Class* access_to, ArtMethod* resolved_method,
uint32_t method_idx, DexCache* dex_cache)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- bool Implements(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool IsArrayAssignableFromArray(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool IsAssignableFromArray(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool Implements(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsArrayAssignableFromArray(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsAssignableFromArray(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
- void CheckObjectAlloc() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void CheckObjectAlloc() SHARED_REQUIRES(Locks::mutator_lock_);
// Unchecked versions are for root visiting.
- ArtField* GetSFieldsUnchecked() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ArtField* GetIFieldsUnchecked() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtField* GetSFieldsUnchecked() SHARED_REQUIRES(Locks::mutator_lock_);
+ ArtField* GetIFieldsUnchecked() SHARED_REQUIRES(Locks::mutator_lock_);
- bool ProxyDescriptorEquals(const char* match) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool ProxyDescriptorEquals(const char* match) SHARED_REQUIRES(Locks::mutator_lock_);
// Check that the pointer size matches the one in the class linker.
ALWAYS_INLINE static void CheckPointerSize(size_t pointer_size);
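The rename itself is mechanical: SHARED_REQUIRES is the capability-based spelling of the old
SHARED_LOCKS_REQUIRED. A plausible sketch of how such macros map onto clang's capability
attributes (illustrative only; the real definitions live in base/macros.h, outside this diff):

// Sketch, not the actual base/macros.h:
#define CAPABILITY(x)        __attribute__((capability(x)))
// Exclusive (writer) capability must be held by the caller.
#define REQUIRES(...)        __attribute__((requires_capability(__VA_ARGS__)))
// Shared (reader) capability must be held; replaces SHARED_LOCKS_REQUIRED.
#define SHARED_REQUIRES(...) __attribute__((requires_shared_capability(__VA_ARGS__)))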
diff --git a/runtime/mirror/class_loader.h b/runtime/mirror/class_loader.h
index b10a296..134f1cd 100644
--- a/runtime/mirror/class_loader.h
+++ b/runtime/mirror/class_loader.h
@@ -32,7 +32,7 @@
static constexpr uint32_t InstanceSize() {
return sizeof(ClassLoader);
}
- ClassLoader* GetParent() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ClassLoader* GetParent() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetFieldObject<ClassLoader>(OFFSET_OF_OBJECT_MEMBER(ClassLoader, parent_));
}
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index 0ce83ec..ba49a15 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -48,12 +48,12 @@
void Init(const DexFile* dex_file, String* location, ObjectArray<String>* strings,
ObjectArray<Class>* types, PointerArray* methods, PointerArray* fields,
- size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
void Fixup(ArtMethod* trampoline, size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- String* GetLocation() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ String* GetLocation() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(DexCache, location_));
}
@@ -73,76 +73,76 @@
return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_methods_);
}
- size_t NumStrings() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ size_t NumStrings() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetStrings()->GetLength();
}
- size_t NumResolvedTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ size_t NumResolvedTypes() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetResolvedTypes()->GetLength();
}
- size_t NumResolvedMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ size_t NumResolvedMethods() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetResolvedMethods()->GetLength();
}
- size_t NumResolvedFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ size_t NumResolvedFields() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetResolvedFields()->GetLength();
}
- String* GetResolvedString(uint32_t string_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ String* GetResolvedString(uint32_t string_idx) SHARED_REQUIRES(Locks::mutator_lock_) {
return GetStrings()->Get(string_idx);
}
void SetResolvedString(uint32_t string_idx, String* resolved) ALWAYS_INLINE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// TODO default transaction support.
GetStrings()->Set(string_idx, resolved);
}
Class* GetResolvedType(uint32_t type_idx) ALWAYS_INLINE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return GetResolvedTypes()->Get(type_idx);
}
void SetResolvedType(uint32_t type_idx, Class* resolved)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE ArtMethod* GetResolvedMethod(uint32_t method_idx, size_t ptr_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE void SetResolvedMethod(uint32_t method_idx, ArtMethod* resolved, size_t ptr_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Pointer sized variant, used for patching.
ALWAYS_INLINE ArtField* GetResolvedField(uint32_t idx, size_t ptr_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Pointer sized variant, used for patching.
ALWAYS_INLINE void SetResolvedField(uint32_t idx, ArtField* field, size_t ptr_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- ObjectArray<String>* GetStrings() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ObjectArray<String>* GetStrings() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
return GetFieldObject<ObjectArray<String>>(StringsOffset());
}
- ObjectArray<Class>* GetResolvedTypes() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ObjectArray<Class>* GetResolvedTypes() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
return GetFieldObject<ObjectArray<Class>>(
OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_types_));
}
- PointerArray* GetResolvedMethods() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ PointerArray* GetResolvedMethods() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
return GetFieldObject<PointerArray>(ResolvedMethodsOffset());
}
- PointerArray* GetResolvedFields() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ PointerArray* GetResolvedFields() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
return GetFieldObject<PointerArray>(ResolvedFieldsOffset());
}
- const DexFile* GetDexFile() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const DexFile* GetDexFile() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
return GetFieldPtr<const DexFile*>(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_));
}
- void SetDexFile(const DexFile* dex_file) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ void SetDexFile(const DexFile* dex_file) SHARED_REQUIRES(Locks::mutator_lock_)
ALWAYS_INLINE {
return SetFieldPtr<false>(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_), dex_file);
}
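All of the DexCache accessors above now carry SHARED_REQUIRES(Locks::mutator_lock_), so a
caller only passes -Wthread-safety if it advertises the same shared capability. A minimal
illustrative caller (the function name is an assumption, not ART code):

// Sketch: the annotation on the caller is what lets the analysis accept
// the annotated call in its body.
mirror::String* LookupResolvedString(mirror::DexCache* cache, uint32_t idx)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  return cache->GetResolvedString(idx);  // OK: shared capability is held here
}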
diff --git a/runtime/mirror/field.h b/runtime/mirror/field.h
index d927f0c..b5a954f 100644
--- a/runtime/mirror/field.h
+++ b/runtime/mirror/field.h
@@ -36,66 +36,66 @@
// C++ mirror of java.lang.reflect.Field.
class MANAGED Field : public AccessibleObject {
public:
- static mirror::Class* StaticClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ static mirror::Class* StaticClass() SHARED_REQUIRES(Locks::mutator_lock_) {
return static_class_.Read();
}
- static mirror::Class* ArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ static mirror::Class* ArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) {
return array_class_.Read();
}
- ALWAYS_INLINE uint32_t GetDexFieldIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE uint32_t GetDexFieldIndex() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetField32(OFFSET_OF_OBJECT_MEMBER(Field, dex_field_index_));
}
- mirror::Class* GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Class* GetDeclaringClass() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(Field, declaring_class_));
}
- uint32_t GetAccessFlags() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uint32_t GetAccessFlags() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetField32(OFFSET_OF_OBJECT_MEMBER(Field, access_flags_));
}
- bool IsStatic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsStatic() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccStatic) != 0;
}
- bool IsFinal() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsFinal() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccFinal) != 0;
}
- bool IsVolatile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsVolatile() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccVolatile) != 0;
}
ALWAYS_INLINE Primitive::Type GetTypeAsPrimitiveType()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return GetType()->GetPrimitiveType();
}
- mirror::Class* GetType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Class* GetType() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetFieldObject<mirror::Class>(OFFSET_OF_OBJECT_MEMBER(Field, type_));
}
- int32_t GetOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ int32_t GetOffset() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetField32(OFFSET_OF_OBJECT_MEMBER(Field, offset_));
}
- static void SetClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static void ResetClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void SetClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+ static void ResetClass() SHARED_REQUIRES(Locks::mutator_lock_);
- static void SetArrayClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static void ResetArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void SetArrayClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+ static void ResetArrayClass() SHARED_REQUIRES(Locks::mutator_lock_);
- static void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
// Slow, try to use only for PrettyField and such.
- ArtField* GetArtField() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtField* GetArtField() SHARED_REQUIRES(Locks::mutator_lock_);
template <bool kTransactionActive = false>
static mirror::Field* CreateFromArtField(Thread* self, ArtField* field,
bool force_resolve)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
private:
HeapReference<mirror::Class> declaring_class_;
@@ -105,27 +105,27 @@
int32_t offset_;
template<bool kTransactionActive>
- void SetDeclaringClass(mirror::Class* c) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetDeclaringClass(mirror::Class* c) SHARED_REQUIRES(Locks::mutator_lock_) {
SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, declaring_class_), c);
}
template<bool kTransactionActive>
- void SetType(mirror::Class* type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetType(mirror::Class* type) SHARED_REQUIRES(Locks::mutator_lock_) {
SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, type_), type);
}
template<bool kTransactionActive>
- void SetAccessFlags(uint32_t flags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetAccessFlags(uint32_t flags) SHARED_REQUIRES(Locks::mutator_lock_) {
SetField32<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, access_flags_), flags);
}
template<bool kTransactionActive>
- void SetDexFieldIndex(uint32_t idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetDexFieldIndex(uint32_t idx) SHARED_REQUIRES(Locks::mutator_lock_) {
SetField32<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, dex_field_index_), idx);
}
template<bool kTransactionActive>
- void SetOffset(uint32_t offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetOffset(uint32_t offset) SHARED_REQUIRES(Locks::mutator_lock_) {
SetField32<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, offset_), offset);
}
diff --git a/runtime/mirror/iftable.h b/runtime/mirror/iftable.h
index 1ea5bee..b21ecdf 100644
--- a/runtime/mirror/iftable.h
+++ b/runtime/mirror/iftable.h
@@ -25,34 +25,34 @@
class MANAGED IfTable FINAL : public ObjectArray<Object> {
public:
- ALWAYS_INLINE Class* GetInterface(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE Class* GetInterface(int32_t i) SHARED_REQUIRES(Locks::mutator_lock_) {
Class* interface = GetWithoutChecks((i * kMax) + kInterface)->AsClass();
DCHECK(interface != nullptr);
return interface;
}
ALWAYS_INLINE void SetInterface(int32_t i, Class* interface)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- PointerArray* GetMethodArray(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ PointerArray* GetMethodArray(int32_t i) SHARED_REQUIRES(Locks::mutator_lock_) {
auto* method_array = down_cast<PointerArray*>(Get((i * kMax) + kMethodArray));
DCHECK(method_array != nullptr);
return method_array;
}
- size_t GetMethodArrayCount(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ size_t GetMethodArrayCount(int32_t i) SHARED_REQUIRES(Locks::mutator_lock_) {
auto* method_array = down_cast<PointerArray*>(Get((i * kMax) + kMethodArray));
return method_array == nullptr ? 0u : method_array->GetLength();
}
- void SetMethodArray(int32_t i, PointerArray* arr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetMethodArray(int32_t i, PointerArray* arr) SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(arr != nullptr);
auto idx = i * kMax + kMethodArray;
DCHECK(Get(idx) == nullptr);
Set<false>(idx, arr);
}
- size_t Count() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ size_t Count() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetLength() / kMax;
}
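The accessors above store IfTable as one flat ObjectArray of fixed-width records rather than
an array of structs; schematically (kInterface, kMethodArray, and kMax are enum values of the
class, assumed here since this hunk does not show them):

// Sketch of the layout GetInterface()/GetMethodArray() assume:
//   slot 0: interface 0     (index 0 * kMax + kInterface)
//   slot 1: method array 0  (index 0 * kMax + kMethodArray)
//   slot 2: interface 1     (index 1 * kMax + kInterface)
//   slot 3: method array 1  ...
// Hence Count() == GetLength() / kMax.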
diff --git a/runtime/mirror/method.h b/runtime/mirror/method.h
index 42c76c0..fe021db 100644
--- a/runtime/mirror/method.h
+++ b/runtime/mirror/method.h
@@ -29,25 +29,25 @@
class MANAGED Method : public AbstractMethod {
public:
static Method* CreateFromArtMethod(Thread* self, ArtMethod* method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- static mirror::Class* StaticClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ static mirror::Class* StaticClass() SHARED_REQUIRES(Locks::mutator_lock_) {
return static_class_.Read();
}
- static void SetClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void SetClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
- static void ResetClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void ResetClass() SHARED_REQUIRES(Locks::mutator_lock_);
- static mirror::Class* ArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ static mirror::Class* ArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) {
return array_class_.Read();
}
- static void SetArrayClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void SetArrayClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
- static void ResetArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void ResetArrayClass() SHARED_REQUIRES(Locks::mutator_lock_);
- static void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
private:
static GcRoot<Class> static_class_; // java.lang.reflect.Method.class.
@@ -60,25 +60,25 @@
class MANAGED Constructor: public AbstractMethod {
public:
static Constructor* CreateFromArtMethod(Thread* self, ArtMethod* method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- static mirror::Class* StaticClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ static mirror::Class* StaticClass() SHARED_REQUIRES(Locks::mutator_lock_) {
return static_class_.Read();
}
- static void SetClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void SetClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
- static void ResetClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void ResetClass() SHARED_REQUIRES(Locks::mutator_lock_);
- static mirror::Class* ArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ static mirror::Class* ArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) {
return array_class_.Read();
}
- static void SetArrayClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void SetArrayClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
- static void ResetArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void ResetArrayClass() SHARED_REQUIRES(Locks::mutator_lock_);
- static void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
private:
static GcRoot<Class> static_class_; // java.lang.reflect.Constructor.class.
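Method and Constructor repeat the same cached-class pattern: a GcRoot<Class> holds
java.lang.reflect.X.class and must be reported through VisitRoots() so a moving collector can
update it. Roughly (a sketch under assumed GcRoot/RootInfo signatures, not the actual
mirror/method.cc bodies):

// Sketch only: plausible out-of-line definitions for the declarations above.
void Method::SetClass(Class* klass) {
  CHECK(static_class_.IsNull());
  static_class_ = GcRoot<Class>(klass);
}

void Method::VisitRoots(RootVisitor* visitor) {
  // Report the cached class objects so the GC can relocate them.
  static_class_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
  array_class_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
}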
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index e019d5a..c5610b5 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -477,7 +477,7 @@
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags,
bool kIsVolatile>
inline void Object::SetFieldBoolean(MemberOffset field_offset, uint8_t new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (kCheckTransaction) {
DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
}
@@ -495,7 +495,7 @@
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags,
bool kIsVolatile>
inline void Object::SetFieldByte(MemberOffset field_offset, int8_t new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (kCheckTransaction) {
DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
}
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index b177e2f..27dd743 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -47,7 +47,7 @@
: dest_obj_(dest_obj) {}
void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
- ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
// GetFieldObject() contains a read barrier (RB).
Object* ref = obj->GetFieldObject<Object>(offset);
// No write barrier (WB) here, as a large object space does not have a card table
@@ -56,7 +56,7 @@
}
void operator()(mirror::Class* klass, mirror::Reference* ref) const
- ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
// Copy java.lang.ref.Reference.referent, which isn't visited in
// Object::VisitReferences().
DCHECK(klass->IsTypeOfReferenceClass());
@@ -107,7 +107,7 @@
}
void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
Object::CopyObject(self_, obj, orig_->Get(), num_bytes_);
}
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index f1c96b5..8b541fd 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -84,40 +84,39 @@
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- ALWAYS_INLINE Class* GetClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE Class* GetClass() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- void SetClass(Class* new_klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetClass(Class* new_klass) SHARED_REQUIRES(Locks::mutator_lock_);
- Object* GetReadBarrierPointer() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ Object* GetReadBarrierPointer() SHARED_REQUIRES(Locks::mutator_lock_);
#ifndef USE_BAKER_OR_BROOKS_READ_BARRIER
NO_RETURN
#endif
- void SetReadBarrierPointer(Object* rb_ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetReadBarrierPointer(Object* rb_ptr) SHARED_REQUIRES(Locks::mutator_lock_);
#ifndef USE_BAKER_OR_BROOKS_READ_BARRIER
NO_RETURN
#endif
bool AtomicSetReadBarrierPointer(Object* expected_rb_ptr, Object* rb_ptr)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void AssertReadBarrierPointer() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ void AssertReadBarrierPointer() const SHARED_REQUIRES(Locks::mutator_lock_);
// The verifier treats all interfaces as java.lang.Object and relies on runtime checks in
// invoke-interface to detect incompatible interface types.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool VerifierInstanceOf(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool VerifierInstanceOf(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE bool InstanceOf(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE bool InstanceOf(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- size_t SizeOf() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ size_t SizeOf() SHARED_REQUIRES(Locks::mutator_lock_);
- Object* Clone(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ Object* Clone(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
int32_t IdentityHashCode() const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(Locks::thread_list_lock_,
- Locks::thread_suspend_count_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
static MemberOffset MonitorOffset() {
return OFFSET_OF_OBJECT_MEMBER(Object, monitor_);
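The IdentityHashCode() change just above is the substantive one in this file: LOCKS_EXCLUDED
was advisory and unchecked at call sites, whereas REQUIRES(!lock) states a negative capability
that -Wthread-safety-negative actually enforces. A self-contained sketch of the difference
(toy Mutex, not ART's):

// Compile with: clang++ -c -Wthread-safety -Wthread-safety-negative neg.cc
#define CAPABILITY(x) __attribute__((capability(x)))
#define REQUIRES(...) __attribute__((requires_capability(__VA_ARGS__)))
#define ACQUIRE(...)  __attribute__((acquire_capability(__VA_ARGS__)))
#define RELEASE(...)  __attribute__((release_capability(__VA_ARGS__)))

class CAPABILITY("mutex") Mutex {
 public:
  void Lock() ACQUIRE(this) {}
  void Unlock() RELEASE(this) {}
};

Mutex mu;

void Slow() REQUIRES(!mu) {  // contract: mu must NOT be held on entry,
  mu.Lock();                 // so this acquire can never self-deadlock
  mu.Unlock();
}

void Caller() REQUIRES(!mu) {  // negative capabilities propagate to callers
  Slow();                      // OK: !mu provably holds here
}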
@@ -126,298 +125,298 @@
// as_volatile can be false if the mutators are suspended. This is an optimization since it
// avoids the barriers.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- LockWord GetLockWord(bool as_volatile) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ LockWord GetLockWord(bool as_volatile) SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- void SetLockWord(LockWord new_val, bool as_volatile) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetLockWord(LockWord new_val, bool as_volatile) SHARED_REQUIRES(Locks::mutator_lock_);
bool CasLockWordWeakSequentiallyConsistent(LockWord old_val, LockWord new_val)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool CasLockWordWeakRelaxed(LockWord old_val, LockWord new_val)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
uint32_t GetLockOwnerThreadId();
- mirror::Object* MonitorEnter(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ mirror::Object* MonitorEnter(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_)
EXCLUSIVE_LOCK_FUNCTION();
- bool MonitorExit(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ bool MonitorExit(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_)
UNLOCK_FUNCTION();
- void Notify(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void NotifyAll(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void Wait(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void Wait(Thread* self, int64_t timeout, int32_t nanos) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void Notify(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
+ void NotifyAll(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
+ void Wait(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
+ void Wait(Thread* self, int64_t timeout, int32_t nanos) SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsClass() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- Class* AsClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ Class* AsClass() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsObjectArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsObjectArray() SHARED_REQUIRES(Locks::mutator_lock_);
template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ObjectArray<T>* AsObjectArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ObjectArray<T>* AsObjectArray() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsArrayInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsArrayInstance() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- Array* AsArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ Array* AsArray() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- BooleanArray* AsBooleanArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ BooleanArray* AsBooleanArray() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ByteArray* AsByteArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ByteArray* AsByteArray() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ByteArray* AsByteSizedArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ByteArray* AsByteSizedArray() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- CharArray* AsCharArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ CharArray* AsCharArray() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ShortArray* AsShortArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ShortArray* AsShortArray() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ShortArray* AsShortSizedArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ShortArray* AsShortSizedArray() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsIntArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsIntArray() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- IntArray* AsIntArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ IntArray* AsIntArray() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsLongArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsLongArray() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- LongArray* AsLongArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ LongArray* AsLongArray() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsFloatArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsFloatArray() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- FloatArray* AsFloatArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ FloatArray* AsFloatArray() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsDoubleArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsDoubleArray() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- DoubleArray* AsDoubleArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ DoubleArray* AsDoubleArray() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsString() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- String* AsString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ String* AsString() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- Throwable* AsThrowable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ Throwable* AsThrowable() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsReferenceInstance() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- Reference* AsReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ Reference* AsReference() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsWeakReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsWeakReferenceInstance() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsSoftReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsSoftReferenceInstance() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsFinalizerReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsFinalizerReferenceInstance() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- FinalizerReference* AsFinalizerReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ FinalizerReference* AsFinalizerReference() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPhantomReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsPhantomReferenceInstance() SHARED_REQUIRES(Locks::mutator_lock_);
// Accessor for Java type fields.
template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier, bool kIsVolatile = false>
ALWAYS_INLINE T* GetFieldObject(MemberOffset field_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
ALWAYS_INLINE T* GetFieldObjectVolatile(MemberOffset field_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE void SetFieldObjectWithoutWriteBarrier(MemberOffset field_offset, Object* new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE void SetFieldObject(MemberOffset field_offset, Object* new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE void SetFieldObjectVolatile(MemberOffset field_offset, Object* new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldWeakSequentiallyConsistentObject(MemberOffset field_offset, Object* old_value,
Object* new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier(MemberOffset field_offset,
Object* old_value,
Object* new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldStrongSequentiallyConsistentObject(MemberOffset field_offset, Object* old_value,
Object* new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldStrongSequentiallyConsistentObjectWithoutWriteBarrier(MemberOffset field_offset,
Object* old_value,
Object* new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
HeapReference<Object>* GetFieldObjectReferenceAddr(MemberOffset field_offset);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE uint8_t GetFieldBoolean(MemberOffset field_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE int8_t GetFieldByte(MemberOffset field_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE uint8_t GetFieldBooleanVolatile(MemberOffset field_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE int8_t GetFieldByteVolatile(MemberOffset field_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE void SetFieldBoolean(MemberOffset field_offset, uint8_t new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE void SetFieldByte(MemberOffset field_offset, int8_t new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE void SetFieldBooleanVolatile(MemberOffset field_offset, uint8_t new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE void SetFieldByteVolatile(MemberOffset field_offset, int8_t new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE uint16_t GetFieldChar(MemberOffset field_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE int16_t GetFieldShort(MemberOffset field_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE uint16_t GetFieldCharVolatile(MemberOffset field_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE int16_t GetFieldShortVolatile(MemberOffset field_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE void SetFieldChar(MemberOffset field_offset, uint16_t new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE void SetFieldShort(MemberOffset field_offset, int16_t new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE void SetFieldCharVolatile(MemberOffset field_offset, uint16_t new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE void SetFieldShortVolatile(MemberOffset field_offset, int16_t new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE int32_t GetField32(MemberOffset field_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE int32_t GetField32Volatile(MemberOffset field_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE void SetField32(MemberOffset field_offset, int32_t new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE void SetField32Volatile(MemberOffset field_offset, int32_t new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE bool CasFieldWeakSequentiallyConsistent32(MemberOffset field_offset,
int32_t old_value, int32_t new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldWeakRelaxed32(MemberOffset field_offset, int32_t old_value,
int32_t new_value) ALWAYS_INLINE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldStrongSequentiallyConsistent32(MemberOffset field_offset, int32_t old_value,
int32_t new_value) ALWAYS_INLINE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE int64_t GetField64(MemberOffset field_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE int64_t GetField64Volatile(MemberOffset field_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE void SetField64(MemberOffset field_offset, int64_t new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE void SetField64Volatile(MemberOffset field_offset, int64_t new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldWeakSequentiallyConsistent64(MemberOffset field_offset, int64_t old_value,
int64_t new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldStrongSequentiallyConsistent64(MemberOffset field_offset, int64_t old_value,
int64_t new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, typename T>
void SetFieldPtr(MemberOffset field_offset, T new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
SetFieldPtrWithSize<kTransactionActive, kCheckTransaction, kVerifyFlags>(
field_offset, new_value, sizeof(void*));
}
@@ -426,7 +425,7 @@
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, typename T>
ALWAYS_INLINE void SetFieldPtrWithSize(MemberOffset field_offset, T new_value,
size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(pointer_size == 4 || pointer_size == 8) << pointer_size;
if (pointer_size == 4) {
intptr_t ptr = reinterpret_cast<intptr_t>(new_value);
@@ -439,13 +438,13 @@
}
}
// TODO fix thread safety analysis broken by the use of templates. This should be
- // SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
+ // SHARED_REQUIRES(Locks::mutator_lock_).
template <const bool kVisitClass, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
typename Visitor, typename JavaLangRefVisitor = VoidFunctor>
void VisitReferences(const Visitor& visitor, const JavaLangRefVisitor& ref_visitor)
NO_THREAD_SAFETY_ANALYSIS;
- ArtField* FindFieldByOffset(MemberOffset offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtField* FindFieldByOffset(MemberOffset offset) SHARED_REQUIRES(Locks::mutator_lock_);
// Used by object_test.
static void SetHashCodeSeed(uint32_t new_seed);
@@ -456,13 +455,13 @@
// Accessors for non-Java type fields
template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
T GetFieldPtr(MemberOffset field_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return GetFieldPtrWithSize<T, kVerifyFlags, kIsVolatile>(field_offset, sizeof(void*));
}
template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE T GetFieldPtrWithSize(MemberOffset field_offset, size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(pointer_size == 4 || pointer_size == 8) << pointer_size;
if (pointer_size == 4) {
return reinterpret_cast<T>(GetField32<kVerifyFlags, kIsVolatile>(field_offset));
@@ -480,25 +479,25 @@
NO_THREAD_SAFETY_ANALYSIS;
template<bool kVisitClass, typename Visitor>
void VisitInstanceFieldsReferences(mirror::Class* klass, const Visitor& visitor) HOT_ATTR
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kVisitClass, typename Visitor>
void VisitStaticFieldsReferences(mirror::Class* klass, const Visitor& visitor) HOT_ATTR
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
private:
template<typename kSize, bool kIsVolatile>
ALWAYS_INLINE void SetField(MemberOffset field_offset, kSize new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<typename kSize, bool kIsVolatile>
ALWAYS_INLINE kSize GetField(MemberOffset field_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Verify the type correctness of stores to fields.
// TODO: This can cause thread suspension and isn't moving-GC safe.
void CheckFieldAssignmentImpl(MemberOffset field_offset, Object* new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void CheckFieldAssignment(MemberOffset field_offset, Object* new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (kCheckFieldAssignments) {
CheckFieldAssignmentImpl(field_offset, new_value);
}
@@ -509,7 +508,7 @@
// Class::CopyOf().
static Object* CopyObject(Thread* self, mirror::Object* dest, mirror::Object* src,
size_t num_bytes)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static Atomic<uint32_t> hash_code_seed;
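GetFieldPtrWithSize()/SetFieldPtrWithSize() above let a 64-bit host manipulate objects whose
native-pointer fields use the 32-bit target layout (e.g. 64-bit dex2oat writing a 32-bit boot
image). The round-trip, distilled into a standalone sketch (not the ART code verbatim):

#include <cassert>
#include <cstdint>

// A native pointer occupies a 4- or 8-byte slot depending on the target's
// pointer size, which may differ from the host's. Host pointers are assumed
// at least as wide as the target's.
template <typename T>
void StorePtr(void* slot, T value, size_t pointer_size) {
  assert(pointer_size == 4 || pointer_size == 8);
  uintptr_t bits = reinterpret_cast<uintptr_t>(value);
  if (pointer_size == 4) {
    assert(bits <= UINT32_MAX);  // must fit the 32-bit slot
    *static_cast<uint32_t*>(slot) = static_cast<uint32_t>(bits);
  } else {
    *static_cast<uint64_t*>(slot) = bits;
  }
}

template <typename T>
T LoadPtr(const void* slot, size_t pointer_size) {
  assert(pointer_size == 4 || pointer_size == 8);
  uintptr_t bits = (pointer_size == 4)
      ? *static_cast<const uint32_t*>(slot)
      : static_cast<uintptr_t>(*static_cast<const uint64_t*>(slot));
  return reinterpret_cast<T>(bits);
}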
diff --git a/runtime/mirror/object_array.h b/runtime/mirror/object_array.h
index 5eddc18..cee17e9 100644
--- a/runtime/mirror/object_array.h
+++ b/runtime/mirror/object_array.h
@@ -32,21 +32,21 @@
static ObjectArray<T>* Alloc(Thread* self, Class* object_array_class, int32_t length,
gc::AllocatorType allocator_type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static ObjectArray<T>* Alloc(Thread* self, Class* object_array_class, int32_t length)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- T* Get(int32_t i) ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ T* Get(int32_t i) ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_);
// Returns true if the object can be stored into the array. If not, throws
// an ArrayStoreException and returns false.
- // TODO fix thread safety analysis: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
+ // TODO fix thread safety analysis: should be SHARED_REQUIRES(Locks::mutator_lock_).
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CheckAssignable(T* object) NO_THREAD_SAFETY_ANALYSIS;
- ALWAYS_INLINE void Set(int32_t i, T* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // TODO fix thread safety analysis: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
+ ALWAYS_INLINE void Set(int32_t i, T* object) SHARED_REQUIRES(Locks::mutator_lock_);
+ // TODO fix thread safety analysis: should be SHARED_REQUIRES(Locks::mutator_lock_).
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE void Set(int32_t i, T* object) NO_THREAD_SAFETY_ANALYSIS;
@@ -54,37 +54,37 @@
// Set element without bounds and element type checks, to be used in limited
// circumstances, such as during boot image writing.
// TODO fix thread safety analysis broken by the use of templates. This should be
- // SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
+ // SHARED_REQUIRES(Locks::mutator_lock_).
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE void SetWithoutChecks(int32_t i, T* object) NO_THREAD_SAFETY_ANALYSIS;
// TODO fix thread safety analysis broken by the use of templates. This should be
- // SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
+ // SHARED_REQUIRES(Locks::mutator_lock_).
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE void SetWithoutChecksAndWriteBarrier(int32_t i, T* object)
NO_THREAD_SAFETY_ANALYSIS;
- ALWAYS_INLINE T* GetWithoutChecks(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE T* GetWithoutChecks(int32_t i) SHARED_REQUIRES(Locks::mutator_lock_);
// Copy src into this array (dealing with overlaps as memmove does) without assignability checks.
void AssignableMemmove(int32_t dst_pos, ObjectArray<T>* src, int32_t src_pos,
- int32_t count) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ int32_t count) SHARED_REQUIRES(Locks::mutator_lock_);
// Copy src into this array assuming no overlap and without assignability checks.
void AssignableMemcpy(int32_t dst_pos, ObjectArray<T>* src, int32_t src_pos,
- int32_t count) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ int32_t count) SHARED_REQUIRES(Locks::mutator_lock_);
// Copy src into this array with assignability checks.
void AssignableCheckingMemcpy(int32_t dst_pos, ObjectArray<T>* src, int32_t src_pos,
int32_t count, bool throw_exception)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ObjectArray<T>* CopyOf(Thread* self, int32_t new_length)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// TODO fix thread safety analysis broken by the use of template. This should be
- // SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
+ // SHARED_REQUIRES(Locks::mutator_lock_).
template<const bool kVisitClass, typename Visitor>
void VisitReferences(const Visitor& visitor) NO_THREAD_SAFETY_ANALYSIS;
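[Note: the TODOs in this hunk keep NO_THREAD_SAFETY_ANALYSIS on the templated setters because clang's analysis of this era could not follow template instantiations, so the intended mutator-lock contract lives only in comments. A minimal standalone sketch of that opt-out, with hypothetical names, not ART code:

#define NO_THREAD_SAFETY_ANALYSIS __attribute__((no_thread_safety_analysis))

int values[16];

template <bool kTransactionActive>
void Set(int i, int v) NO_THREAD_SAFETY_ANALYSIS {
  // -Wthread-safety skips this body entirely; the intended lock contract is
  // documented in a comment until the analysis can see through templates.
  values[i] = v;
}

int main() {
  Set<false>(0, 1);  // every instantiation is exempt from the checker
  Set<true>(0, 2);
  return 0;
}
]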
diff --git a/runtime/mirror/object_reference.h b/runtime/mirror/object_reference.h
index 055be85..2a5c88e 100644
--- a/runtime/mirror/object_reference.h
+++ b/runtime/mirror/object_reference.h
@@ -33,11 +33,11 @@
template<bool kPoisonReferences, class MirrorType>
class MANAGED ObjectReference {
public:
- MirrorType* AsMirrorPtr() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ MirrorType* AsMirrorPtr() const SHARED_REQUIRES(Locks::mutator_lock_) {
return UnCompress();
}
- void Assign(MirrorType* other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void Assign(MirrorType* other) SHARED_REQUIRES(Locks::mutator_lock_) {
reference_ = Compress(other);
}
@@ -56,18 +56,18 @@
protected:
ObjectReference<kPoisonReferences, MirrorType>(MirrorType* mirror_ptr)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
: reference_(Compress(mirror_ptr)) {
}
// Compress reference to its bit representation.
- static uint32_t Compress(MirrorType* mirror_ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ static uint32_t Compress(MirrorType* mirror_ptr) SHARED_REQUIRES(Locks::mutator_lock_) {
uintptr_t as_bits = reinterpret_cast<uintptr_t>(mirror_ptr);
return static_cast<uint32_t>(kPoisonReferences ? -as_bits : as_bits);
}
// Uncompress an encoded reference from its bit representation.
- MirrorType* UnCompress() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ MirrorType* UnCompress() const SHARED_REQUIRES(Locks::mutator_lock_) {
uintptr_t as_bits = kPoisonReferences ? -reference_ : reference_;
return reinterpret_cast<MirrorType*>(as_bits);
}
@@ -83,11 +83,11 @@
class MANAGED HeapReference : public ObjectReference<kPoisonHeapReferences, MirrorType> {
public:
static HeapReference<MirrorType> FromMirrorPtr(MirrorType* mirror_ptr)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return HeapReference<MirrorType>(mirror_ptr);
}
private:
- HeapReference<MirrorType>(MirrorType* mirror_ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ HeapReference<MirrorType>(MirrorType* mirror_ptr) SHARED_REQUIRES(Locks::mutator_lock_)
: ObjectReference<kPoisonHeapReferences, MirrorType>(mirror_ptr) {}
};
@@ -95,16 +95,16 @@
template<class MirrorType>
class MANAGED CompressedReference : public mirror::ObjectReference<false, MirrorType> {
public:
- CompressedReference<MirrorType>() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ CompressedReference<MirrorType>() SHARED_REQUIRES(Locks::mutator_lock_)
: mirror::ObjectReference<false, MirrorType>(nullptr) {}
static CompressedReference<MirrorType> FromMirrorPtr(MirrorType* p)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return CompressedReference<MirrorType>(p);
}
private:
- CompressedReference<MirrorType>(MirrorType* p) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ CompressedReference<MirrorType>(MirrorType* p) SHARED_REQUIRES(Locks::mutator_lock_)
: mirror::ObjectReference<false, MirrorType>(p) {}
};
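[Note: Compress/UnCompress above implement reference poisoning: with kPoisonReferences set, the stored 32-bit value is the negated address, so code that mistakenly treats the stored bits as a raw pointer is likely to fault, while the negation round-trips exactly. A standalone sketch of the arithmetic; kPoison stands in for kPoisonReferences, the 0x12345678 address is fabricated, and the trick only works because ART keeps heap objects below 4 GB:

#include <cassert>
#include <cstdint>

template <bool kPoison>
uint32_t Compress(void* ptr) {
  uintptr_t as_bits = reinterpret_cast<uintptr_t>(ptr);
  return static_cast<uint32_t>(kPoison ? -as_bits : as_bits);
}

template <bool kPoison>
void* UnCompress(uint32_t ref) {
  uintptr_t as_bits = kPoison ? -ref : ref;  // 32-bit negation, then zero-extend
  return reinterpret_cast<void*>(as_bits);
}

int main() {
  void* fake = reinterpret_cast<void*>(0x12345678);  // pretend heap address < 4 GB
  uint32_t stored = Compress<true>(fake);
  assert(stored != 0x12345678u);             // stored bits are not the address
  assert(UnCompress<true>(stored) == fake);  // negation round-trips exactly
  return 0;
}
]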
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index 85ea28f..f5a0445 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -48,7 +48,7 @@
const char* utf8_in,
const char* utf16_expected_le,
int32_t expected_hash)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
std::unique_ptr<uint16_t[]> utf16_expected(new uint16_t[expected_utf16_length]);
for (int32_t i = 0; i < expected_utf16_length; i++) {
uint16_t ch = (((utf16_expected_le[i*2 + 0] & 0xff) << 8) |
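[Note: the test helper above (the hunk is cut off mid-statement) assembles each expected UTF-16 code unit from two consecutive bytes of utf16_expected_le, with byte 0 landing in the high-order half; the second byte presumably follows on the truncated next line. A standalone sketch of that assembly, with illustrative names:

#include <cassert>
#include <cstdint>

uint16_t CodeUnitAt(const char* bytes, int i) {
  // Byte 2i goes to the high half, byte 2i+1 to the low half.
  return static_cast<uint16_t>(((bytes[i * 2 + 0] & 0xff) << 8) |
                               (bytes[i * 2 + 1] & 0xff));
}

int main() {
  const char bytes[] = {0x00, 0x41};       // two bytes -> one code unit
  assert(CodeUnitAt(bytes, 0) == 0x0041);  // 'A'
  return 0;
}
]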
diff --git a/runtime/mirror/reference.h b/runtime/mirror/reference.h
index 4bbdb99..51ae760 100644
--- a/runtime/mirror/reference.h
+++ b/runtime/mirror/reference.h
@@ -62,49 +62,49 @@
return OFFSET_OF_OBJECT_MEMBER(Reference, referent_);
}
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- Object* GetReferent() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Object* GetReferent() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetFieldObjectVolatile<Object, kDefaultVerifyFlags, kReadBarrierOption>(
ReferentOffset());
}
template<bool kTransactionActive>
- void SetReferent(Object* referent) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetReferent(Object* referent) SHARED_REQUIRES(Locks::mutator_lock_) {
SetFieldObjectVolatile<kTransactionActive>(ReferentOffset(), referent);
}
template<bool kTransactionActive>
- void ClearReferent() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void ClearReferent() SHARED_REQUIRES(Locks::mutator_lock_) {
SetFieldObjectVolatile<kTransactionActive>(ReferentOffset(), nullptr);
}
// Volatile read/write is not necessary since the java pending next is only accessed from
// the java threads for cleared references. Once these cleared references have a null referent,
// we never end up reading their pending next from the GC again.
- Reference* GetPendingNext() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Reference* GetPendingNext() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetFieldObject<Reference>(PendingNextOffset());
}
template<bool kTransactionActive>
- void SetPendingNext(Reference* pending_next) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetPendingNext(Reference* pending_next) SHARED_REQUIRES(Locks::mutator_lock_) {
SetFieldObject<kTransactionActive>(PendingNextOffset(), pending_next);
}
- bool IsEnqueued() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsEnqueued() SHARED_REQUIRES(Locks::mutator_lock_) {
// Since the references are stored as cyclic lists it means that once enqueued, the pending
// next is always non-null.
return GetPendingNext() != nullptr;
}
- bool IsEnqueuable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsEnqueuable() SHARED_REQUIRES(Locks::mutator_lock_);
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- static Class* GetJavaLangRefReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ static Class* GetJavaLangRefReference() SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(!java_lang_ref_Reference_.IsNull());
return java_lang_ref_Reference_.Read<kReadBarrierOption>();
}
static void SetClass(Class* klass);
static void ResetClass();
- static void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
private:
// Note: This avoids a read barrier, it should only be used by the GC.
- HeapReference<Object>* GetReferentReferenceAddr() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ HeapReference<Object>* GetReferentReferenceAddr() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetFieldObjectReferenceAddr<kDefaultVerifyFlags>(ReferentOffset());
}
@@ -130,10 +130,10 @@
}
template<bool kTransactionActive>
- void SetZombie(Object* zombie) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetZombie(Object* zombie) SHARED_REQUIRES(Locks::mutator_lock_) {
return SetFieldObjectVolatile<kTransactionActive>(ZombieOffset(), zombie);
}
- Object* GetZombie() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Object* GetZombie() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetFieldObjectVolatile<Object>(ZombieOffset());
}
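[Note: IsEnqueued() above leans on the cyclic-list invariant spelled out in the comment: once a reference is enqueued, its pending next is never null, because even a lone node points at itself. An illustrative sketch of such a queue, not ART's actual reference queue:

#include <cassert>

struct Node {
  Node* pending_next = nullptr;
  bool IsEnqueued() const { return pending_next != nullptr; }
};

void Enqueue(Node** tail, Node* n) {
  if (*tail == nullptr) {
    n->pending_next = n;                      // first node closes the cycle on itself
  } else {
    n->pending_next = (*tail)->pending_next;  // new node points at the head
    (*tail)->pending_next = n;
  }
  *tail = n;
}

int main() {
  Node a, b;
  Node* tail = nullptr;
  assert(!a.IsEnqueued());
  Enqueue(&tail, &a);
  Enqueue(&tail, &b);
  assert(a.IsEnqueued() && b.IsEnqueued());  // cyclic => non-null once enqueued
  return 0;
}
]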
diff --git a/runtime/mirror/stack_trace_element.h b/runtime/mirror/stack_trace_element.h
index dc7131e..d58c0b6 100644
--- a/runtime/mirror/stack_trace_element.h
+++ b/runtime/mirror/stack_trace_element.h
@@ -31,32 +31,32 @@
// C++ mirror of java.lang.StackTraceElement
class MANAGED StackTraceElement FINAL : public Object {
public:
- String* GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ String* GetDeclaringClass() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, declaring_class_));
}
- String* GetMethodName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ String* GetMethodName() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, method_name_));
}
- String* GetFileName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ String* GetFileName() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, file_name_));
}
- int32_t GetLineNumber() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ int32_t GetLineNumber() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetField32(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, line_number_));
}
static StackTraceElement* Alloc(Thread* self, Handle<String> declaring_class,
Handle<String> method_name, Handle<String> file_name,
int32_t line_number)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static void SetClass(Class* java_lang_StackTraceElement);
static void ResetClass();
static void VisitRoots(RootVisitor* visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static Class* GetStackTraceElement() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ static Class* GetStackTraceElement() SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(!java_lang_StackTraceElement_.IsNull());
return java_lang_StackTraceElement_.Read();
}
@@ -71,7 +71,7 @@
template<bool kTransactionActive>
void Init(Handle<String> declaring_class, Handle<String> method_name, Handle<String> file_name,
int32_t line_number)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static GcRoot<Class> java_lang_StackTraceElement_;
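[Note: the getters above all reduce to GetFieldObject/GetField32 plus a byte offset from OFFSET_OF_OBJECT_MEMBER: fields of managed objects are read through base-plus-offset rather than ordinary member access, so interpreter, compiled code, and GC agree on the layout. A minimal sketch of the idea with hypothetical types:

#include <cassert>
#include <cstddef>
#include <cstdint>

struct FakeElement {
  int32_t line_number;
};

int32_t GetField32(const FakeElement* obj, size_t offset) {
  // Read the field through (object base + byte offset).
  return *reinterpret_cast<const int32_t*>(
      reinterpret_cast<const uint8_t*>(obj) + offset);
}

int main() {
  FakeElement e{42};
  assert(GetField32(&e, offsetof(FakeElement, line_number)) == 42);
  return 0;
}
]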
diff --git a/runtime/mirror/string-inl.h b/runtime/mirror/string-inl.h
index b689057..3a39f58 100644
--- a/runtime/mirror/string-inl.h
+++ b/runtime/mirror/string-inl.h
@@ -42,7 +42,7 @@
}
void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// Avoid AsString as object is not yet in live bitmap or allocation stack.
String* string = down_cast<String*>(obj);
string->SetCount(count_);
@@ -61,7 +61,7 @@
}
void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// Avoid AsString as object is not yet in live bitmap or allocation stack.
String* string = down_cast<String*>(obj);
string->SetCount(count_);
@@ -88,7 +88,7 @@
}
void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// Avoid AsString as object is not yet in live bitmap or allocation stack.
String* string = down_cast<String*>(obj);
string->SetCount(count_);
@@ -111,7 +111,7 @@
}
void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// Avoid AsString as object is not yet in live bitmap or allocation stack.
String* string = down_cast<String*>(obj);
string->SetCount(count_);
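[Note: the four functors in this file appear to be the pre-fence visitors that String::Alloc (later in this diff) takes as pre_fence_visitor: the allocator invokes them on the raw object before it is published, which is also why they down_cast instead of calling AsString, as the object is not yet in the live bitmap or allocation stack. A hypothetical sketch of the pattern:

#include <cassert>
#include <cstdint>

struct FakeString {
  int32_t count;
};

struct SetStringCountVisitor {
  explicit SetStringCountVisitor(int32_t count) : count_(count) {}
  void operator()(FakeString* obj) const {
    obj->count = count_;  // runs before the object becomes visible
  }
  int32_t count_;
};

template <typename Visitor>
FakeString* Alloc(const Visitor& pre_fence_visitor) {
  FakeString* obj = new FakeString();  // stand-in for the GC allocation
  pre_fence_visitor(obj);              // initialize before "publication"
  // <- a release fence / store to a visible location would happen here
  return obj;
}

int main() {
  FakeString* s = Alloc(SetStringCountVisitor(5));
  assert(s->count == 5);
  delete s;
  return 0;
}
]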
diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h
index af06385..b4a2d1d 100644
--- a/runtime/mirror/string.h
+++ b/runtime/mirror/string.h
@@ -49,87 +49,87 @@
return OFFSET_OF_OBJECT_MEMBER(String, value_);
}
- uint16_t* GetValue() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uint16_t* GetValue() SHARED_REQUIRES(Locks::mutator_lock_) {
return &value_[0];
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- size_t SizeOf() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ size_t SizeOf() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- int32_t GetLength() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ int32_t GetLength() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(String, count_));
}
- void SetCount(int32_t new_count) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetCount(int32_t new_count) SHARED_REQUIRES(Locks::mutator_lock_) {
// Count is invariant so use non-transactional mode. Also disable check as we may run inside
// a transaction.
DCHECK_LE(0, new_count);
SetField32<false, false>(OFFSET_OF_OBJECT_MEMBER(String, count_), new_count);
}
- int32_t GetHashCode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ int32_t GetHashCode() SHARED_REQUIRES(Locks::mutator_lock_);
// Computes, stores, and returns the hash code.
- int32_t ComputeHashCode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ int32_t ComputeHashCode() SHARED_REQUIRES(Locks::mutator_lock_);
- int32_t GetUtfLength() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ int32_t GetUtfLength() SHARED_REQUIRES(Locks::mutator_lock_);
- uint16_t CharAt(int32_t index) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ uint16_t CharAt(int32_t index) SHARED_REQUIRES(Locks::mutator_lock_);
- void SetCharAt(int32_t index, uint16_t c) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetCharAt(int32_t index, uint16_t c) SHARED_REQUIRES(Locks::mutator_lock_);
- String* Intern() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ String* Intern() SHARED_REQUIRES(Locks::mutator_lock_);
template <bool kIsInstrumented, typename PreFenceVisitor>
ALWAYS_INLINE static String* Alloc(Thread* self, int32_t utf16_length,
gc::AllocatorType allocator_type,
const PreFenceVisitor& pre_fence_visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template <bool kIsInstrumented>
ALWAYS_INLINE static String* AllocFromByteArray(Thread* self, int32_t byte_length,
Handle<ByteArray> array, int32_t offset,
int32_t high_byte,
gc::AllocatorType allocator_type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template <bool kIsInstrumented>
ALWAYS_INLINE static String* AllocFromCharArray(Thread* self, int32_t count,
Handle<CharArray> array, int32_t offset,
gc::AllocatorType allocator_type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
template <bool kIsInstrumented>
ALWAYS_INLINE static String* AllocFromString(Thread* self, int32_t string_length,
Handle<String> string, int32_t offset,
gc::AllocatorType allocator_type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static String* AllocFromStrings(Thread* self, Handle<String> string, Handle<String> string2)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static String* AllocFromUtf16(Thread* self, int32_t utf16_length, const uint16_t* utf16_data_in)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static String* AllocFromModifiedUtf8(Thread* self, const char* utf)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static String* AllocFromModifiedUtf8(Thread* self, int32_t utf16_length, const char* utf8_data_in)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// TODO: This is only used in the interpreter to compare against
// entries from a dex files constant pool (ArtField names). Should
// we unify this with Equals(const StringPiece&); ?
- bool Equals(const char* modified_utf8) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool Equals(const char* modified_utf8) SHARED_REQUIRES(Locks::mutator_lock_);
// TODO: This is only used to compare DexCache.location with
// a dex_file's location (which is an std::string). Do we really
// need this in mirror::String just for that one usage ?
bool Equals(const StringPiece& modified_utf8)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- bool Equals(String* that) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool Equals(String* that) SHARED_REQUIRES(Locks::mutator_lock_);
// Compare UTF-16 code point values not in a locale-sensitive manner
int Compare(int32_t utf16_length, const char* utf8_data_in);
@@ -137,21 +137,21 @@
// TODO: do we need this overload? give it a more intention-revealing name.
bool Equals(const uint16_t* that_chars, int32_t that_offset,
int32_t that_length)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Create a modified UTF-8 encoded std::string from a java/lang/String object.
- std::string ToModifiedUtf8() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string ToModifiedUtf8() SHARED_REQUIRES(Locks::mutator_lock_);
- int32_t FastIndexOf(int32_t ch, int32_t start) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ int32_t FastIndexOf(int32_t ch, int32_t start) SHARED_REQUIRES(Locks::mutator_lock_);
- int32_t CompareTo(String* other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ int32_t CompareTo(String* other) SHARED_REQUIRES(Locks::mutator_lock_);
- CharArray* ToCharArray(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ CharArray* ToCharArray(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
void GetChars(int32_t start, int32_t end, Handle<CharArray> array, int32_t index)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- static Class* GetJavaLangString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ static Class* GetJavaLangString() SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(!java_lang_String_.IsNull());
return java_lang_String_.Read();
}
@@ -159,10 +159,10 @@
static void SetClass(Class* java_lang_String);
static void ResetClass();
static void VisitRoots(RootVisitor* visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
private:
- void SetHashCode(int32_t new_hash_code) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetHashCode(int32_t new_hash_code) SHARED_REQUIRES(Locks::mutator_lock_) {
// Hash code is invariant so use non-transactional mode. Also disable check as we may run inside
// a transaction.
DCHECK_EQ(0, GetField32(OFFSET_OF_OBJECT_MEMBER(String, hash_code_)));
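[Note: GetHashCode/ComputeHashCode, together with the private SetHashCode and its DCHECK_EQ(0, ...), suggest the usual lazily cached hash with 0 doubling as the not-yet-computed sentinel. An illustrative sketch of that caching with a hypothetical class; a string that genuinely hashes to 0 would simply be recomputed on every call:

#include <cassert>
#include <cstdint>
#include <string>
#include <utility>

class CachedString {
 public:
  explicit CachedString(std::string value) : value_(std::move(value)) {}

  int32_t GetHashCode() {
    if (hash_code_ == 0) {
      hash_code_ = ComputeHashCode();  // compute once, cache in the field
    }
    return hash_code_;
  }

 private:
  int32_t ComputeHashCode() const {
    uint32_t hash = 0;
    for (char c : value_) {
      hash = 31u * hash + static_cast<uint8_t>(c);  // java.lang.String-style
    }
    return static_cast<int32_t>(hash);
  }

  std::string value_;
  int32_t hash_code_ = 0;  // 0 doubles as the "not yet computed" sentinel
};

int main() {
  CachedString s("abc");
  assert(s.GetHashCode() == 96354);  // matches Java's "abc".hashCode()
  assert(s.GetHashCode() == s.GetHashCode());
  return 0;
}
]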
diff --git a/runtime/mirror/throwable.cc b/runtime/mirror/throwable.cc
index 1c21edb..e8633de 100644
--- a/runtime/mirror/throwable.cc
+++ b/runtime/mirror/throwable.cc
@@ -53,7 +53,7 @@
}
}
-void Throwable::SetStackState(Object* state) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+void Throwable::SetStackState(Object* state) SHARED_REQUIRES(Locks::mutator_lock_) {
CHECK(state != nullptr);
if (Runtime::Current()->IsActiveTransaction()) {
SetFieldObjectVolatile<true>(OFFSET_OF_OBJECT_MEMBER(Throwable, stack_state_), state);
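[Note: SetStackState above dispatches on Runtime::Current()->IsActiveTransaction() and picks the transactional flavor of the volatile field write. The <true> flavor exists so writes can be rolled back if the transaction aborts; sketched here with a hypothetical rollback log, not ART's Transaction machinery:

#include <cassert>
#include <utility>
#include <vector>

static std::vector<std::pair<int*, int>> gRollbackLog;

template <bool kTransactionActive>
void SetField(int* field, int value) {
  if (kTransactionActive) {
    gRollbackLog.emplace_back(field, *field);  // remember the old value
  }
  *field = value;
}

void AbortTransaction() {
  for (auto it = gRollbackLog.rbegin(); it != gRollbackLog.rend(); ++it) {
    *it->first = it->second;  // restore in reverse order
  }
  gRollbackLog.clear();
}

int main() {
  int field = 1;
  SetField<true>(&field, 2);   // transactional write, logged
  AbortTransaction();
  assert(field == 1);          // rolled back
  SetField<false>(&field, 3);  // plain write, not logged
  assert(field == 3);
  return 0;
}
]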
diff --git a/runtime/mirror/throwable.h b/runtime/mirror/throwable.h
index 9cc0b6f..0f488dc 100644
--- a/runtime/mirror/throwable.h
+++ b/runtime/mirror/throwable.h
@@ -31,38 +31,38 @@
// C++ mirror of java.lang.Throwable
class MANAGED Throwable : public Object {
public:
- void SetDetailMessage(String* new_detail_message) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetDetailMessage(String* new_detail_message) SHARED_REQUIRES(Locks::mutator_lock_);
- String* GetDetailMessage() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ String* GetDetailMessage() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(Throwable, detail_message_));
}
- std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_REQUIRES(Locks::mutator_lock_);
// This is a runtime version of initCause, you shouldn't use it if initCause may have been
// overridden. Also it asserts rather than throwing exceptions. Currently this is only used
// in cases like the verifier where the checks cannot fail and initCause isn't overridden.
- void SetCause(Throwable* cause) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetStackState(Object* state) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool IsCheckedException() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetCause(Throwable* cause) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetStackState(Object* state) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsCheckedException() SHARED_REQUIRES(Locks::mutator_lock_);
- static Class* GetJavaLangThrowable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ static Class* GetJavaLangThrowable() SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(!java_lang_Throwable_.IsNull());
return java_lang_Throwable_.Read();
}
- int32_t GetStackDepth() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ int32_t GetStackDepth() SHARED_REQUIRES(Locks::mutator_lock_);
static void SetClass(Class* java_lang_Throwable);
static void ResetClass();
static void VisitRoots(RootVisitor* visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
private:
- Object* GetStackState() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Object* GetStackState() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetFieldObjectVolatile<Object>(OFFSET_OF_OBJECT_MEMBER(Throwable, stack_state_));
}
- Object* GetStackTrace() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Object* GetStackTrace() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetFieldObjectVolatile<Object>(OFFSET_OF_OBJECT_MEMBER(Throwable, stack_trace_));
}
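[Note: SHARED_REQUIRES itself is not defined in this diff; the definition presumably lives in runtime/base/macros.h and expands to clang's requires_shared_capability attribute. A sketch of such an expansion, with illustrative macro and type definitions, compilable with clang++ -c -Wthread-safety:

#define CAPABILITY(x)        __attribute__((capability(x)))
#define SHARED_REQUIRES(...) __attribute__((requires_shared_capability(__VA_ARGS__)))
#define ACQUIRE_SHARED(...)  __attribute__((acquire_shared_capability(__VA_ARGS__)))
#define RELEASE_SHARED(...)  __attribute__((release_shared_capability(__VA_ARGS__)))

struct CAPABILITY("mutex") FakeMutexLock {
  void SharedLock() ACQUIRE_SHARED();
  void SharedUnlock() RELEASE_SHARED();
};

FakeMutexLock mutator_lock_;
int guarded_value_;

int Read() SHARED_REQUIRES(mutator_lock_) {  // callers must hold the lock shared
  return guarded_value_;
}

void Caller() {
  mutator_lock_.SharedLock();
  Read();                       // OK: shared capability is held here
  mutator_lock_.SharedUnlock();
  // Read();                    // would warn under -Wthread-safety
}
]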