Move to newer clang annotations
Also enable -Wthread-safety-negative.
Changes:
Switch from the lock-based annotations (EXCLUSIVE_LOCKS_REQUIRED,
SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED) to capability-based ones
(REQUIRES, SHARED_REQUIRES) and negative capabilities (REQUIRES(!lock)).
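For reference, a minimal sketch of the mapping (illustrative only; the
real macro definitions live elsewhere in the tree and may differ in
detail, and ToyMutex below is a placeholder, not ART's Mutex):

  // Approximate expansions of the new macros onto clang's capability
  // attributes. Illustrative, not ART's actual macros.
  #define REQUIRES(...) \
      __attribute__((requires_capability(__VA_ARGS__)))
  #define SHARED_REQUIRES(...) \
      __attribute__((requires_shared_capability(__VA_ARGS__)))

  // Old spelling                       New spelling
  // EXCLUSIVE_LOCKS_REQUIRED(mu)   ->  REQUIRES(mu)
  // SHARED_LOCKS_REQUIRED(mu)      ->  SHARED_REQUIRES(mu)
  // LOCKS_EXCLUDED(mu)             ->  REQUIRES(!mu)

  // Toy capability type showing why REQUIRES(!mu) is stronger than
  // LOCKS_EXCLUDED(mu): under -Wthread-safety-negative, a function
  // that acquires mu must declare REQUIRES(!mu), and every caller must
  // in turn prove mu is not held, so the "not held" fact propagates up
  // the call graph instead of being checked only at the immediate
  // call site.
  class __attribute__((capability("mutex"))) ToyMutex {
   public:
    void Lock() __attribute__((acquire_capability()));
    void Unlock() __attribute__((release_capability()));
  };

  ToyMutex mu;

  void TakesMu() REQUIRES(!mu) {  // warns if the !mu requirement is dropped
    mu.Lock();
    mu.Unlock();
  }

  void Caller() REQUIRES(!mu) {  // must restate !mu to call TakesMu()
    TakesMu();
  }

Compiling a translation unit like this with
clang++ -c -Wthread-safety -Wthread-safety-negative exercises the checks.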
Future work:
Use capabilities to implement uninterruptible annotations that work
with AssertNoThreadSuspension.
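One possible shape for that future work (purely hypothetical; the Role
type and the helper signatures below are placeholders for illustration,
not existing ART API):

  // Treat "uninterruptible" as a role capability so the analysis can
  // flag suspend points reached while it is held. Hypothetical sketch.
  class __attribute__((capability("role"))) Role {};

  namespace Roles {
  Role uninterruptible_;
  }  // namespace Roles

  // Entering the region would acquire the role; leaving releases it.
  const char* StartAssertNoThreadSuspension(const char* cause)
      __attribute__((acquire_capability(Roles::uninterruptible_)));
  void EndAssertNoThreadSuspension(const char* old_cause)
      __attribute__((release_capability(Roles::uninterruptible_)));

  // Any function that may suspend the thread would then carry a
  // negative requirement, so reaching a suspend point inside an
  // uninterruptible region becomes a compile-time -Wthread-safety
  // error rather than a runtime assertion.
  void CheckSuspend()
      __attribute__((requires_capability(!Roles::uninterruptible_)));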
Bug: 20072211
Change-Id: I42fcbe0300d98a831c89d1eff3ecd5a7e99ebf33
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 55adaf1..8aed768 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -184,19 +184,19 @@
bool Start() UNLOCK_FUNCTION(Locks::mutator_lock_);
bool IsShuttingDown(Thread* self);
- bool IsShuttingDownLocked() const EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_) {
+ bool IsShuttingDownLocked() const REQUIRES(Locks::runtime_shutdown_lock_) {
return shutting_down_;
}
- size_t NumberOfThreadsBeingBorn() const EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_) {
+ size_t NumberOfThreadsBeingBorn() const REQUIRES(Locks::runtime_shutdown_lock_) {
return threads_being_born_;
}
- void StartThreadBirth() EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_) {
+ void StartThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_) {
threads_being_born_++;
}
- void EndThreadBirth() EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_);
+ void EndThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_);
bool IsStarted() const {
return started_;
@@ -212,7 +212,7 @@
// Aborts semi-cleanly. Used in the implementation of LOG(FATAL), which most
// callers should prefer.
- NO_RETURN static void Abort() LOCKS_EXCLUDED(Locks::abort_lock_);
+ NO_RETURN static void Abort() REQUIRES(!Locks::abort_lock_);
// Returns the "main" ThreadGroup, used when attaching user threads.
jobject GetMainThreadGroup() const;
@@ -230,7 +230,7 @@
void CallExitHook(jint status);
// Detaches the current native thread from the runtime.
- void DetachCurrentThread() LOCKS_EXCLUDED(Locks::mutator_lock_);
+ void DetachCurrentThread() REQUIRES(!Locks::mutator_lock_);
void DumpForSigQuit(std::ostream& os);
void DumpLockHolders(std::ostream& os);
@@ -279,15 +279,15 @@
}
// Is the given object the special object used to mark a cleared JNI weak global?
- bool IsClearedJniWeakGlobal(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsClearedJniWeakGlobal(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
// Get the special object used to mark a cleared JNI weak global.
- mirror::Object* GetClearedJniWeakGlobal() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Object* GetClearedJniWeakGlobal() SHARED_REQUIRES(Locks::mutator_lock_);
- mirror::Throwable* GetPreAllocatedOutOfMemoryError() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Throwable* GetPreAllocatedOutOfMemoryError() SHARED_REQUIRES(Locks::mutator_lock_);
mirror::Throwable* GetPreAllocatedNoClassDefFoundError()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
const std::vector<std::string>& GetProperties() const {
return properties_;
@@ -301,77 +301,77 @@
return "2.1.0";
}
- void DisallowNewSystemWeaks() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void AllowNewSystemWeaks() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void EnsureNewSystemWeaksDisallowed() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void BroadcastForNewSystemWeaks() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void DisallowNewSystemWeaks() SHARED_REQUIRES(Locks::mutator_lock_);
+ void AllowNewSystemWeaks() SHARED_REQUIRES(Locks::mutator_lock_);
+ void EnsureNewSystemWeaksDisallowed() SHARED_REQUIRES(Locks::mutator_lock_);
+ void BroadcastForNewSystemWeaks() SHARED_REQUIRES(Locks::mutator_lock_);
// Visit all the roots. If only_dirty is true then non-dirty roots won't be visited. If
// clean_dirty is true then dirty roots will be marked as non-dirty after visiting.
void VisitRoots(RootVisitor* visitor, VisitRootFlags flags = kVisitRootFlagAllRoots)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Visit image roots; only used for hprof since the GC uses the image space mod union table
// instead.
- void VisitImageRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void VisitImageRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
// Visit all of the roots we can safely do concurrently.
void VisitConcurrentRoots(RootVisitor* visitor,
VisitRootFlags flags = kVisitRootFlagAllRoots)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Visit all of the non-thread roots; we can do this with mutators unpaused.
void VisitNonThreadRoots(RootVisitor* visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void VisitTransactionRoots(RootVisitor* visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Visit all of the thread roots.
- void VisitThreadRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void VisitThreadRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
// Flip thread roots from from-space refs to to-space refs.
size_t FlipThreadRoots(Closure* thread_flip_visitor, Closure* flip_callback,
gc::collector::GarbageCollector* collector)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
+ REQUIRES(!Locks::mutator_lock_);
// Visit all other roots which must be done with mutators suspended.
void VisitNonConcurrentRoots(RootVisitor* visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Sweep system weaks. A system weak is deleted if the visitor returns null; otherwise, the
// system weak is updated to be the visitor's returned value.
void SweepSystemWeaks(IsMarkedVisitor* visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Constant roots are the roots which never change after the runtime is initialized; they only
// need to be visited once per GC cycle.
void VisitConstantRoots(RootVisitor* visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Returns a special method that calls into a trampoline for runtime method resolution.
- ArtMethod* GetResolutionMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtMethod* GetResolutionMethod() SHARED_REQUIRES(Locks::mutator_lock_);
bool HasResolutionMethod() const {
return resolution_method_ != nullptr;
}
- void SetResolutionMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetResolutionMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
- ArtMethod* CreateResolutionMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtMethod* CreateResolutionMethod() SHARED_REQUIRES(Locks::mutator_lock_);
// Returns a special method that calls into a trampoline for runtime imt conflicts.
- ArtMethod* GetImtConflictMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ArtMethod* GetImtUnimplementedMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtMethod* GetImtConflictMethod() SHARED_REQUIRES(Locks::mutator_lock_);
+ ArtMethod* GetImtUnimplementedMethod() SHARED_REQUIRES(Locks::mutator_lock_);
bool HasImtConflictMethod() const {
return imt_conflict_method_ != nullptr;
}
- void SetImtConflictMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetImtUnimplementedMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetImtConflictMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetImtUnimplementedMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
- ArtMethod* CreateImtConflictMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtMethod* CreateImtConflictMethod() SHARED_REQUIRES(Locks::mutator_lock_);
// Returns a special method that describes all callee saves being spilled to the stack.
enum CalleeSaveType {
@@ -386,17 +386,17 @@
}
ArtMethod* GetCalleeSaveMethod(CalleeSaveType type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* GetCalleeSaveMethodUnchecked(CalleeSaveType type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
QuickMethodFrameInfo GetCalleeSaveMethodFrameInfo(CalleeSaveType type) const {
return callee_save_method_frame_infos_[type];
}
QuickMethodFrameInfo GetRuntimeMethodFrameInfo(ArtMethod* method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static size_t GetCalleeSaveMethodOffset(CalleeSaveType type) {
return OFFSETOF_MEMBER(Runtime, callee_save_methods_[type]);
@@ -410,7 +410,7 @@
void SetCalleeSaveMethod(ArtMethod* method, CalleeSaveType type);
- ArtMethod* CreateCalleeSaveMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtMethod* CreateCalleeSaveMethod() SHARED_REQUIRES(Locks::mutator_lock_);
int32_t GetStat(int kind);
@@ -424,8 +424,8 @@
void ResetStats(int kinds);
- void SetStatsEnabled(bool new_state) LOCKS_EXCLUDED(Locks::instrument_entrypoints_lock_,
- Locks::mutator_lock_);
+ void SetStatsEnabled(bool new_state)
+ REQUIRES(!Locks::instrument_entrypoints_lock_, !Locks::mutator_lock_);
enum class NativeBridgeAction { // private
kUnload,
@@ -463,9 +463,9 @@
bool IsTransactionAborted() const;
void AbortTransactionAndThrowAbortError(Thread* self, const std::string& abort_message)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void ThrowTransactionAbortError(Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset, uint8_t value,
bool is_volatile) const;
@@ -482,17 +482,17 @@
void RecordWriteFieldReference(mirror::Object* obj, MemberOffset field_offset,
mirror::Object* value, bool is_volatile) const;
void RecordWriteArray(mirror::Array* array, size_t index, uint64_t value) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void RecordStrongStringInsertion(mirror::String* s) const
- EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+ REQUIRES(Locks::intern_table_lock_);
void RecordWeakStringInsertion(mirror::String* s) const
- EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+ REQUIRES(Locks::intern_table_lock_);
void RecordStrongStringRemoval(mirror::String* s) const
- EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+ REQUIRES(Locks::intern_table_lock_);
void RecordWeakStringRemoval(mirror::String* s) const
- EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+ REQUIRES(Locks::intern_table_lock_);
- void SetFaultMessage(const std::string& message);
+ void SetFaultMessage(const std::string& message) REQUIRES(!fault_message_lock_);
// Only read by the signal handler; NO_THREAD_SAFETY_ANALYSIS to prevent lock order violations
// with the unexpected_signal_lock_.
const std::string& GetFaultMessage() NO_THREAD_SAFETY_ANALYSIS {
@@ -572,7 +572,7 @@
bool Init(const RuntimeOptions& options, bool ignore_unrecognized)
SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);
- void InitNativeMethods() LOCKS_EXCLUDED(Locks::mutator_lock_);
+ void InitNativeMethods() REQUIRES(!Locks::mutator_lock_);
void InitThreadGroups(Thread* self);
void RegisterRuntimeNativeMethods(JNIEnv* env);