Move to newer clang annotations
Also enable -Wthread-safety-negative.
Changes:
Switch to capabilities and negative capabilities.
Future work:
Use capabilities to implement uninterruptible annotations that work
with AssertNoThreadSuspension.
Bug: 20072211
Change-Id: I42fcbe0300d98a831c89d1eff3ecd5a7e99ebf33
diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h
index d977941..c4b36ee 100644
--- a/runtime/base/arena_allocator.h
+++ b/runtime/base/arena_allocator.h
@@ -182,12 +182,12 @@
public:
explicit ArenaPool(bool use_malloc = true, bool low_4gb = false);
~ArenaPool();
- Arena* AllocArena(size_t size) LOCKS_EXCLUDED(lock_);
- void FreeArenaChain(Arena* first) LOCKS_EXCLUDED(lock_);
- size_t GetBytesAllocated() const LOCKS_EXCLUDED(lock_);
+ Arena* AllocArena(size_t size) REQUIRES(!lock_);
+ void FreeArenaChain(Arena* first) REQUIRES(!lock_);
+ size_t GetBytesAllocated() const REQUIRES(!lock_);
// Trim the maps in arenas by madvising, used by JIT to reduce memory usage. This only works
// when use_malloc is false.
- void TrimMaps() LOCKS_EXCLUDED(lock_);
+ void TrimMaps() REQUIRES(!lock_);
private:
const bool use_malloc_;
diff --git a/runtime/base/logging.h b/runtime/base/logging.h
index 93d4edc..2cd1a4d 100644
--- a/runtime/base/logging.h
+++ b/runtime/base/logging.h
@@ -237,7 +237,7 @@
public:
LogMessage(const char* file, unsigned int line, LogSeverity severity, int error);
- ~LogMessage(); // TODO: enable LOCKS_EXCLUDED(Locks::logging_lock_).
+ ~LogMessage(); // TODO: enable REQUIRES(!Locks::logging_lock_).
// Returns the stream associated with the message, the LogMessage performs output when it goes
// out of scope.
diff --git a/runtime/base/macros.h b/runtime/base/macros.h
index 5c59647..1d5dee2 100644
--- a/runtime/base/macros.h
+++ b/runtime/base/macros.h
@@ -244,18 +244,14 @@
#define ACQUIRED_AFTER(...) THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__))
#define ACQUIRED_BEFORE(...) THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__))
-#define EXCLUSIVE_LOCKS_REQUIRED(...) THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(__VA_ARGS__))
#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x))
#define GUARDED_VAR THREAD_ANNOTATION_ATTRIBUTE__(guarded)
-#define LOCKABLE THREAD_ANNOTATION_ATTRIBUTE__(lockable)
#define LOCK_RETURNED(x) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))
-#define LOCKS_EXCLUDED(...) THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__))
#define NO_THREAD_SAFETY_ANALYSIS THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis)
#define PT_GUARDED_BY(x)
// THREAD_ANNOTATION_ATTRIBUTE__(point_to_guarded_by(x))
#define PT_GUARDED_VAR THREAD_ANNOTATION_ATTRIBUTE__(point_to_guarded)
#define SCOPED_LOCKABLE THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
-#define SHARED_LOCKS_REQUIRED(...) THREAD_ANNOTATION_ATTRIBUTE__(shared_locks_required(__VA_ARGS__))
#if defined(__clang__)
#define EXCLUSIVE_LOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock_function(__VA_ARGS__))
@@ -263,12 +259,43 @@
#define SHARED_LOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(shared_lock_function(__VA_ARGS__))
#define SHARED_TRYLOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock_function(__VA_ARGS__))
#define UNLOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(unlock_function(__VA_ARGS__))
+#define REQUIRES(...) THREAD_ANNOTATION_ATTRIBUTE__(requires_capability(__VA_ARGS__))
+#define SHARED_REQUIRES(...) THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))
+#define CAPABILITY(...) THREAD_ANNOTATION_ATTRIBUTE__(capability(__VA_ARGS__))
+#define SHARED_CAPABILITY(...) THREAD_ANNOTATION_ATTRIBUTE__(shared_capability(__VA_ARGS__))
+#define ASSERT_CAPABILITY(...) THREAD_ANNOTATION_ATTRIBUTE__(assert_capability(__VA_ARGS__))
+#define ASSERT_SHARED_CAPABILITY(...) THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_capability(__VA_ARGS__))
+#define RETURN_CAPABILITY(...) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(__VA_ARGS__))
+#define TRY_ACQUIRE(...) THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_capability(__VA_ARGS__))
+#define TRY_ACQUIRE_SHARED(...) THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_shared_capability(__VA_ARGS__))
+#define ACQUIRE(...) THREAD_ANNOTATION_ATTRIBUTE__(acquire_capability(__VA_ARGS__))
+#define ACQUIRE_SHARED(...) THREAD_ANNOTATION_ATTRIBUTE__(acquire_shared_capability(__VA_ARGS__))
+#define RELEASE(...) THREAD_ANNOTATION_ATTRIBUTE__(release_capability(__VA_ARGS__))
+#define RELEASE_SHARED(...) THREAD_ANNOTATION_ATTRIBUTE__(release_shared_capability(__VA_ARGS__))
+#define SCOPED_CAPABILITY THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
#else
#define EXCLUSIVE_LOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock(__VA_ARGS__))
#define EXCLUSIVE_TRYLOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(exclusive_trylock(__VA_ARGS__))
#define SHARED_LOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(shared_lock(__VA_ARGS__))
#define SHARED_TRYLOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock(__VA_ARGS__))
#define UNLOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(unlock(__VA_ARGS__))
+#define REQUIRES(...)
+#define SHARED_REQUIRES(...)
+#define CAPABILITY(...)
+#define SHARED_CAPABILITY(...)
+#define ASSERT_CAPABILITY(...)
+#define ASSERT_SHARED_CAPABILITY(...)
+#define RETURN_CAPABILITY(...)
+#define TRY_ACQUIRE(...)
+#define TRY_ACQUIRE_SHARED(...)
+#define ACQUIRE(...)
+#define ACQUIRE_SHARED(...)
+#define RELEASE(...)
+#define RELEASE_SHARED(...)
+#define SCOPED_CAPABILITY
#endif
+#define LOCKABLE CAPABILITY("mutex")
+#define SHARED_LOCKABLE SHARED_CAPABILITY("mutex")
+
#endif // ART_RUNTIME_BASE_MACROS_H_
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 5b258e5..6f82f28 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -43,8 +43,8 @@
namespace art {
-class LOCKABLE ReaderWriterMutex;
-class LOCKABLE MutatorMutex;
+class SHARED_LOCKABLE ReaderWriterMutex;
+class SHARED_LOCKABLE MutatorMutex;
class ScopedContentionRecorder;
class Thread;
@@ -214,35 +214,37 @@
virtual bool IsMutex() const { return true; }
// Block until mutex is free then acquire exclusive access.
- void ExclusiveLock(Thread* self) EXCLUSIVE_LOCK_FUNCTION();
- void Lock(Thread* self) EXCLUSIVE_LOCK_FUNCTION() { ExclusiveLock(self); }
+ void ExclusiveLock(Thread* self) ACQUIRE();
+ void Lock(Thread* self) ACQUIRE() { ExclusiveLock(self); }
// Returns true if acquires exclusive access, false otherwise.
- bool ExclusiveTryLock(Thread* self) EXCLUSIVE_TRYLOCK_FUNCTION(true);
- bool TryLock(Thread* self) EXCLUSIVE_TRYLOCK_FUNCTION(true) { return ExclusiveTryLock(self); }
+ bool ExclusiveTryLock(Thread* self) TRY_ACQUIRE(true);
+ bool TryLock(Thread* self) TRY_ACQUIRE(true) { return ExclusiveTryLock(self); }
// Release exclusive access.
- void ExclusiveUnlock(Thread* self) UNLOCK_FUNCTION();
- void Unlock(Thread* self) UNLOCK_FUNCTION() { ExclusiveUnlock(self); }
+ void ExclusiveUnlock(Thread* self) RELEASE();
+ void Unlock(Thread* self) RELEASE() { ExclusiveUnlock(self); }
// Is the current thread the exclusive holder of the Mutex.
bool IsExclusiveHeld(const Thread* self) const;
// Assert that the Mutex is exclusively held by the current thread.
- void AssertExclusiveHeld(const Thread* self) {
+ void AssertExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(this) {
if (kDebugLocking && (gAborting == 0)) {
CHECK(IsExclusiveHeld(self)) << *this;
}
}
- void AssertHeld(const Thread* self) { AssertExclusiveHeld(self); }
+ void AssertHeld(const Thread* self) ASSERT_CAPABILITY(this) { AssertExclusiveHeld(self); }
// Assert that the Mutex is not held by the current thread.
- void AssertNotHeldExclusive(const Thread* self) {
+ void AssertNotHeldExclusive(const Thread* self) ASSERT_CAPABILITY(!*this) {
if (kDebugLocking && (gAborting == 0)) {
CHECK(!IsExclusiveHeld(self)) << *this;
}
}
- void AssertNotHeld(const Thread* self) { AssertNotHeldExclusive(self); }
+ void AssertNotHeld(const Thread* self) ASSERT_CAPABILITY(!*this) {
+ AssertNotHeldExclusive(self);
+ }
// Id associated with exclusive owner. No memory ordering semantics if called from a thread other
// than the owner.
@@ -255,6 +257,9 @@
virtual void Dump(std::ostream& os) const;
+ // For negative capabilities in clang annotations.
+ const Mutex& operator!() const { return *this; }
+
private:
#if ART_USE_FUTEXES
// 0 is unheld, 1 is held.
@@ -290,7 +295,7 @@
// Shared(n) | Block | error | SharedLock(n+1)* | Shared(n-1) or Free
// * for large values of n the SharedLock may block.
std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu);
-class LOCKABLE ReaderWriterMutex : public BaseMutex {
+class SHARED_LOCKABLE ReaderWriterMutex : public BaseMutex {
public:
explicit ReaderWriterMutex(const char* name, LockLevel level = kDefaultMutexLevel);
~ReaderWriterMutex();
@@ -298,12 +303,12 @@
virtual bool IsReaderWriterMutex() const { return true; }
// Block until ReaderWriterMutex is free then acquire exclusive access.
- void ExclusiveLock(Thread* self) EXCLUSIVE_LOCK_FUNCTION();
- void WriterLock(Thread* self) EXCLUSIVE_LOCK_FUNCTION() { ExclusiveLock(self); }
+ void ExclusiveLock(Thread* self) ACQUIRE();
+ void WriterLock(Thread* self) ACQUIRE() { ExclusiveLock(self); }
// Release exclusive access.
- void ExclusiveUnlock(Thread* self) UNLOCK_FUNCTION();
- void WriterUnlock(Thread* self) UNLOCK_FUNCTION() { ExclusiveUnlock(self); }
+ void ExclusiveUnlock(Thread* self) RELEASE();
+ void WriterUnlock(Thread* self) RELEASE() { ExclusiveUnlock(self); }
// Block until ReaderWriterMutex is free and acquire exclusive access. Returns true on success
// or false if timeout is reached.
@@ -313,15 +318,15 @@
#endif
// Block until ReaderWriterMutex is shared or free then acquire a share on the access.
- void SharedLock(Thread* self) SHARED_LOCK_FUNCTION() ALWAYS_INLINE;
- void ReaderLock(Thread* self) SHARED_LOCK_FUNCTION() { SharedLock(self); }
+ void SharedLock(Thread* self) ACQUIRE_SHARED() ALWAYS_INLINE;
+ void ReaderLock(Thread* self) ACQUIRE_SHARED() { SharedLock(self); }
// Try to acquire share of ReaderWriterMutex.
- bool SharedTryLock(Thread* self) EXCLUSIVE_TRYLOCK_FUNCTION(true);
+ bool SharedTryLock(Thread* self) SHARED_TRYLOCK_FUNCTION(true);
// Release a share of the access.
- void SharedUnlock(Thread* self) UNLOCK_FUNCTION() ALWAYS_INLINE;
- void ReaderUnlock(Thread* self) UNLOCK_FUNCTION() { SharedUnlock(self); }
+ void SharedUnlock(Thread* self) RELEASE_SHARED() ALWAYS_INLINE;
+ void ReaderUnlock(Thread* self) RELEASE_SHARED() { SharedUnlock(self); }
// Is the current thread the exclusive holder of the ReaderWriterMutex.
bool IsExclusiveHeld(const Thread* self) const;
@@ -368,6 +373,9 @@
virtual void Dump(std::ostream& os) const;
+ // For negative capabilities in clang annotations.
+ const ReaderWriterMutex& operator!() const { return *this; }
+
private:
#if ART_USE_FUTEXES
// Out-of-inline path for handling contention for a SharedLock.
@@ -402,7 +410,7 @@
// suspended states before exclusive ownership of the mutator mutex is sought.
//
std::ostream& operator<<(std::ostream& os, const MutatorMutex& mu);
-class LOCKABLE MutatorMutex : public ReaderWriterMutex {
+class SHARED_LOCKABLE MutatorMutex : public ReaderWriterMutex {
public:
explicit MutatorMutex(const char* name, LockLevel level = kDefaultMutexLevel)
: ReaderWriterMutex(name, level) {}
@@ -410,6 +418,9 @@
virtual bool IsMutatorMutex() const { return true; }
+ // For negative capabilities in clang annotations.
+ const MutatorMutex& operator!() const { return *this; }
+
private:
friend class Thread;
void TransitionFromRunnableToSuspended(Thread* self) UNLOCK_FUNCTION() ALWAYS_INLINE;
@@ -458,7 +469,7 @@
// Scoped locker/unlocker for a regular Mutex that acquires mu upon construction and releases it
// upon destruction.
-class SCOPED_LOCKABLE MutexLock {
+class SCOPED_CAPABILITY MutexLock {
public:
explicit MutexLock(Thread* self, Mutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) : self_(self), mu_(mu) {
mu_.ExclusiveLock(self_);
@@ -478,7 +489,7 @@
// Scoped locker/unlocker for a ReaderWriterMutex that acquires read access to mu upon
// construction and releases it upon destruction.
-class SCOPED_LOCKABLE ReaderMutexLock {
+class SCOPED_CAPABILITY ReaderMutexLock {
public:
explicit ReaderMutexLock(Thread* self, ReaderWriterMutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) :
self_(self), mu_(mu) {
@@ -500,7 +511,7 @@
// Scoped locker/unlocker for a ReaderWriterMutex that acquires write access to mu upon
// construction and releases it upon destruction.
-class SCOPED_LOCKABLE WriterMutexLock {
+class SCOPED_CAPABILITY WriterMutexLock {
public:
explicit WriterMutexLock(Thread* self, ReaderWriterMutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) :
self_(self), mu_(mu) {
diff --git a/runtime/base/mutex_test.cc b/runtime/base/mutex_test.cc
index 3750c81..340550f 100644
--- a/runtime/base/mutex_test.cc
+++ b/runtime/base/mutex_test.cc
@@ -101,18 +101,18 @@
: mu("test mutex", kDefaultMutexLevel, true), cv("test condition variable", mu) {
}
- static void* Callback(void* arg) {
- RecursiveLockWait* state = reinterpret_cast<RecursiveLockWait*>(arg);
- state->mu.Lock(Thread::Current());
- state->cv.Signal(Thread::Current());
- state->mu.Unlock(Thread::Current());
- return nullptr;
- }
-
Mutex mu;
ConditionVariable cv;
};
+static void* RecursiveLockWaitCallback(void* arg) {
+ RecursiveLockWait* state = reinterpret_cast<RecursiveLockWait*>(arg);
+ state->mu.Lock(Thread::Current());
+ state->cv.Signal(Thread::Current());
+ state->mu.Unlock(Thread::Current());
+ return nullptr;
+}
+
// GCC has trouble with our mutex tests, so we have to turn off thread safety analysis.
static void RecursiveLockWaitTest() NO_THREAD_SAFETY_ANALYSIS {
RecursiveLockWait state;
@@ -120,8 +120,7 @@
state.mu.Lock(Thread::Current());
pthread_t pthread;
- int pthread_create_result = pthread_create(&pthread, nullptr, RecursiveLockWait::Callback,
- &state);
+ int pthread_create_result = pthread_create(&pthread, nullptr, RecursiveLockWaitCallback, &state);
ASSERT_EQ(0, pthread_create_result);
state.cv.Wait(Thread::Current());
diff --git a/runtime/base/timing_logger.h b/runtime/base/timing_logger.h
index b300109..e10cd24 100644
--- a/runtime/base/timing_logger.h
+++ b/runtime/base/timing_logger.h
@@ -33,17 +33,17 @@
explicit CumulativeLogger(const std::string& name);
~CumulativeLogger();
void Start();
- void End() LOCKS_EXCLUDED(lock_);
- void Reset() LOCKS_EXCLUDED(lock_);
- void Dump(std::ostream& os) const LOCKS_EXCLUDED(lock_);
+ void End() REQUIRES(!lock_);
+ void Reset() REQUIRES(!lock_);
+ void Dump(std::ostream& os) const REQUIRES(!lock_);
uint64_t GetTotalNs() const {
return GetTotalTime() * kAdjust;
}
// Allow the name to be modified, particularly when the cumulative logger is a field within a
// parent class that is unable to determine the "name" of a sub-class.
- void SetName(const std::string& name) LOCKS_EXCLUDED(lock_);
- void AddLogger(const TimingLogger& logger) LOCKS_EXCLUDED(lock_);
- size_t GetIterations() const;
+ void SetName(const std::string& name) REQUIRES(!lock_);
+ void AddLogger(const TimingLogger& logger) REQUIRES(!lock_);
+ size_t GetIterations() const REQUIRES(!lock_);
private:
class HistogramComparator {
@@ -58,8 +58,8 @@
static constexpr size_t kInitialBucketSize = 50; // 50 microseconds.
void AddPair(const std::string &label, uint64_t delta_time)
- EXCLUSIVE_LOCKS_REQUIRED(lock_);
- void DumpHistogram(std::ostream &os) const EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ REQUIRES(lock_);
+ void DumpHistogram(std::ostream &os) const REQUIRES(lock_);
uint64_t GetTotalTime() const {
return total_time_;
}