Move to newer clang thread-safety annotations
Also enable -Wthread-safety-negative.
Changes:
Switch to capabilities and negative capabilities: EXCLUSIVE_LOCKS_REQUIRED
becomes REQUIRES, SHARED_LOCKS_REQUIRED becomes SHARED_REQUIRES, and
LOCKS_EXCLUDED becomes the negative capability REQUIRES(!...).
Future work:
Use capabilities to implement uninterruptible annotations that work
with AssertNoThreadSuspension.
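For illustration, a minimal sketch of how capability-style macros map onto
clang's thread-safety attributes and how -Wthread-safety-negative checks
negative capabilities. The macro definitions and the Mutex/Space classes
below are simplified stand-ins for this sketch, not ART's actual
declarations:

    #include <cstddef>

    // Capability-style macros wrapping clang's thread-safety attributes.
    #define CAPABILITY(x)        __attribute__((capability(x)))
    #define REQUIRES(...)        __attribute__((requires_capability(__VA_ARGS__)))
    #define SHARED_REQUIRES(...) __attribute__((requires_shared_capability(__VA_ARGS__)))
    #define ACQUIRE(...)         __attribute__((acquire_capability(__VA_ARGS__)))
    #define RELEASE(...)         __attribute__((release_capability(__VA_ARGS__)))
    #define GUARDED_BY(x)        __attribute__((guarded_by(x)))

    // A lock declared as a capability so the analysis can track it.
    class CAPABILITY("mutex") Mutex {
     public:
      void Lock() ACQUIRE() {}    // acquires the capability on 'this'
      void Unlock() RELEASE() {}  // releases it
    };

    class Space {  // hypothetical example class
     public:
      // Positive capability: the caller must already hold lock_.
      size_t AllocationSizeLocked() REQUIRES(lock_) { return size_; }

      // Negative capability: the caller must NOT hold lock_, since it is
      // acquired here. With -Wthread-safety-negative, a caller that might
      // still hold lock_ (a would-be self-deadlock) is flagged at
      // compile time.
      size_t AllocationSize() REQUIRES(!lock_) {
        lock_.Lock();
        size_t result = AllocationSizeLocked();
        lock_.Unlock();
        return result;
      }

     private:
      Mutex lock_;
      size_t size_ GUARDED_BY(lock_) = 0;
    };

This is why the diff below rewrites LOCKS_EXCLUDED(lock_) as REQUIRES(!lock_):
LOCKS_EXCLUDED was only enforced when the caller's lock state happened to be
known to the analysis, whereas the negative capability is a real precondition
that propagates up the call chain.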
Bug: 20072211
Change-Id: I42fcbe0300d98a831c89d1eff3ecd5a7e99ebf33
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index 45ed0cd..c726998 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -96,7 +96,7 @@
return Begin() <= byte_obj && byte_obj < End();
}
void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Return true if the large object is a zygote large object. Potentially slow.
virtual bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const = 0;
@@ -130,11 +130,12 @@
// of malloc.
static LargeObjectMapSpace* Create(const std::string& name);
// Return the storage space required by obj.
- size_t AllocationSize(mirror::Object* obj, size_t* usable_size);
+ size_t AllocationSize(mirror::Object* obj, size_t* usable_size) REQUIRES(!lock_);
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size, size_t* bytes_tl_bulk_allocated);
- size_t Free(Thread* self, mirror::Object* ptr);
- void Walk(DlMallocSpace::WalkCallback, void* arg) OVERRIDE LOCKS_EXCLUDED(lock_);
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated)
+ REQUIRES(!lock_);
+ size_t Free(Thread* self, mirror::Object* ptr) REQUIRES(!lock_);
+ void Walk(DlMallocSpace::WalkCallback, void* arg) OVERRIDE REQUIRES(!lock_);
// TODO: disabling thread safety analysis as this may be called when we already hold lock_.
bool Contains(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS;
@@ -146,8 +147,8 @@
explicit LargeObjectMapSpace(const std::string& name);
virtual ~LargeObjectMapSpace() {}
- bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const OVERRIDE LOCKS_EXCLUDED(lock_);
- void SetAllLargeObjectsAsZygoteObjects(Thread* self) OVERRIDE LOCKS_EXCLUDED(lock_);
+ bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const OVERRIDE REQUIRES(!lock_);
+ void SetAllLargeObjectsAsZygoteObjects(Thread* self) OVERRIDE REQUIRES(!lock_);
// Used to ensure mutual exclusion when the allocation spaces data structures are being modified.
mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
@@ -163,12 +164,13 @@
virtual ~FreeListSpace();
static FreeListSpace* Create(const std::string& name, uint8_t* requested_begin, size_t capacity);
size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
- EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ REQUIRES(lock_);
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE;
- size_t Free(Thread* self, mirror::Object* obj) OVERRIDE;
- void Walk(DlMallocSpace::WalkCallback callback, void* arg) OVERRIDE LOCKS_EXCLUDED(lock_);
- void Dump(std::ostream& os) const;
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated)
+ OVERRIDE REQUIRES(!lock_);
+ size_t Free(Thread* self, mirror::Object* obj) OVERRIDE REQUIRES(!lock_);
+ void Walk(DlMallocSpace::WalkCallback callback, void* arg) OVERRIDE REQUIRES(!lock_);
+ void Dump(std::ostream& os) const REQUIRES(!lock_);
protected:
FreeListSpace(const std::string& name, MemMap* mem_map, uint8_t* begin, uint8_t* end);
@@ -186,9 +188,9 @@
return GetAllocationAddressForSlot(GetSlotIndexForAllocationInfo(info));
}
// Removes header from the free blocks set by finding the corresponding iterator and erasing it.
- void RemoveFreePrev(AllocationInfo* info) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ void RemoveFreePrev(AllocationInfo* info) REQUIRES(lock_);
bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const OVERRIDE;
- void SetAllLargeObjectsAsZygoteObjects(Thread* self) OVERRIDE;
+ void SetAllLargeObjectsAsZygoteObjects(Thread* self) OVERRIDE REQUIRES(!lock_);
class SortByPrevFree {
public: