ART: Rename SHARED_REQUIRES to REQUIRES_SHARED

The new name matches the underlying Clang thread-safety attribute
(requires_shared_capability) and upstream usage.
This is preparation for deferring these macros to libbase.
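
The rename is purely mechanical at call sites; only the macro name changes.
As a sketch, the corresponding definition change looks roughly like the
following (the real definition lives in ART's base headers and is not part
of the hunks below, so details may differ):

    // Before: the macro name inverted the attribute's word order.
    #define SHARED_REQUIRES(...) \
        THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))

    // After: the name mirrors the Clang attribute requires_shared_capability
    // and lines up with the REQUIRES(...) spelling used for exclusive locks.
    #define REQUIRES_SHARED(...) \
        THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))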

Test: m
Test: m test-art-host
Change-Id: Ia8986b5dfd926ba772bf00b0a35eaf83596d8518
diff --git a/runtime/gc/space/bump_pointer_space-inl.h b/runtime/gc/space/bump_pointer_space-inl.h
index 2263797..45cea5a 100644
--- a/runtime/gc/space/bump_pointer_space-inl.h
+++ b/runtime/gc/space/bump_pointer_space-inl.h
@@ -87,7 +87,7 @@
 }
 
 inline size_t BumpPointerSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   size_t num_bytes = obj->SizeOf();
   if (usable_size != nullptr) {
     *usable_size = RoundUp(num_bytes, kAlignment);
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 0e27d84..e9982e9 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -58,7 +58,7 @@
 
   // Return the storage space required by obj.
   size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     return AllocationSizeNonvirtual(obj, usable_size);
   }
 
@@ -72,7 +72,7 @@
   }
 
   size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Removes the fork time growth limit on capacity, allowing the application to allocate up to the
   // maximum reserved size of the heap.
@@ -110,9 +110,9 @@
   void AssertAllThreadLocalBuffersAreRevoked()
       REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !block_lock_);
 
-  uint64_t GetBytesAllocated() SHARED_REQUIRES(Locks::mutator_lock_)
+  uint64_t GetBytesAllocated() REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!*Locks::runtime_shutdown_lock_, !*Locks::thread_list_lock_, !block_lock_);
-  uint64_t GetObjectsAllocated() SHARED_REQUIRES(Locks::mutator_lock_)
+  uint64_t GetObjectsAllocated() REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!*Locks::runtime_shutdown_lock_, !*Locks::thread_list_lock_, !block_lock_);
   bool IsEmpty() const {
     return Begin() == End();
@@ -132,7 +132,7 @@
 
   // Return the object which comes after obj, while ensuring alignment.
   static mirror::Object* GetNextObject(mirror::Object* obj)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Allocate a new TLAB, returns false if the allocation failed.
   bool AllocNewTlab(Thread* self, size_t bytes) REQUIRES(!block_lock_);
@@ -143,7 +143,7 @@
 
   // Go through all of the blocks and visit the continuous objects.
   void Walk(ObjectCallback* callback, void* arg)
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!block_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!block_lock_);
 
   accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() OVERRIDE;
 
@@ -154,7 +154,7 @@
   }
 
   void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Object alignment within the space.
   static constexpr size_t kAlignment = 8;
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index 455d28e..9282ec7 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -319,7 +319,7 @@
 namespace allocator {
 
 // Implement the dlmalloc morecore callback.
-void* ArtDlMallocMoreCore(void* mspace, intptr_t increment) SHARED_REQUIRES(Locks::mutator_lock_) {
+void* ArtDlMallocMoreCore(void* mspace, intptr_t increment) REQUIRES_SHARED(Locks::mutator_lock_) {
   Runtime* runtime = Runtime::Current();
   Heap* heap = runtime->GetHeap();
   ::art::gc::space::DlMallocSpace* dlmalloc_space = heap->GetDlMallocSpace();
diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h
index eab757a..8fb2d76 100644
--- a/runtime/gc/space/dlmalloc_space.h
+++ b/runtime/gc/space/dlmalloc_space.h
@@ -65,11 +65,11 @@
   // Virtual to allow MemoryToolMallocSpace to intercept.
   virtual size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE
       REQUIRES(!lock_)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
   // Virtual to allow MemoryToolMallocSpace to intercept.
   virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
       REQUIRES(!lock_)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   size_t MaxBytesBulkAllocatedFor(size_t num_bytes) OVERRIDE {
     return num_bytes;
@@ -136,7 +136,7 @@
   }
 
   void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
  protected:
   DlMallocSpace(MemMap* mem_map, size_t initial_size, const std::string& name, void* mspace,
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index c87312b..e41c532 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -456,7 +456,7 @@
                                           bool is_global_cache,
                                           bool validate_oat_file,
                                           std::string* error_msg)
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     // Note that we must not use the file descriptor associated with
     // ScopedFlock::GetFile to Init the image file. We want the file
     // descriptor (and the associated exclusive lock) to be released when
@@ -492,7 +492,7 @@
                                           bool validate_oat_file,
                                           const OatFile* oat_file,
                                           std::string* error_msg)
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     CHECK(image_filename != nullptr);
     CHECK(image_location != nullptr);
 
@@ -865,14 +865,14 @@
     explicit FixupRootVisitor(Args... args) : FixupVisitor(args...) {}
 
     ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
-        SHARED_REQUIRES(Locks::mutator_lock_) {
+        REQUIRES_SHARED(Locks::mutator_lock_) {
       if (!root->IsNull()) {
         VisitRoot(root);
       }
     }
 
     ALWAYS_INLINE void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
-        SHARED_REQUIRES(Locks::mutator_lock_) {
+        REQUIRES_SHARED(Locks::mutator_lock_) {
       mirror::Object* ref = root->AsMirrorPtr();
       mirror::Object* new_ref = ForwardObject(ref);
       if (ref != new_ref) {
@@ -936,7 +936,7 @@
 
     // java.lang.ref.Reference visitor.
     void operator()(mirror::Class* klass ATTRIBUTE_UNUSED, mirror::Reference* ref) const
-        SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
+        REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
       mirror::Object* obj = ref->GetReferent<kWithoutReadBarrier>();
       ref->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(
           mirror::Reference::ReferentOffset(),
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index 534232d..c407259 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -47,13 +47,13 @@
                                      InstructionSet image_isa,
                                      bool secondary_image,
                                      std::string* error_msg)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Try to open an existing app image space.
   static std::unique_ptr<ImageSpace> CreateFromAppImage(const char* image,
                                                         const OatFile* oat_file,
                                                         std::string* error_msg)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Reads the image header from the specified image location for the
   // instruction set image_isa. Returns null on failure, with
@@ -70,7 +70,7 @@
   std::unique_ptr<const OatFile> ReleaseOatFile();
 
   void VerifyImageAllocations()
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   const ImageHeader& GetImageHeader() const {
     return *reinterpret_cast<ImageHeader*>(Begin());
@@ -158,7 +158,7 @@
                                           bool validate_oat_file,
                                           const OatFile* oat_file,
                                           std::string* error_msg)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   static Atomic<uint32_t> bitmap_index_;
 
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index c726998..0320e79 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -96,7 +96,7 @@
     return Begin() <= byte_obj && byte_obj < End();
   }
   void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Return true if the large object is a zygote large object. Potentially slow.
   virtual bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const = 0;
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index c6b2870..f85ea46 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -63,9 +63,9 @@
   // amount of the storage space that may be used by obj.
   virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) = 0;
   virtual size_t Free(Thread* self, mirror::Object* ptr)
-      SHARED_REQUIRES(Locks::mutator_lock_) = 0;
+      REQUIRES_SHARED(Locks::mutator_lock_) = 0;
   virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs)
-      SHARED_REQUIRES(Locks::mutator_lock_) = 0;
+      REQUIRES_SHARED(Locks::mutator_lock_) = 0;
 
   // Returns the maximum bytes that could be allocated for the given
   // size in bulk, that is the maximum value for the
@@ -160,7 +160,7 @@
                                 size_t maximum_size, bool low_memory_mode) = 0;
 
   virtual void RegisterRecentFree(mirror::Object* ptr)
-      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(lock_);
 
   virtual accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() {
@@ -196,7 +196,7 @@
 
  private:
   static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   DISALLOW_COPY_AND_ASSIGN(MallocSpace);
 };
diff --git a/runtime/gc/space/memory_tool_malloc_space.h b/runtime/gc/space/memory_tool_malloc_space.h
index c081011..e53f009 100644
--- a/runtime/gc/space/memory_tool_malloc_space.h
+++ b/runtime/gc/space/memory_tool_malloc_space.h
@@ -43,10 +43,10 @@
   size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE;
 
   size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   void RegisterRecentFree(mirror::Object* ptr ATTRIBUTE_UNUSED) OVERRIDE {}
 
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index 823aa38..4e57a85 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -62,11 +62,11 @@
 
   // Return the storage space required by obj.
   size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!region_lock_) {
     return AllocationSizeNonvirtual(obj, usable_size);
   }
   size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!region_lock_);
 
   size_t Free(Thread*, mirror::Object*) OVERRIDE {
     UNIMPLEMENTED(FATAL);
@@ -163,7 +163,7 @@
     return nullptr;
   }
   void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!region_lock_);
 
   // Object alignment within the space.
   static constexpr size_t kAlignment = kObjectAlignment;
@@ -503,7 +503,7 @@
   }
 
   mirror::Object* GetNextObject(mirror::Object* obj)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   Mutex region_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
 
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index b016095..8ccbfaa 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -379,7 +379,7 @@
 
 // Callback from rosalloc when it needs to increase the footprint.
 void* ArtRosAllocMoreCore(allocator::RosAlloc* rosalloc, intptr_t increment)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   Heap* heap = Runtime::Current()->GetHeap();
   art::gc::space::RosAllocSpace* rosalloc_space = heap->GetRosAllocSpace(rosalloc);
   DCHECK(rosalloc_space != nullptr);
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index b175fbf..f9c7dbc 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -64,9 +64,9 @@
     return AllocationSizeNonvirtual<true>(obj, usable_size);
   }
   size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
   size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   mirror::Object* AllocNonvirtual(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                   size_t* usable_size, size_t* bytes_tl_bulk_allocated) {
diff --git a/runtime/gc/space/space_test.h b/runtime/gc/space/space_test.h
index 23e937d..bd600fe 100644
--- a/runtime/gc/space/space_test.h
+++ b/runtime/gc/space/space_test.h
@@ -52,7 +52,7 @@
     heap->SetSpaceAsDefault(space);
   }
 
-  mirror::Class* GetByteArrayClass(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) {
+  mirror::Class* GetByteArrayClass(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
     StackHandleScope<1> hs(self);
     auto null_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
     if (byte_array_class_ == nullptr) {
@@ -71,7 +71,7 @@
                         size_t* bytes_allocated,
                         size_t* usable_size,
                         size_t* bytes_tl_bulk_allocated)
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     StackHandleScope<1> hs(self);
     Handle<mirror::Class> byte_array_class(hs.NewHandle(GetByteArrayClass(self)));
     mirror::Object* obj = alloc_space->Alloc(self,
@@ -91,7 +91,7 @@
                                   size_t* bytes_allocated,
                                   size_t* usable_size,
                                   size_t* bytes_tl_bulk_allocated)
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     StackHandleScope<1> hs(self);
     Handle<mirror::Class> byte_array_class(hs.NewHandle(GetByteArrayClass(self)));
     mirror::Object* obj = alloc_space->AllocWithGrowth(self, bytes, bytes_allocated, usable_size,
@@ -103,7 +103,7 @@
   }
 
   void InstallClass(mirror::Object* o, mirror::Class* byte_array_class, size_t size)
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     // Note the minimum size, which is the size of a zero-length byte array.
     EXPECT_GE(size, SizeOfZeroLengthByteArray());
     EXPECT_TRUE(byte_array_class != nullptr);
diff --git a/runtime/gc/space/zygote_space.h b/runtime/gc/space/zygote_space.h
index f2889e2..4d10de8 100644
--- a/runtime/gc/space/zygote_space.h
+++ b/runtime/gc/space/zygote_space.h
@@ -33,7 +33,7 @@
   static ZygoteSpace* Create(const std::string& name, MemMap* mem_map,
                              accounting::ContinuousSpaceBitmap* live_bitmap,
                              accounting::ContinuousSpaceBitmap* mark_bitmap)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   void Dump(std::ostream& os) const;
 
@@ -77,7 +77,7 @@
   }
 
   void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
  protected:
   virtual accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() {