Generalize Valgrind annotations in ART to support ASan.
Also add redzones around non-fixed mem_maps.
Also raise the -Wframe-larger-than limit to enable the arm64 ASan build.
Change-Id: Ie572481a25fead59fc8978d2c317a33ac418516c
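
The hunks below go through a new base/memory_tool.h shim instead of including
<valgrind.h>/<memcheck/memcheck.h> directly; that header is not shown in this
section. A minimal sketch of what it plausibly provides, mapping the generic
MEMORY_TOOL_* macros onto ASan poisoning or Valgrind memcheck (the
ADDRESS_SANITIZER guard name and the exact contents are assumptions, not part
of this patch):

    // base/memory_tool.h -- sketch only, under the assumptions above.
    #ifdef ADDRESS_SANITIZER
    #include <sanitizer/asan_interface.h>
    // ASan only distinguishes poisoned from addressable bytes, so both
    // "undefined" and "defined" map to unpoisoning.
    #define MEMORY_TOOL_MAKE_NOACCESS(p, s) ASAN_POISON_MEMORY_REGION(p, s)
    #define MEMORY_TOOL_MAKE_UNDEFINED(p, s) ASAN_UNPOISON_MEMORY_REGION(p, s)
    #define MEMORY_TOOL_MAKE_DEFINED(p, s) ASAN_UNPOISON_MEMORY_REGION(p, s)
    #define RUNNING_ON_MEMORY_TOOL 1U
    constexpr bool kMemoryToolAddsRedzones = true;
    #else
    #include <memcheck/memcheck.h>
    #include <valgrind.h>
    // Valgrind keeps the full memcheck semantics.
    #define MEMORY_TOOL_MAKE_NOACCESS(p, s) VALGRIND_MAKE_MEM_NOACCESS(p, s)
    #define MEMORY_TOOL_MAKE_UNDEFINED(p, s) VALGRIND_MAKE_MEM_UNDEFINED(p, s)
    #define MEMORY_TOOL_MAKE_DEFINED(p, s) VALGRIND_MAKE_MEM_DEFINED(p, s)
    #define RUNNING_ON_MEMORY_TOOL RUNNING_ON_VALGRIND
    constexpr bool kMemoryToolAddsRedzones = true;
    #endif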
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index 5237c7b..e1c5b64 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -20,13 +20,13 @@
#include "gc/accounting/card_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
+#include "memory_tool_malloc_space-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "runtime.h"
#include "thread.h"
#include "thread_list.h"
#include "utils.h"
-#include "valgrind_malloc_space-inl.h"
namespace art {
namespace gc {
@@ -62,8 +62,8 @@
// Everything is set so record in immutable structure and leave
uint8_t* begin = mem_map->Begin();
- if (Runtime::Current()->RunningOnValgrind()) {
- return new ValgrindMallocSpace<DlMallocSpace, kDefaultValgrindRedZoneBytes, true, false>(
+ if (Runtime::Current()->IsRunningOnMemoryTool()) {
+ return new MemoryToolMallocSpace<DlMallocSpace, kDefaultMemoryToolRedZoneBytes, true, false>(
mem_map, initial_size, name, mspace, begin, end, begin + capacity, growth_limit,
can_move_objects, starting_size);
} else {
@@ -152,8 +152,8 @@
void* allocator, uint8_t* begin, uint8_t* end,
uint8_t* limit, size_t growth_limit,
bool can_move_objects) {
- if (Runtime::Current()->RunningOnValgrind()) {
- return new ValgrindMallocSpace<DlMallocSpace, kDefaultValgrindRedZoneBytes, true, false>(
+ if (Runtime::Current()->IsRunningOnMemoryTool()) {
+ return new MemoryToolMallocSpace<DlMallocSpace, kDefaultMemoryToolRedZoneBytes, true, false>(
mem_map, initial_size_, name, allocator, begin, end, limit, growth_limit,
can_move_objects, starting_size_);
} else {
diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h
index 1f80f1f..ab527a4 100644
--- a/runtime/gc/space/dlmalloc_space.h
+++ b/runtime/gc/space/dlmalloc_space.h
@@ -30,7 +30,7 @@
namespace space {
// An alloc space is a space where objects may be allocated and garbage collected. Not final as may
-// be overridden by a ValgrindMallocSpace.
+// be overridden by a MemoryToolMallocSpace.
class DlMallocSpace : public MallocSpace {
public:
// Create a DlMallocSpace from an existing mem_map.
@@ -46,27 +46,27 @@
static DlMallocSpace* Create(const std::string& name, size_t initial_size, size_t growth_limit,
size_t capacity, uint8_t* requested_begin, bool can_move_objects);
- // Virtual to allow ValgrindMallocSpace to intercept.
+ // Virtual to allow MemoryToolMallocSpace to intercept.
virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size,
size_t* bytes_tl_bulk_allocated)
OVERRIDE LOCKS_EXCLUDED(lock_);
- // Virtual to allow ValgrindMallocSpace to intercept.
+ // Virtual to allow MemoryToolMallocSpace to intercept.
virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
OVERRIDE LOCKS_EXCLUDED(lock_) {
return AllocNonvirtual(self, num_bytes, bytes_allocated, usable_size,
bytes_tl_bulk_allocated);
}
- // Virtual to allow ValgrindMallocSpace to intercept.
+ // Virtual to allow MemoryToolMallocSpace to intercept.
virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE {
return AllocationSizeNonvirtual(obj, usable_size);
}
- // Virtual to allow ValgrindMallocSpace to intercept.
+ // Virtual to allow MemoryToolMallocSpace to intercept.
virtual size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE
LOCKS_EXCLUDED(lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Virtual to allow ValgrindMallocSpace to intercept.
+ // Virtual to allow MemoryToolMallocSpace to intercept.
virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
LOCKS_EXCLUDED(lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 2b567fe..a913e59 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -34,12 +34,12 @@
namespace gc {
namespace space {
-class ValgrindLargeObjectMapSpace FINAL : public LargeObjectMapSpace {
+class MemoryToolLargeObjectMapSpace FINAL : public LargeObjectMapSpace {
public:
- explicit ValgrindLargeObjectMapSpace(const std::string& name) : LargeObjectMapSpace(name) {
+ explicit MemoryToolLargeObjectMapSpace(const std::string& name) : LargeObjectMapSpace(name) {
}
- ~ValgrindLargeObjectMapSpace() OVERRIDE {
+ ~MemoryToolLargeObjectMapSpace() OVERRIDE {
- // Keep valgrind happy if there is any large objects such as dex cache arrays which aren't
- // freed since they are held live by the class linker.
+ // Keep the memory tool happy if there are any large objects, such as dex cache arrays, which
+ // aren't freed since they are held live by the class linker.
MutexLock mu(Thread::Current(), lock_);
@@ -52,13 +52,14 @@
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
OVERRIDE {
mirror::Object* obj =
- LargeObjectMapSpace::Alloc(self, num_bytes + kValgrindRedZoneBytes * 2, bytes_allocated,
+ LargeObjectMapSpace::Alloc(self, num_bytes + kMemoryToolRedZoneBytes * 2, bytes_allocated,
usable_size, bytes_tl_bulk_allocated);
mirror::Object* object_without_rdz = reinterpret_cast<mirror::Object*>(
- reinterpret_cast<uintptr_t>(obj) + kValgrindRedZoneBytes);
- VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<void*>(obj), kValgrindRedZoneBytes);
- VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<uint8_t*>(object_without_rdz) + num_bytes,
- kValgrindRedZoneBytes);
+ reinterpret_cast<uintptr_t>(obj) + kMemoryToolRedZoneBytes);
+ MEMORY_TOOL_MAKE_NOACCESS(reinterpret_cast<void*>(obj), kMemoryToolRedZoneBytes);
+ MEMORY_TOOL_MAKE_NOACCESS(
+ reinterpret_cast<uint8_t*>(object_without_rdz) + num_bytes,
+ kMemoryToolRedZoneBytes);
if (usable_size != nullptr) {
*usable_size = num_bytes; // Since we have redzones, shrink the usable size.
}
@@ -75,7 +76,7 @@
size_t Free(Thread* self, mirror::Object* obj) OVERRIDE {
mirror::Object* object_with_rdz = ObjectWithRedzone(obj);
- VALGRIND_MAKE_MEM_UNDEFINED(object_with_rdz, AllocationSize(obj, nullptr));
+ MEMORY_TOOL_MAKE_UNDEFINED(object_with_rdz, AllocationSize(obj, nullptr));
return LargeObjectMapSpace::Free(self, object_with_rdz);
}
@@ -86,15 +87,15 @@
private:
static const mirror::Object* ObjectWithRedzone(const mirror::Object* obj) {
return reinterpret_cast<const mirror::Object*>(
- reinterpret_cast<uintptr_t>(obj) - kValgrindRedZoneBytes);
+ reinterpret_cast<uintptr_t>(obj) - kMemoryToolRedZoneBytes);
}
static mirror::Object* ObjectWithRedzone(mirror::Object* obj) {
return reinterpret_cast<mirror::Object*>(
- reinterpret_cast<uintptr_t>(obj) - kValgrindRedZoneBytes);
+ reinterpret_cast<uintptr_t>(obj) - kMemoryToolRedZoneBytes);
}
- static constexpr size_t kValgrindRedZoneBytes = kPageSize;
+ static constexpr size_t kMemoryToolRedZoneBytes = kPageSize;
};
void LargeObjectSpace::SwapBitmaps() {
@@ -121,8 +122,8 @@
lock_("large object map space lock", kAllocSpaceLock) {}
LargeObjectMapSpace* LargeObjectMapSpace::Create(const std::string& name) {
- if (Runtime::Current()->RunningOnValgrind()) {
- return new ValgrindLargeObjectMapSpace(name);
+ if (Runtime::Current()->IsRunningOnMemoryTool()) {
+ return new MemoryToolLargeObjectMapSpace(name);
} else {
return new LargeObjectMapSpace(name);
}
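
For large objects the red zone is a whole page on each side
(kMemoryToolRedZoneBytes = kPageSize above). A standalone illustration of the
resulting layout, with hypothetical helper names and kPageSize assumed to be
4 KiB (not ART code):

    #include <cstddef>
    #include <cstdint>

    constexpr size_t kPageSize = 4096;  // assumed value

    // Raw request forwarded to LargeObjectMapSpace::Alloc:
    //   [ page redzone | num_bytes payload | page redzone | mmap rounding ]
    //   ^raw           ^pointer returned to the caller
    size_t RawRequest(size_t num_bytes) { return num_bytes + 2 * kPageSize; }

    // The caller's object starts one page past the raw allocation; both
    // redzone pages are poisoned, which is why usable_size is shrunk back
    // to num_bytes.
    uintptr_t PayloadAddress(uintptr_t raw) { return raw + kPageSize; }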
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index 9495864..6c689cd 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -20,6 +20,7 @@
#include "space.h"
#include <ostream>
+#include "base/memory_tool.h"
namespace art {
namespace gc {
diff --git a/runtime/gc/space/valgrind_malloc_space-inl.h b/runtime/gc/space/memory_tool_malloc_space-inl.h
similarity index 72%
rename from runtime/gc/space/valgrind_malloc_space-inl.h
rename to runtime/gc/space/memory_tool_malloc_space-inl.h
index bc329e1..ea8b8aa 100644
--- a/runtime/gc/space/valgrind_malloc_space-inl.h
+++ b/runtime/gc/space/memory_tool_malloc_space-inl.h
@@ -14,22 +14,20 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_GC_SPACE_VALGRIND_MALLOC_SPACE_INL_H_
-#define ART_RUNTIME_GC_SPACE_VALGRIND_MALLOC_SPACE_INL_H_
+#ifndef ART_RUNTIME_GC_SPACE_MEMORY_TOOL_MALLOC_SPACE_INL_H_
+#define ART_RUNTIME_GC_SPACE_MEMORY_TOOL_MALLOC_SPACE_INL_H_
-#include "valgrind_malloc_space.h"
-
-#include <memcheck/memcheck.h>
-
-#include "valgrind_settings.h"
+#include "base/memory_tool.h"
+#include "memory_tool_malloc_space.h"
+#include "memory_tool_settings.h"
namespace art {
namespace gc {
namespace space {
-namespace valgrind_details {
+namespace memory_tool_details {
-template <size_t kValgrindRedZoneBytes, bool kUseObjSizeForUsable>
+template <size_t kMemoryToolRedZoneBytes, bool kUseObjSizeForUsable>
inline mirror::Object* AdjustForValgrind(void* obj_with_rdz, size_t num_bytes,
size_t bytes_allocated, size_t usable_size,
size_t bytes_tl_bulk_allocated,
@@ -48,26 +46,26 @@
if (kUseObjSizeForUsable) {
*usable_size_out = num_bytes;
} else {
- *usable_size_out = usable_size - 2 * kValgrindRedZoneBytes;
+ *usable_size_out = usable_size - 2 * kMemoryToolRedZoneBytes;
}
}
// Left redzone.
- VALGRIND_MAKE_MEM_NOACCESS(obj_with_rdz, kValgrindRedZoneBytes);
+ MEMORY_TOOL_MAKE_NOACCESS(obj_with_rdz, kMemoryToolRedZoneBytes);
// Make requested memory readable.
// (If the allocator assumes memory is zeroed out, we might get UNDEFINED warnings, so make
// everything DEFINED initially.)
mirror::Object* result = reinterpret_cast<mirror::Object*>(
- reinterpret_cast<uint8_t*>(obj_with_rdz) + kValgrindRedZoneBytes);
- VALGRIND_MAKE_MEM_DEFINED(result, num_bytes);
+ reinterpret_cast<uint8_t*>(obj_with_rdz) + kMemoryToolRedZoneBytes);
+ MEMORY_TOOL_MAKE_DEFINED(result, num_bytes);
// Right redzone. Assumes that if bytes_allocated > usable_size, then the difference is
// management data at the upper end, and for simplicity we will not protect that.
// At the moment, this fits RosAlloc (no management data in a slot, usable_size == alloc_size)
// and DlMalloc (allocation_size = (usable_size == num_bytes) + 4, 4 is management)
- VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<uint8_t*>(result) + num_bytes,
- usable_size - (num_bytes + kValgrindRedZoneBytes));
+ MEMORY_TOOL_MAKE_NOACCESS(reinterpret_cast<uint8_t*>(result) + num_bytes,
+ usable_size - (num_bytes + kMemoryToolRedZoneBytes));
return result;
}
@@ -76,15 +74,15 @@
return obj->SizeOf<kVerifyNone>();
}
-} // namespace valgrind_details
+} // namespace memory_tool_details
template <typename S,
- size_t kValgrindRedZoneBytes,
+ size_t kMemoryToolRedZoneBytes,
bool kAdjustForRedzoneInAllocSize,
bool kUseObjSizeForUsable>
mirror::Object*
-ValgrindMallocSpace<S,
- kValgrindRedZoneBytes,
+MemoryToolMallocSpace<S,
+ kMemoryToolRedZoneBytes,
kAdjustForRedzoneInAllocSize,
kUseObjSizeForUsable>::AllocWithGrowth(
Thread* self, size_t num_bytes, size_t* bytes_allocated_out, size_t* usable_size_out,
@@ -92,14 +90,14 @@
size_t bytes_allocated;
size_t usable_size;
size_t bytes_tl_bulk_allocated;
- void* obj_with_rdz = S::AllocWithGrowth(self, num_bytes + 2 * kValgrindRedZoneBytes,
+ void* obj_with_rdz = S::AllocWithGrowth(self, num_bytes + 2 * kMemoryToolRedZoneBytes,
&bytes_allocated, &usable_size,
&bytes_tl_bulk_allocated);
if (obj_with_rdz == nullptr) {
return nullptr;
}
- return valgrind_details::AdjustForValgrind<kValgrindRedZoneBytes, kUseObjSizeForUsable>(
+ return memory_tool_details::AdjustForValgrind<kMemoryToolRedZoneBytes, kUseObjSizeForUsable>(
obj_with_rdz, num_bytes,
bytes_allocated, usable_size,
bytes_tl_bulk_allocated,
@@ -109,11 +107,11 @@
}
template <typename S,
- size_t kValgrindRedZoneBytes,
+ size_t kMemoryToolRedZoneBytes,
bool kAdjustForRedzoneInAllocSize,
bool kUseObjSizeForUsable>
-mirror::Object* ValgrindMallocSpace<S,
- kValgrindRedZoneBytes,
+mirror::Object* MemoryToolMallocSpace<S,
+ kMemoryToolRedZoneBytes,
kAdjustForRedzoneInAllocSize,
kUseObjSizeForUsable>::Alloc(
Thread* self, size_t num_bytes, size_t* bytes_allocated_out, size_t* usable_size_out,
@@ -121,13 +119,13 @@
size_t bytes_allocated;
size_t usable_size;
size_t bytes_tl_bulk_allocated;
- void* obj_with_rdz = S::Alloc(self, num_bytes + 2 * kValgrindRedZoneBytes,
+ void* obj_with_rdz = S::Alloc(self, num_bytes + 2 * kMemoryToolRedZoneBytes,
&bytes_allocated, &usable_size, &bytes_tl_bulk_allocated);
if (obj_with_rdz == nullptr) {
return nullptr;
}
- return valgrind_details::AdjustForValgrind<kValgrindRedZoneBytes,
+ return memory_tool_details::AdjustForValgrind<kMemoryToolRedZoneBytes,
kUseObjSizeForUsable>(obj_with_rdz, num_bytes,
bytes_allocated, usable_size,
bytes_tl_bulk_allocated,
@@ -137,11 +135,11 @@
}
template <typename S,
- size_t kValgrindRedZoneBytes,
+ size_t kMemoryToolRedZoneBytes,
bool kAdjustForRedzoneInAllocSize,
bool kUseObjSizeForUsable>
-mirror::Object* ValgrindMallocSpace<S,
- kValgrindRedZoneBytes,
+mirror::Object* MemoryToolMallocSpace<S,
+ kMemoryToolRedZoneBytes,
kAdjustForRedzoneInAllocSize,
kUseObjSizeForUsable>::AllocThreadUnsafe(
Thread* self, size_t num_bytes, size_t* bytes_allocated_out, size_t* usable_size_out,
@@ -149,14 +147,14 @@
size_t bytes_allocated;
size_t usable_size;
size_t bytes_tl_bulk_allocated;
- void* obj_with_rdz = S::AllocThreadUnsafe(self, num_bytes + 2 * kValgrindRedZoneBytes,
+ void* obj_with_rdz = S::AllocThreadUnsafe(self, num_bytes + 2 * kMemoryToolRedZoneBytes,
&bytes_allocated, &usable_size,
&bytes_tl_bulk_allocated);
if (obj_with_rdz == nullptr) {
return nullptr;
}
- return valgrind_details::AdjustForValgrind<kValgrindRedZoneBytes, kUseObjSizeForUsable>(
+ return memory_tool_details::AdjustForValgrind<kMemoryToolRedZoneBytes, kUseObjSizeForUsable>(
obj_with_rdz, num_bytes,
bytes_allocated, usable_size,
bytes_tl_bulk_allocated,
@@ -166,38 +164,39 @@
}
template <typename S,
- size_t kValgrindRedZoneBytes,
+ size_t kMemoryToolRedZoneBytes,
bool kAdjustForRedzoneInAllocSize,
bool kUseObjSizeForUsable>
-size_t ValgrindMallocSpace<S,
- kValgrindRedZoneBytes,
+size_t MemoryToolMallocSpace<S,
+ kMemoryToolRedZoneBytes,
kAdjustForRedzoneInAllocSize,
kUseObjSizeForUsable>::AllocationSize(
mirror::Object* obj, size_t* usable_size) {
size_t result = S::AllocationSize(reinterpret_cast<mirror::Object*>(
- reinterpret_cast<uint8_t*>(obj) - (kAdjustForRedzoneInAllocSize ? kValgrindRedZoneBytes : 0)),
+ reinterpret_cast<uint8_t*>(obj) - (kAdjustForRedzoneInAllocSize ? kMemoryToolRedZoneBytes : 0)),
usable_size);
if (usable_size != nullptr) {
if (kUseObjSizeForUsable) {
- *usable_size = valgrind_details::GetObjSizeNoThreadSafety(obj);
+ *usable_size = memory_tool_details::GetObjSizeNoThreadSafety(obj);
} else {
- *usable_size = *usable_size - 2 * kValgrindRedZoneBytes;
+ *usable_size = *usable_size - 2 * kMemoryToolRedZoneBytes;
}
}
return result;
}
template <typename S,
- size_t kValgrindRedZoneBytes,
+ size_t kMemoryToolRedZoneBytes,
bool kAdjustForRedzoneInAllocSize,
bool kUseObjSizeForUsable>
-size_t ValgrindMallocSpace<S,
- kValgrindRedZoneBytes,
+size_t MemoryToolMallocSpace<S,
+ kMemoryToolRedZoneBytes,
kAdjustForRedzoneInAllocSize,
kUseObjSizeForUsable>::Free(
Thread* self, mirror::Object* ptr) {
void* obj_after_rdz = reinterpret_cast<void*>(ptr);
- uint8_t* obj_with_rdz = reinterpret_cast<uint8_t*>(obj_after_rdz) - kValgrindRedZoneBytes;
+ uint8_t* obj_with_rdz = reinterpret_cast<uint8_t*>(obj_after_rdz) - kMemoryToolRedZoneBytes;
+
// Make redzones undefined.
size_t usable_size;
size_t allocation_size = AllocationSize(ptr, &usable_size);
@@ -206,20 +205,20 @@
// Use the obj-size-for-usable flag to determine whether usable_size is the more important one,
// e.g., whether there's data in the allocation_size (and usable_size can't be trusted).
if (kUseObjSizeForUsable) {
- VALGRIND_MAKE_MEM_UNDEFINED(obj_with_rdz, allocation_size);
+ MEMORY_TOOL_MAKE_UNDEFINED(obj_with_rdz, allocation_size);
} else {
- VALGRIND_MAKE_MEM_UNDEFINED(obj_with_rdz, usable_size + 2 * kValgrindRedZoneBytes);
+ MEMORY_TOOL_MAKE_UNDEFINED(obj_with_rdz, usable_size + 2 * kMemoryToolRedZoneBytes);
}
return S::Free(self, reinterpret_cast<mirror::Object*>(obj_with_rdz));
}
template <typename S,
- size_t kValgrindRedZoneBytes,
+ size_t kMemoryToolRedZoneBytes,
bool kAdjustForRedzoneInAllocSize,
bool kUseObjSizeForUsable>
-size_t ValgrindMallocSpace<S,
- kValgrindRedZoneBytes,
+size_t MemoryToolMallocSpace<S,
+ kMemoryToolRedZoneBytes,
kAdjustForRedzoneInAllocSize,
kUseObjSizeForUsable>::FreeList(
Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
@@ -232,32 +231,33 @@
}
template <typename S,
- size_t kValgrindRedZoneBytes,
+ size_t kMemoryToolRedZoneBytes,
bool kAdjustForRedzoneInAllocSize,
bool kUseObjSizeForUsable>
template <typename... Params>
-ValgrindMallocSpace<S,
- kValgrindRedZoneBytes,
+MemoryToolMallocSpace<S,
+ kMemoryToolRedZoneBytes,
kAdjustForRedzoneInAllocSize,
- kUseObjSizeForUsable>::ValgrindMallocSpace(
+ kUseObjSizeForUsable>::MemoryToolMallocSpace(
MemMap* mem_map, size_t initial_size, Params... params) : S(mem_map, initial_size, params...) {
- VALGRIND_MAKE_MEM_UNDEFINED(mem_map->Begin() + initial_size,
- mem_map->Size() - initial_size);
+ MEMORY_TOOL_MAKE_DEFINED(mem_map->Begin(), initial_size);
+ MEMORY_TOOL_MAKE_UNDEFINED(mem_map->Begin() + initial_size,
+ mem_map->Size() - initial_size);
}
template <typename S,
- size_t kValgrindRedZoneBytes,
+ size_t kMemoryToolRedZoneBytes,
bool kAdjustForRedzoneInAllocSize,
bool kUseObjSizeForUsable>
-size_t ValgrindMallocSpace<S,
- kValgrindRedZoneBytes,
+size_t MemoryToolMallocSpace<S,
+ kMemoryToolRedZoneBytes,
kAdjustForRedzoneInAllocSize,
kUseObjSizeForUsable>::MaxBytesBulkAllocatedFor(size_t num_bytes) {
- return S::MaxBytesBulkAllocatedFor(num_bytes + 2 * kValgrindRedZoneBytes);
+ return S::MaxBytesBulkAllocatedFor(num_bytes + 2 * kMemoryToolRedZoneBytes);
}
} // namespace space
} // namespace gc
} // namespace art
-#endif // ART_RUNTIME_GC_SPACE_VALGRIND_MALLOC_SPACE_INL_H_
+#endif // ART_RUNTIME_GC_SPACE_MEMORY_TOOL_MALLOC_SPACE_INL_H_
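
AdjustForValgrind (the helper keeps its old name; it now serves both tools)
concentrates the three poisoning steps in one place. A worked example with
assumed numbers — an 8-byte red zone, a 24-byte request, and an underlying
allocator that reports usable_size = 44 for the padded 40-byte request:

    #include <cassert>
    #include <cstddef>

    int main() {
      constexpr size_t kRedZone = 8;      // kDefaultMemoryToolRedZoneBytes
      constexpr size_t num_bytes = 24;    // caller's request
      constexpr size_t usable_size = 44;  // assumed allocator answer for 40
      // Left redzone:  bytes [0, 8)   -> MEMORY_TOOL_MAKE_NOACCESS.
      // Payload:       bytes [8, 32)  -> MEMORY_TOOL_MAKE_DEFINED.
      // Right side:    bytes [32, 44) -> MEMORY_TOOL_MAKE_NOACCESS, i.e. the
      //                8-byte right redzone plus 4 bytes of allocator slack.
      assert(usable_size - (num_bytes + kRedZone) == 12);
      return 0;
    }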
diff --git a/runtime/gc/space/valgrind_malloc_space.h b/runtime/gc/space/memory_tool_malloc_space.h
similarity index 78%
rename from runtime/gc/space/valgrind_malloc_space.h
rename to runtime/gc/space/memory_tool_malloc_space.h
index a6b010a..64c6f35 100644
--- a/runtime/gc/space/valgrind_malloc_space.h
+++ b/runtime/gc/space/memory_tool_malloc_space.h
@@ -14,24 +14,22 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_GC_SPACE_VALGRIND_MALLOC_SPACE_H_
-#define ART_RUNTIME_GC_SPACE_VALGRIND_MALLOC_SPACE_H_
+#ifndef ART_RUNTIME_GC_SPACE_MEMORY_TOOL_MALLOC_SPACE_H_
+#define ART_RUNTIME_GC_SPACE_MEMORY_TOOL_MALLOC_SPACE_H_
#include "malloc_space.h"
-#include <valgrind.h>
-
namespace art {
namespace gc {
namespace space {
-// A specialization of DlMallocSpace/RosAllocSpace that places valgrind red zones around
-// allocations.
+// A specialization of DlMallocSpace/RosAllocSpace that places memory tool red
+// zones around allocations.
template <typename BaseMallocSpaceType,
- size_t kValgrindRedZoneBytes,
+ size_t kMemoryToolRedZoneBytes,
bool kAdjustForRedzoneInAllocSize,
bool kUseObjSizeForUsable>
-class ValgrindMallocSpace FINAL : public BaseMallocSpaceType {
+class MemoryToolMallocSpace FINAL : public BaseMallocSpaceType {
public:
mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
@@ -57,15 +55,15 @@
size_t MaxBytesBulkAllocatedFor(size_t num_bytes) OVERRIDE;
template <typename... Params>
- explicit ValgrindMallocSpace(MemMap* mem_map, size_t initial_size, Params... params);
- virtual ~ValgrindMallocSpace() {}
+ explicit MemoryToolMallocSpace(MemMap* mem_map, size_t initial_size, Params... params);
+ virtual ~MemoryToolMallocSpace() {}
private:
- DISALLOW_COPY_AND_ASSIGN(ValgrindMallocSpace);
+ DISALLOW_COPY_AND_ASSIGN(MemoryToolMallocSpace);
};
} // namespace space
} // namespace gc
} // namespace art
-#endif // ART_RUNTIME_GC_SPACE_VALGRIND_MALLOC_SPACE_H_
+#endif // ART_RUNTIME_GC_SPACE_MEMORY_TOOL_MALLOC_SPACE_H_
diff --git a/runtime/gc/space/valgrind_settings.h b/runtime/gc/space/memory_tool_settings.h
similarity index 80%
rename from runtime/gc/space/valgrind_settings.h
rename to runtime/gc/space/memory_tool_settings.h
index 73da0fd..e9333c8 100644
--- a/runtime/gc/space/valgrind_settings.h
+++ b/runtime/gc/space/memory_tool_settings.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_GC_SPACE_VALGRIND_SETTINGS_H_
-#define ART_RUNTIME_GC_SPACE_VALGRIND_SETTINGS_H_
+#ifndef ART_RUNTIME_GC_SPACE_MEMORY_TOOL_SETTINGS_H_
+#define ART_RUNTIME_GC_SPACE_MEMORY_TOOL_SETTINGS_H_
namespace art {
namespace gc {
@@ -23,10 +23,10 @@
// Default number of bytes to use as a red zone (rdz). A red zone of this size will be placed before
// and after each allocation. 8 bytes provides long/double alignment.
-static constexpr size_t kDefaultValgrindRedZoneBytes = 8;
+static constexpr size_t kDefaultMemoryToolRedZoneBytes = 8;
} // namespace space
} // namespace gc
} // namespace art
-#endif // ART_RUNTIME_GC_SPACE_VALGRIND_SETTINGS_H_
+#endif // ART_RUNTIME_GC_SPACE_MEMORY_TOOL_SETTINGS_H_
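
An 8-byte red zone keeps long/double alignment and also lines up with ASan's
8-byte shadow granularity. A usage sketch of the constant together with the
generic macros (PoisonAround is a hypothetical helper, not part of this
patch):

    #include <cstddef>
    #include <cstdint>
    #include "base/memory_tool.h"
    #include "gc/space/memory_tool_settings.h"

    // Carve raw storage into [redzone | 16-byte payload | redzone].
    void PoisonAround(uint8_t* raw) {
      constexpr size_t rdz = art::gc::space::kDefaultMemoryToolRedZoneBytes;
      MEMORY_TOOL_MAKE_NOACCESS(raw, rdz);             // left redzone
      MEMORY_TOOL_MAKE_DEFINED(raw + rdz, 16);         // payload
      MEMORY_TOOL_MAKE_NOACCESS(raw + rdz + 16, rdz);  // right redzone
    }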
diff --git a/runtime/gc/space/rosalloc_space-inl.h b/runtime/gc/space/rosalloc_space-inl.h
index f94ec23..8bff2b4 100644
--- a/runtime/gc/space/rosalloc_space-inl.h
+++ b/runtime/gc/space/rosalloc_space-inl.h
@@ -17,10 +17,9 @@
#ifndef ART_RUNTIME_GC_SPACE_ROSALLOC_SPACE_INL_H_
#define ART_RUNTIME_GC_SPACE_ROSALLOC_SPACE_INL_H_
-#include <valgrind.h>
-
+#include "base/memory_tool.h"
#include "gc/allocator/rosalloc-inl.h"
-#include "gc/space/valgrind_settings.h"
+#include "gc/space/memory_tool_settings.h"
#include "rosalloc_space.h"
#include "thread.h"
@@ -28,26 +27,26 @@
namespace gc {
namespace space {
-template<bool kMaybeRunningOnValgrind>
+template<bool kMaybeIsRunningOnMemoryTool>
inline size_t RosAllocSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) {
// obj is a valid object. Use its class in the header to get the size.
// Don't use verification since the object may be dead if we are sweeping.
size_t size = obj->SizeOf<kVerifyNone>();
- bool running_on_valgrind = false;
- if (kMaybeRunningOnValgrind) {
- running_on_valgrind = RUNNING_ON_VALGRIND != 0;
- if (running_on_valgrind) {
- size += 2 * kDefaultValgrindRedZoneBytes;
+ bool add_redzones = false;
+ if (kMaybeIsRunningOnMemoryTool) {
+ add_redzones = RUNNING_ON_MEMORY_TOOL ? kMemoryToolAddsRedzones : false;
+ if (add_redzones) {
+ size += 2 * kDefaultMemoryToolRedZoneBytes;
}
} else {
- DCHECK_EQ(RUNNING_ON_VALGRIND, 0U);
+ DCHECK_EQ(RUNNING_ON_MEMORY_TOOL, 0U);
}
size_t size_by_size = rosalloc_->UsableSize(size);
if (kIsDebugBuild) {
- // On valgrind, the red zone has an impact...
+ // Under a memory tool, the red zone has an impact...
const uint8_t* obj_ptr = reinterpret_cast<const uint8_t*>(obj);
size_t size_by_ptr = rosalloc_->UsableSize(
- obj_ptr - (running_on_valgrind ? kDefaultValgrindRedZoneBytes : 0));
+ obj_ptr - (add_redzones ? kDefaultMemoryToolRedZoneBytes : 0));
if (size_by_size != size_by_ptr) {
LOG(INFO) << "Found a bad sized obj of size " << size
<< " at " << std::hex << reinterpret_cast<intptr_t>(obj_ptr) << std::dec
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index bc4414d..1a193c3 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -30,7 +30,7 @@
#include "thread.h"
#include "thread_list.h"
#include "utils.h"
-#include "valgrind_malloc_space-inl.h"
+#include "memory_tool_malloc_space-inl.h"
namespace art {
namespace gc {
@@ -43,7 +43,7 @@
static constexpr bool kVerifyFreedBytes = false;
// TODO: Fix
-// template class ValgrindMallocSpace<RosAllocSpace, allocator::RosAlloc*>;
+// template class MemoryToolMallocSpace<RosAllocSpace, allocator::RosAlloc*>;
RosAllocSpace::RosAllocSpace(MemMap* mem_map, size_t initial_size, const std::string& name,
art::gc::allocator::RosAlloc* rosalloc, uint8_t* begin, uint8_t* end,
@@ -61,10 +61,10 @@
bool low_memory_mode, bool can_move_objects) {
DCHECK(mem_map != nullptr);
- bool running_on_valgrind = Runtime::Current()->RunningOnValgrind();
+ bool running_on_memory_tool = Runtime::Current()->IsRunningOnMemoryTool();
allocator::RosAlloc* rosalloc = CreateRosAlloc(mem_map->Begin(), starting_size, initial_size,
- capacity, low_memory_mode, running_on_valgrind);
+ capacity, low_memory_mode, running_on_memory_tool);
if (rosalloc == nullptr) {
LOG(ERROR) << "Failed to initialize rosalloc for alloc space (" << name << ")";
return nullptr;
@@ -78,10 +78,10 @@
// Everything is set so record in immutable structure and leave
uint8_t* begin = mem_map->Begin();
- // TODO: Fix RosAllocSpace to support valgrind. There is currently some issues with
+ // TODO: Fix RosAllocSpace to support Valgrind/ASan. There are currently some issues with
// AllocationSize caused by redzones. b/12944686
- if (running_on_valgrind) {
- return new ValgrindMallocSpace<RosAllocSpace, kDefaultValgrindRedZoneBytes, false, true>(
+ if (running_on_memory_tool) {
+ return new MemoryToolMallocSpace<RosAllocSpace, kDefaultMemoryToolRedZoneBytes, false, true>(
mem_map, initial_size, name, rosalloc, begin, end, begin + capacity, growth_limit,
can_move_objects, starting_size, low_memory_mode);
} else {
@@ -134,7 +134,7 @@
allocator::RosAlloc* RosAllocSpace::CreateRosAlloc(void* begin, size_t morecore_start,
size_t initial_size,
size_t maximum_size, bool low_memory_mode,
- bool running_on_valgrind) {
+ bool running_on_memory_tool) {
// clear errno to allow PLOG on error
errno = 0;
// create rosalloc using our backing storage starting at begin and
@@ -145,7 +145,7 @@
low_memory_mode ?
art::gc::allocator::RosAlloc::kPageReleaseModeAll :
art::gc::allocator::RosAlloc::kPageReleaseModeSizeAndEnd,
- running_on_valgrind);
+ running_on_memory_tool);
if (rosalloc != nullptr) {
rosalloc->SetFootprintLimit(initial_size);
} else {
@@ -180,8 +180,8 @@
void* allocator, uint8_t* begin, uint8_t* end,
uint8_t* limit, size_t growth_limit,
bool can_move_objects) {
- if (Runtime::Current()->RunningOnValgrind()) {
- return new ValgrindMallocSpace<RosAllocSpace, kDefaultValgrindRedZoneBytes, false, true>(
+ if (Runtime::Current()->IsRunningOnMemoryTool()) {
+ return new MemoryToolMallocSpace<RosAllocSpace, kDefaultMemoryToolRedZoneBytes, false, true>(
mem_map, initial_size_, name, reinterpret_cast<allocator::RosAlloc*>(allocator), begin, end,
limit, growth_limit, can_move_objects, starting_size_, low_memory_mode_);
} else {
@@ -370,7 +370,7 @@
delete rosalloc_;
rosalloc_ = CreateRosAlloc(mem_map_->Begin(), starting_size_, initial_size_,
NonGrowthLimitCapacity(), low_memory_mode_,
- Runtime::Current()->RunningOnValgrind());
+ Runtime::Current()->IsRunningOnMemoryTool());
SetFootprintLimit(footprint_limit);
}
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index 36268f7..9dc6f31 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -31,7 +31,7 @@
namespace space {
// An alloc space implemented using a runs-of-slots memory allocator. Not final as may be
-// overridden by a ValgrindMallocSpace.
+// overridden by a MemoryToolMallocSpace.
class RosAllocSpace : public MallocSpace {
public:
// Create a RosAllocSpace with the requested sizes. The requested
@@ -95,7 +95,7 @@
ALWAYS_INLINE size_t MaxBytesBulkAllocatedForNonvirtual(size_t num_bytes);
// TODO: NO_THREAD_SAFETY_ANALYSIS because SizeOf() requires that mutator_lock is held.
- template<bool kMaybeRunningOnValgrind>
+ template<bool kMaybeIsRunningOnMemoryTool>
size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
NO_THREAD_SAFETY_ANALYSIS;
@@ -158,11 +158,11 @@
void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size,
size_t maximum_size, bool low_memory_mode) OVERRIDE {
return CreateRosAlloc(base, morecore_start, initial_size, maximum_size, low_memory_mode,
- RUNNING_ON_VALGRIND != 0);
+ RUNNING_ON_MEMORY_TOOL != 0);
}
static allocator::RosAlloc* CreateRosAlloc(void* base, size_t morecore_start, size_t initial_size,
size_t maximum_size, bool low_memory_mode,
- bool running_on_valgrind);
+ bool running_on_memory_tool);
void InspectAllRosAlloc(void (*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
void* arg, bool do_null_callback_at_end)