blob: 9a42e2cd885e08b96ecf589fce527264188b2b5c [file] [log] [blame]
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001/*
2 * Copyright (C) 2013 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#ifndef ART_RUNTIME_GC_SPACE_MALLOC_SPACE_H_
18#define ART_RUNTIME_GC_SPACE_MALLOC_SPACE_H_
19
20#include "space.h"
21
Hiroshi Yamauchi7cb7bbc2013-11-18 17:27:37 -080022#include <valgrind.h>
23#include <memcheck/memcheck.h>
24
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -070025namespace art {
26namespace gc {
27
28namespace collector {
29 class MarkSweep;
30} // namespace collector
31
32namespace space {
33
Mathieu Chartiera1602f22014-01-13 17:19:19 -080034class ZygoteSpace;
35
// Invokes `call args` (e.g. CHECK_MEMORY_CALL(mprotect, (addr, len, prot), name)) and, if the
// call returns a non-zero code, stores that code into errno and aborts via PLOG(FATAL), naming
// both the failed call and `what`. The do/while(false) wrapper makes the expansion behave as a
// single statement (safe inside un-braced if/else).
// TODO: Remove define macro
#define CHECK_MEMORY_CALL(call, args, what) \
  do { \
    int rc = call args; \
    if (UNLIKELY(rc != 0)) { \
      errno = rc; \
      PLOG(FATAL) << # call << " failed for " << what; \
    } \
  } while (false)
45
// A common parent of DlMallocSpace and RosAllocSpace.
class MallocSpace : public ContinuousMemMapAllocSpace {
 public:
  // Callback type used by Walk(): receives a chunk's [start, end) bounds, the number of bytes
  // in use within the chunk (zero when the chunk is free), and the caller-supplied argument.
  typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);

  SpaceType GetType() const {
    return kSpaceTypeMallocSpace;
  }

  // Allocate num_bytes, allowing the underlying space to grow.
  virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes,
                                          size_t* bytes_allocated) = 0;
  // Allocate num_bytes without allowing the underlying space to grow.
  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated) = 0;
  // Return the storage space required by obj.
  virtual size_t AllocationSize(const mirror::Object* obj) = 0;
  // Free a single object; returns the number of bytes released.
  virtual size_t Free(Thread* self, mirror::Object* ptr) = 0;
  // Free num_ptrs objects in one call; returns the total number of bytes released.
  virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) = 0;

#ifndef NDEBUG
  virtual void CheckMoreCoreForPrecondition() {}  // to be overridden in the debug build.
#else
  void CheckMoreCoreForPrecondition() {}  // no-op in the non-debug build.
#endif

  // Grows (or, presumably for a negative increment, shrinks) the space by `increment` bytes on
  // behalf of the underlying allocator. Defined out of line — TODO confirm exact contract there.
  void* MoreCore(intptr_t increment);

  // Hands unused pages back to the system.
  virtual size_t Trim() = 0;

  // Perform a mspace_inspect_all which calls back for each allocation chunk. The chunk may not be
  // in use, indicated by num_bytes equaling zero.
  virtual void Walk(WalkCallback callback, void* arg) = 0;

  // Returns the number of bytes that the space has currently obtained from the system. This is
  // greater or equal to the amount of live data in the space.
  virtual size_t GetFootprint() = 0;

  // Returns the number of bytes that the heap is allowed to obtain from the system via MoreCore.
  virtual size_t GetFootprintLimit() = 0;

  // Set the maximum number of bytes that the heap is allowed to obtain from the system via
  // MoreCore. Note this is used to stop the mspace growing beyond the limit to Capacity. When
  // allocations fail we GC before increasing the footprint limit and allowing the mspace to grow.
  virtual void SetFootprintLimit(size_t limit) = 0;

  // Removes the fork time growth limit on capacity, allowing the application to allocate up to the
  // maximum reserved size of the heap.
  void ClearGrowthLimit() {
    growth_limit_ = NonGrowthLimitCapacity();
  }

  // Override capacity so that we only return the possibly limited capacity
  size_t Capacity() const {
    return growth_limit_;
  }

  // The total amount of memory reserved for the alloc space.
  size_t NonGrowthLimitCapacity() const {
    return GetMemMap()->Size();
  }

  // Print a human-readable description of the space to os (defined out of line).
  void Dump(std::ostream& os) const;

  // Set growth_limit_ directly; see the growth_limit_ field comment below for its semantics.
  void SetGrowthLimit(size_t growth_limit);

  // Create a new space of the same concrete type as `this` over the given mem_map/allocator;
  // used e.g. when splitting a space. Implemented by each subclass.
  virtual MallocSpace* CreateInstance(const std::string& name, MemMap* mem_map, void* allocator,
                                      byte* begin, byte* end, byte* limit, size_t growth_limit) = 0;

  // Splits ourself into a zygote space and new malloc space which has our unused memory. When true,
  // the low memory mode argument specifies that the heap wishes the created space to be more
  // aggressive in releasing unused pages. Invalidates the space its called on.
  ZygoteSpace* CreateZygoteSpace(const char* alloc_space_name, bool low_memory_mode,
                                 MallocSpace** out_malloc_space) NO_THREAD_SAFETY_ANALYSIS;

  // Total bytes currently allocated from this space.
  virtual uint64_t GetBytesAllocated() = 0;
  // Total objects currently allocated from this space.
  virtual uint64_t GetObjectsAllocated() = 0;

  // Returns the class of a recently freed object.
  mirror::Class* FindRecentFreedObject(const mirror::Object* obj);

 protected:
  MallocSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end,
              byte* limit, size_t growth_limit, bool create_bitmaps = true);

  // Reserve the address range for the space. On success *initial_size, *growth_limit and
  // *capacity may be adjusted (out-params) — defined out of line.
  static MemMap* CreateMemMap(const std::string& name, size_t starting_size, size_t* initial_size,
                              size_t* growth_limit, size_t* capacity, byte* requested_begin);

  // When true the low memory mode argument specifies that the heap
  // wishes the created allocator to be more aggressive in releasing
  // unused pages.
  virtual void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size,
                                bool low_memory_mode) = 0;

  // Record ptr (and its class) in the recent-free ring buffer for debugging; caller holds lock_.
  void RegisterRecentFree(mirror::Object* ptr) EXCLUSIVE_LOCKS_REQUIRED(lock_);

  virtual accounting::SpaceBitmap::SweepCallback* GetSweepCallback() {
    return &SweepCallback;
  }

  // Recent allocation buffer. Sized only in debug builds; kRecentFreeMask wraps indices
  // (note: with kRecentFreeCount == 0 the mask underflows, but the buffer is unused then).
  static constexpr size_t kRecentFreeCount = kDebugSpaces ? (1 << 16) : 0;
  static constexpr size_t kRecentFreeMask = kRecentFreeCount - 1;
  std::pair<const mirror::Object*, mirror::Class*> recent_freed_objects_[kRecentFreeCount];
  size_t recent_free_pos_;

  // Counter used to generate unique bitmap names across spaces — TODO confirm against the .cc.
  static size_t bitmap_index_;

  // Used to ensure mutual exclusion when the allocation spaces data structures are being modified.
  Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  // The capacity of the alloc space until such time that ClearGrowthLimit is called.
  // The underlying mem_map_ controls the maximum size we allow the heap to grow to. The growth
  // limit is a value <= to the mem_map_ capacity used for ergonomic reasons because of the zygote.
  // Prior to forking the zygote the heap will have a maximally sized mem_map_ but the growth_limit_
  // will be set to a lower value. The growth_limit_ is used as the capacity of the alloc_space_,
  // however, capacity normally can't vary. In the case of the growth_limit_ it can be cleared
  // one time by a call to ClearGrowthLimit.
  size_t growth_limit_;

 private:
  // Sweep callback handed to the bitmap sweeper; frees the given objects (defined out of line).
  static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg);

  DISALLOW_COPY_AND_ASSIGN(MallocSpace);
};
170
// Number of bytes to use as a red zone (rdz). A red zone of this size will be placed before and
// after each allocation. 8 bytes provides long/double alignment. The zones are marked
// NOACCESS below so valgrind reports any buffer under/overrun touching them.
static constexpr size_t kValgrindRedZoneBytes = 8;
174
175// A specialization of DlMallocSpace/RosAllocSpace that provides information to valgrind wrt allocations.
176template <typename BaseMallocSpaceType, typename AllocatorType>
177class ValgrindMallocSpace : public BaseMallocSpaceType {
178 public:
179 virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated) {
180 void* obj_with_rdz = BaseMallocSpaceType::AllocWithGrowth(self, num_bytes + 2 * kValgrindRedZoneBytes,
181 bytes_allocated);
182 if (obj_with_rdz == NULL) {
183 return NULL;
184 }
185 mirror::Object* result = reinterpret_cast<mirror::Object*>(
186 reinterpret_cast<byte*>(obj_with_rdz) + kValgrindRedZoneBytes);
187 // Make redzones as no access.
188 VALGRIND_MAKE_MEM_NOACCESS(obj_with_rdz, kValgrindRedZoneBytes);
189 VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<byte*>(result) + num_bytes, kValgrindRedZoneBytes);
190 return result;
191 }
192
193 virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated) {
194 void* obj_with_rdz = BaseMallocSpaceType::Alloc(self, num_bytes + 2 * kValgrindRedZoneBytes,
195 bytes_allocated);
196 if (obj_with_rdz == NULL) {
197 return NULL;
198 }
199 mirror::Object* result = reinterpret_cast<mirror::Object*>(
200 reinterpret_cast<byte*>(obj_with_rdz) + kValgrindRedZoneBytes);
201 // Make redzones as no access.
202 VALGRIND_MAKE_MEM_NOACCESS(obj_with_rdz, kValgrindRedZoneBytes);
203 VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<byte*>(result) + num_bytes, kValgrindRedZoneBytes);
204 return result;
205 }
206
207 virtual size_t AllocationSize(const mirror::Object* obj) {
208 size_t result = BaseMallocSpaceType::AllocationSize(reinterpret_cast<const mirror::Object*>(
209 reinterpret_cast<const byte*>(obj) - kValgrindRedZoneBytes));
210 return result - 2 * kValgrindRedZoneBytes;
211 }
212
213 virtual size_t Free(Thread* self, mirror::Object* ptr) {
214 void* obj_after_rdz = reinterpret_cast<void*>(ptr);
215 void* obj_with_rdz = reinterpret_cast<byte*>(obj_after_rdz) - kValgrindRedZoneBytes;
216 // Make redzones undefined.
217 size_t allocation_size = BaseMallocSpaceType::AllocationSize(
218 reinterpret_cast<mirror::Object*>(obj_with_rdz));
219 VALGRIND_MAKE_MEM_UNDEFINED(obj_with_rdz, allocation_size);
220 size_t freed = BaseMallocSpaceType::Free(self, reinterpret_cast<mirror::Object*>(obj_with_rdz));
221 return freed - 2 * kValgrindRedZoneBytes;
222 }
223
224 virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
225 size_t freed = 0;
226 for (size_t i = 0; i < num_ptrs; i++) {
227 freed += Free(self, ptrs[i]);
228 }
229 return freed;
230 }
231
232 ValgrindMallocSpace(const std::string& name, MemMap* mem_map, AllocatorType allocator, byte* begin,
233 byte* end, byte* limit, size_t growth_limit, size_t initial_size) :
234 BaseMallocSpaceType(name, mem_map, allocator, begin, end, limit, growth_limit) {
235 VALGRIND_MAKE_MEM_UNDEFINED(mem_map->Begin() + initial_size, mem_map->Size() - initial_size);
236 }
237
238 virtual ~ValgrindMallocSpace() {
239 }
240
241 private:
242 DISALLOW_COPY_AND_ASSIGN(ValgrindMallocSpace);
243};
244
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700245} // namespace space
246} // namespace gc
247} // namespace art
248
Hiroshi Yamauchie5eedcb2013-11-18 11:55:39 -0800249#endif // ART_RUNTIME_GC_SPACE_MALLOC_SPACE_H_