/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_MALLOC_SPACE_H_
#define ART_RUNTIME_GC_SPACE_MALLOC_SPACE_H_

#include "space.h"

#include <valgrind.h>
#include <memcheck/memcheck.h>

namespace art {
namespace gc {

namespace collector {
  class MarkSweep;
}  // namespace collector

namespace space {

// TODO: Remove define macro
#define CHECK_MEMORY_CALL(call, args, what) \
  do { \
    int rc = call args; \
    if (UNLIKELY(rc != 0)) { \
      errno = rc; \
      PLOG(FATAL) << # call << " failed for " << what; \
    } \
  } while (false)
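
// A minimal sketch of how the macro is meant to be invoked (hypothetical call
// site, not part of this header), assuming a memory syscall that returns
// non-zero on failure:
//   CHECK_MEMORY_CALL(mprotect, (page_begin, page_size, PROT_NONE), GetName());
// On failure this aborts with a PLOG(FATAL) that names the call and the space.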

// const bool kUseRosAlloc = true;

// A common parent of DlMallocSpace and RosAllocSpace.
class MallocSpace : public ContinuousMemMapAllocSpace {
 public:
  typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);

  SpaceType GetType() const {
    if (GetGcRetentionPolicy() == kGcRetentionPolicyFullCollect) {
      return kSpaceTypeZygoteSpace;
    } else {
      return kSpaceTypeAllocSpace;
    }
  }

  // Allocate num_bytes without allowing the underlying space to grow.
  virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes,
                                          size_t* bytes_allocated) = 0;
  // Allocate num_bytes allowing the underlying space to grow.
  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated) = 0;
  // Return the storage space required by obj.
  virtual size_t AllocationSize(const mirror::Object* obj) = 0;
  virtual size_t Free(Thread* self, mirror::Object* ptr) = 0;
  virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) = 0;

#ifndef NDEBUG
  virtual void CheckMoreCoreForPrecondition() {}  // to be overridden in the debug build.
#else
  void CheckMoreCoreForPrecondition() {}  // no-op in the non-debug build.
#endif

  void* MoreCore(intptr_t increment);

  // Hands unused pages back to the system.
  virtual size_t Trim() = 0;

  // Perform an mspace_inspect_all, which calls back for each allocation chunk. The chunk may not
  // be in use, indicated by num_bytes equaling zero.
  virtual void Walk(WalkCallback callback, void* arg) = 0;

  // Returns the number of bytes that the space has currently obtained from the system. This is
  // greater than or equal to the amount of live data in the space.
  virtual size_t GetFootprint() = 0;

  // Returns the number of bytes that the heap is allowed to obtain from the system via MoreCore.
  virtual size_t GetFootprintLimit() = 0;

  // Set the maximum number of bytes that the heap is allowed to obtain from the system via
  // MoreCore. Note that this is used to stop the mspace from growing beyond Capacity(). When
  // allocations fail, we GC before increasing the footprint limit and allowing the mspace to grow.
  virtual void SetFootprintLimit(size_t limit) = 0;
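  // A rough sketch (assumed from the comment above, not a quotation of the heap code) of the
  // order of operations on allocation failure:
  //   GC runs  ->  SetFootprintLimit(Capacity())  ->  AllocWithGrowth(self, num_bytes, ...)
  // i.e. the footprint limit is only raised after a collection has had a chance to free space.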

  // Removes the fork-time growth limit on capacity, allowing the application to allocate up to
  // the maximum reserved size of the heap.
  void ClearGrowthLimit() {
    growth_limit_ = NonGrowthLimitCapacity();
  }

  // Override Capacity so that we only return the possibly limited capacity.
  size_t Capacity() const {
    return growth_limit_;
  }

  // The total amount of memory reserved for the alloc space.
  size_t NonGrowthLimitCapacity() const {
    return GetMemMap()->Size();
  }

  accounting::SpaceBitmap* GetLiveBitmap() const {
    return live_bitmap_.get();
  }

  accounting::SpaceBitmap* GetMarkBitmap() const {
    return mark_bitmap_.get();
  }

  void Dump(std::ostream& os) const;

  void SetGrowthLimit(size_t growth_limit);

  // Swap the live and mark bitmaps of this space. This is used by the GC for concurrent sweeping.
  void SwapBitmaps();

  virtual MallocSpace* CreateInstance(const std::string& name, MemMap* mem_map, void* allocator,
                                      byte* begin, byte* end, byte* limit, size_t growth_limit) = 0;

  // Turn this space into a zygote space and return a new alloc space which receives our unused
  // memory. When true, the low_memory_mode argument specifies that the heap wishes the created
  // space to be more aggressive in releasing unused pages.
  MallocSpace* CreateZygoteSpace(const char* alloc_space_name, bool low_memory_mode);

  virtual uint64_t GetBytesAllocated() = 0;
  virtual uint64_t GetObjectsAllocated() = 0;

  // Returns the old mark bitmap.
  accounting::SpaceBitmap* BindLiveToMarkBitmap();
  bool HasBoundBitmaps() const;
  void UnBindBitmaps();

  // Returns the class of a recently freed object.
  mirror::Class* FindRecentFreedObject(const mirror::Object* obj);

  // Used to ensure that failure happens when you free / allocate into an invalidated space. If we
  // don't do this we may get heap corruption instead of a segfault at null.
  virtual void InvalidateAllocator() = 0;

  // Sweep the references in the malloc space.
  void Sweep(bool swap_bitmaps, size_t* freed_objects, size_t* freed_bytes);

 protected:
  MallocSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end,
              byte* limit, size_t growth_limit);

  static MemMap* CreateMemMap(const std::string& name, size_t starting_size, size_t* initial_size,
                              size_t* growth_limit, size_t* capacity, byte* requested_begin);

  // When true, the low_memory_mode argument specifies that the heap wishes the created allocator
  // to be more aggressive in releasing unused pages.
  virtual void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size,
                                bool low_memory_mode) = 0;

  void RegisterRecentFree(mirror::Object* ptr) EXCLUSIVE_LOCKS_REQUIRED(lock_);

  UniquePtr<accounting::SpaceBitmap> live_bitmap_;
  UniquePtr<accounting::SpaceBitmap> mark_bitmap_;
  UniquePtr<accounting::SpaceBitmap> temp_bitmap_;

  // Recent allocation buffer.
  static constexpr size_t kRecentFreeCount = kDebugSpaces ? (1 << 16) : 0;
  static constexpr size_t kRecentFreeMask = kRecentFreeCount - 1;
  std::pair<const mirror::Object*, mirror::Class*> recent_freed_objects_[kRecentFreeCount];
  size_t recent_free_pos_;

  static size_t bitmap_index_;

  // Used to ensure mutual exclusion when the allocation space's data structures are being modified.
  Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  // The capacity of the alloc space until such time that ClearGrowthLimit is called.
  // The underlying mem_map_ controls the maximum size we allow the heap to grow to. The growth
  // limit is a value <= the mem_map_ capacity, used for ergonomic reasons because of the zygote.
  // Prior to forking the zygote, the heap will have a maximally sized mem_map_, but the
  // growth_limit_ will be set to a lower value. The growth_limit_ acts as the capacity of the
  // alloc_space_; unlike a normal capacity it can change, but only once: a call to
  // ClearGrowthLimit raises it to the full mem_map_ capacity.
  size_t growth_limit_;
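  // Purely illustrative numbers (not taken from the runtime): the zygote might reserve a
  // 512 MiB mem_map_ while growth_limit_ starts at 64 MiB, so Capacity() reports 64 MiB until
  // ClearGrowthLimit() is called, after which Capacity() == NonGrowthLimitCapacity(), i.e. the
  // full mem_map_ size.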

  friend class collector::MarkSweep;

 private:
  DISALLOW_COPY_AND_ASSIGN(MallocSpace);
};

// Number of bytes to use as a red zone (rdz). A red zone of this size will be placed before and
// after each allocation. 8 bytes provides long/double alignment.
static constexpr size_t kValgrindRedZoneBytes = 8;
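
// Sketch of the allocation layout produced by the Alloc paths below, for a request of num_bytes:
//   | red zone (8 bytes) | object payload (num_bytes) | red zone (8 bytes) |
// Callers receive a pointer to the payload; both red zones are marked NOACCESS so Valgrind
// reports any access that strays into them.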

// A specialization of DlMallocSpace/RosAllocSpace that provides information to Valgrind about
// allocations.
template <typename BaseMallocSpaceType, typename AllocatorType>
class ValgrindMallocSpace : public BaseMallocSpaceType {
 public:
  virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated) {
    void* obj_with_rdz = BaseMallocSpaceType::AllocWithGrowth(self, num_bytes + 2 * kValgrindRedZoneBytes,
                                                              bytes_allocated);
    if (obj_with_rdz == NULL) {
      return NULL;
    }
    mirror::Object* result = reinterpret_cast<mirror::Object*>(
        reinterpret_cast<byte*>(obj_with_rdz) + kValgrindRedZoneBytes);
    // Mark the red zones as no-access.
    VALGRIND_MAKE_MEM_NOACCESS(obj_with_rdz, kValgrindRedZoneBytes);
    VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<byte*>(result) + num_bytes, kValgrindRedZoneBytes);
    return result;
  }

  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated) {
    void* obj_with_rdz = BaseMallocSpaceType::Alloc(self, num_bytes + 2 * kValgrindRedZoneBytes,
                                                    bytes_allocated);
    if (obj_with_rdz == NULL) {
      return NULL;
    }
    mirror::Object* result = reinterpret_cast<mirror::Object*>(
        reinterpret_cast<byte*>(obj_with_rdz) + kValgrindRedZoneBytes);
    // Mark the red zones as no-access.
    VALGRIND_MAKE_MEM_NOACCESS(obj_with_rdz, kValgrindRedZoneBytes);
    VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<byte*>(result) + num_bytes, kValgrindRedZoneBytes);
    return result;
  }

  virtual size_t AllocationSize(const mirror::Object* obj) {
    size_t result = BaseMallocSpaceType::AllocationSize(reinterpret_cast<const mirror::Object*>(
        reinterpret_cast<const byte*>(obj) - kValgrindRedZoneBytes));
    return result - 2 * kValgrindRedZoneBytes;
  }

  virtual size_t Free(Thread* self, mirror::Object* ptr) {
    void* obj_after_rdz = reinterpret_cast<void*>(ptr);
    void* obj_with_rdz = reinterpret_cast<byte*>(obj_after_rdz) - kValgrindRedZoneBytes;
    // Mark the whole allocation, including the red zones, as undefined before freeing it.
    size_t allocation_size = BaseMallocSpaceType::AllocationSize(
        reinterpret_cast<mirror::Object*>(obj_with_rdz));
    VALGRIND_MAKE_MEM_UNDEFINED(obj_with_rdz, allocation_size);
    size_t freed = BaseMallocSpaceType::Free(self, reinterpret_cast<mirror::Object*>(obj_with_rdz));
    return freed - 2 * kValgrindRedZoneBytes;
  }

  virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
    size_t freed = 0;
    for (size_t i = 0; i < num_ptrs; i++) {
      freed += Free(self, ptrs[i]);
    }
    return freed;
  }

  ValgrindMallocSpace(const std::string& name, MemMap* mem_map, AllocatorType allocator, byte* begin,
                      byte* end, byte* limit, size_t growth_limit, size_t initial_size) :
      BaseMallocSpaceType(name, mem_map, allocator, begin, end, limit, growth_limit) {
    VALGRIND_MAKE_MEM_UNDEFINED(mem_map->Begin() + initial_size, mem_map->Size() - initial_size);
  }

  virtual ~ValgrindMallocSpace() {
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(ValgrindMallocSpace);
};

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_MALLOC_SPACE_H_