/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_MALLOC_SPACE_H_
#define ART_RUNTIME_GC_SPACE_MALLOC_SPACE_H_

#include "space.h"

#include <valgrind.h>
#include <memcheck/memcheck.h>

namespace art {
namespace gc {

namespace collector {
  class MarkSweep;
}  // namespace collector

namespace space {

class ZygoteSpace;

// TODO: Remove this #define macro.
#define CHECK_MEMORY_CALL(call, args, what) \
  do { \
    int rc = call args; \
    if (UNLIKELY(rc != 0)) { \
      errno = rc; \
      PLOG(FATAL) << # call << " failed for " << what; \
    } \
  } while (false)
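
// A usage sketch (illustrative; this call site is hypothetical). Any non-zero return value
// is fatal: the macro stores it in errno so PLOG can print the failing call and "what":
//
//   CHECK_MEMORY_CALL(madvise, (base, length, MADV_DONTNEED), "releasing unused pages");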

// A common parent of DlMallocSpace and RosAllocSpace.
class MallocSpace : public ContinuousMemMapAllocSpace {
 public:
  typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);

  SpaceType GetType() const {
    return kSpaceTypeMallocSpace;
  }

  // Allocate num_bytes allowing the underlying space to grow.
  virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes,
                                          size_t* bytes_allocated) = 0;
  // Allocate num_bytes without allowing the underlying space to grow.
  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated) = 0;
  // Returns the storage space required by obj.
  virtual size_t AllocationSize(mirror::Object* obj) = 0;
  virtual size_t Free(Thread* self, mirror::Object* ptr)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
  virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;

#ifndef NDEBUG
  virtual void CheckMoreCoreForPrecondition() {}  // To be overridden in the debug build.
#else
  void CheckMoreCoreForPrecondition() {}  // A no-op in the non-debug build.
#endif

  void* MoreCore(intptr_t increment);

  // Hands unused pages back to the system.
  virtual size_t Trim() = 0;

  // Perform an mspace_inspect_all which calls back for each allocation chunk. A chunk may not
  // be in use, which is indicated by num_bytes equaling zero.
  virtual void Walk(WalkCallback callback, void* arg) = 0;
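
  // A minimal sketch (not part of the original header) of a Walk() callback that sums the
  // bytes still in use; routing a size_t accumulator through "arg" is an assumption of this
  // example:
  //
  //   void SumInUse(void* start, void* end, size_t num_bytes, void* arg) {
  //     if (num_bytes != 0) {  // num_bytes == 0 marks a chunk that is not in use.
  //       *reinterpret_cast<size_t*>(arg) += num_bytes;
  //     }
  //   }
  //
  //   size_t in_use = 0;
  //   space->Walk(SumInUse, &in_use);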

  // Returns the number of bytes that the space has currently obtained from the system. This is
  // greater than or equal to the amount of live data in the space.
  virtual size_t GetFootprint() = 0;

  // Returns the number of bytes that the heap is allowed to obtain from the system via MoreCore.
  virtual size_t GetFootprintLimit() = 0;

  // Sets the maximum number of bytes that the heap is allowed to obtain from the system via
  // MoreCore. This is used to stop the mspace from growing beyond Capacity. When allocations
  // fail we GC before increasing the footprint limit and allowing the mspace to grow.
  virtual void SetFootprintLimit(size_t limit) = 0;
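
  // A sketch of that slow path (the exact sequence lives in the heap, not in this class):
  //   1. Alloc() fails because the current footprint limit has been reached.
  //   2. The heap runs a GC and retries the allocation.
  //   3. Only if that also fails does the heap raise the limit via SetFootprintLimit() and
  //      retry with AllocWithGrowth(), up to Capacity().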

  // Removes the fork-time growth limit on capacity, allowing the application to allocate up to
  // the maximum reserved size of the heap.
  void ClearGrowthLimit() {
    growth_limit_ = NonGrowthLimitCapacity();
  }

  // Overrides Capacity() so that we only return the possibly limited capacity.
  size_t Capacity() const {
    return growth_limit_;
  }

  // The total amount of memory reserved for the alloc space.
  size_t NonGrowthLimitCapacity() const {
    return GetMemMap()->Size();
  }

  void Dump(std::ostream& os) const;

  void SetGrowthLimit(size_t growth_limit);

  virtual MallocSpace* CreateInstance(const std::string& name, MemMap* mem_map, void* allocator,
                                      byte* begin, byte* end, byte* limit,
                                      size_t growth_limit) = 0;

  // Splits this space into a ZygoteSpace and a new malloc space containing the remaining unused
  // memory. When true, the low_memory_mode argument specifies that the heap wishes the created
  // space to be more aggressive in releasing unused pages. Invalidates the space it is called on.
  ZygoteSpace* CreateZygoteSpace(const char* alloc_space_name, bool low_memory_mode,
                                 MallocSpace** out_malloc_space) NO_THREAD_SAFETY_ANALYSIS;
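
  // Illustrative use (a hypothetical call site): at zygote fork time the heap might do
  //
  //   MallocSpace* new_space = nullptr;
  //   ZygoteSpace* zygote_space =
  //       old_space->CreateZygoteSpace("alloc space", low_memory_mode, &new_space);
  //
  // after which old_space must not be used again, since it has been invalidated.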

  virtual uint64_t GetBytesAllocated() = 0;
  virtual uint64_t GetObjectsAllocated() = 0;

  // Returns the class of a recently freed object.
  mirror::Class* FindRecentFreedObject(const mirror::Object* obj);

 protected:
  MallocSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end,
              byte* limit, size_t growth_limit, bool create_bitmaps = true);

  static MemMap* CreateMemMap(const std::string& name, size_t starting_size, size_t* initial_size,
                              size_t* growth_limit, size_t* capacity, byte* requested_begin);

  // When true, the low_memory_mode argument specifies that the heap wishes the created allocator
  // to be more aggressive in releasing unused pages.
  virtual void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size,
                                bool low_memory_mode) = 0;

  void RegisterRecentFree(mirror::Object* ptr)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(lock_);

  virtual accounting::SpaceBitmap::SweepCallback* GetSweepCallback() {
    return &SweepCallback;
  }

  // Recent allocation buffer.
  static constexpr size_t kRecentFreeCount = kDebugSpaces ? (1 << 16) : 0;
  static constexpr size_t kRecentFreeMask = kRecentFreeCount - 1;
  std::pair<const mirror::Object*, mirror::Class*> recent_freed_objects_[kRecentFreeCount];
  size_t recent_free_pos_;
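
  // kRecentFreeCount is a power of two, so the buffer presumably acts as a ring: a new entry
  // goes to recent_freed_objects_[recent_free_pos_] and the cursor advances with
  // recent_free_pos_ = (recent_free_pos_ + 1) & kRecentFreeMask (a sketch; the actual logic
  // lives in RegisterRecentFree in the .cc file).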

  static size_t bitmap_index_;

  // Used to ensure mutual exclusion when the allocation space's data structures are being
  // modified.
  Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  // The capacity of the alloc space until such time that ClearGrowthLimit is called.
  // The underlying mem_map_ controls the maximum size we allow the heap to grow to. The growth
  // limit is a value <= the mem_map_ capacity, used for ergonomic reasons because of the zygote.
  // Prior to forking the zygote the heap will have a maximally sized mem_map_, but the
  // growth_limit_ will be set to a lower value. The growth_limit_ is used as the capacity of the
  // alloc space; however, unlike a normal capacity it can change, being cleared (at most once)
  // by a call to ClearGrowthLimit.
  size_t growth_limit_;
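
  // A sketch of the lifecycle described above (the trigger for ClearGrowthLimit is an
  // assumption of this note): the zygote boots with a maximally sized mem_map_ but a small
  // growth_limit_, every forked app inherits that limit, and an app that needs the full
  // reservation (e.g. one granted a large heap) calls ClearGrowthLimit(), after which
  // Capacity() == NonGrowthLimitCapacity().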

 private:
  static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  DISALLOW_COPY_AND_ASSIGN(MallocSpace);
};

// Number of bytes to use as a red zone (rdz). A red zone of this size will be placed before and
// after each allocation. 8 bytes provides long/double alignment.
static constexpr size_t kValgrindRedZoneBytes = 8;
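
// A sketch of the resulting layout for an allocation of num_bytes:
//
//   | rdz (8 bytes) | object (num_bytes) | rdz (8 bytes) |
//   ^ obj_with_rdz   ^ result handed back to the caller
//
// Alloc/AllocWithGrowth below request num_bytes + 2 * kValgrindRedZoneBytes and mark both
// red zones NOACCESS, so valgrind reports any read or write that strays past the object.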

// A specialization of DlMallocSpace/RosAllocSpace that provides information about allocations
// to valgrind.
template <typename BaseMallocSpaceType, typename AllocatorType>
class ValgrindMallocSpace : public BaseMallocSpaceType {
 public:
  virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes,
                                          size_t* bytes_allocated) {
    void* obj_with_rdz = BaseMallocSpaceType::AllocWithGrowth(
        self, num_bytes + 2 * kValgrindRedZoneBytes, bytes_allocated);
    if (obj_with_rdz == NULL) {
      return NULL;
    }
    mirror::Object* result = reinterpret_cast<mirror::Object*>(
        reinterpret_cast<byte*>(obj_with_rdz) + kValgrindRedZoneBytes);
    // Mark the red zones as no-access.
    VALGRIND_MAKE_MEM_NOACCESS(obj_with_rdz, kValgrindRedZoneBytes);
    VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<byte*>(result) + num_bytes, kValgrindRedZoneBytes);
    return result;
  }

  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated) {
    void* obj_with_rdz = BaseMallocSpaceType::Alloc(self, num_bytes + 2 * kValgrindRedZoneBytes,
                                                    bytes_allocated);
    if (obj_with_rdz == NULL) {
      return NULL;
    }
    mirror::Object* result = reinterpret_cast<mirror::Object*>(
        reinterpret_cast<byte*>(obj_with_rdz) + kValgrindRedZoneBytes);
    // Mark the red zones as no-access.
    VALGRIND_MAKE_MEM_NOACCESS(obj_with_rdz, kValgrindRedZoneBytes);
    VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<byte*>(result) + num_bytes, kValgrindRedZoneBytes);
    return result;
  }

  virtual size_t AllocationSize(mirror::Object* obj) {
    size_t result = BaseMallocSpaceType::AllocationSize(reinterpret_cast<mirror::Object*>(
        reinterpret_cast<byte*>(obj) - kValgrindRedZoneBytes));
    return result - 2 * kValgrindRedZoneBytes;
  }

  virtual size_t Free(Thread* self, mirror::Object* ptr)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    void* obj_after_rdz = reinterpret_cast<void*>(ptr);
    void* obj_with_rdz = reinterpret_cast<byte*>(obj_after_rdz) - kValgrindRedZoneBytes;
    size_t allocation_size = BaseMallocSpaceType::AllocationSize(
        reinterpret_cast<mirror::Object*>(obj_with_rdz));
    // Make the whole allocation, red zones included, accessible (undefined) again before freeing.
    VALGRIND_MAKE_MEM_UNDEFINED(obj_with_rdz, allocation_size);
    size_t freed = BaseMallocSpaceType::Free(self, reinterpret_cast<mirror::Object*>(obj_with_rdz));
    return freed - 2 * kValgrindRedZoneBytes;
  }

  virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    size_t freed = 0;
    for (size_t i = 0; i < num_ptrs; i++) {
      freed += Free(self, ptrs[i]);
    }
    return freed;
  }

  ValgrindMallocSpace(const std::string& name, MemMap* mem_map, AllocatorType allocator,
                      byte* begin, byte* end, byte* limit, size_t growth_limit,
                      size_t initial_size)
      : BaseMallocSpaceType(name, mem_map, allocator, begin, end, limit, growth_limit) {
    VALGRIND_MAKE_MEM_UNDEFINED(mem_map->Begin() + initial_size, mem_map->Size() - initial_size);
  }

  virtual ~ValgrindMallocSpace() {
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(ValgrindMallocSpace);
};

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_MALLOC_SPACE_H_