/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_MALLOC_SPACE_H_
#define ART_RUNTIME_GC_SPACE_MALLOC_SPACE_H_

#include "space.h"

#include <ostream>
#include <valgrind.h>
#include <memcheck/memcheck.h>

namespace art {
namespace gc {

namespace collector {
  class MarkSweep;
}  // namespace collector

namespace space {

class ZygoteSpace;

// TODO: Remove define macro
#define CHECK_MEMORY_CALL(call, args, what) \
  do { \
    int rc = call args; \
    if (UNLIKELY(rc != 0)) { \
      errno = rc; \
      PLOG(FATAL) << # call << " failed for " << what; \
    } \
  } while (false)
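// Illustrative use of CHECK_MEMORY_CALL (a sketch, not part of this header): it wraps a call that
// returns 0 on success, stores the non-zero return code in errno, and aborts via PLOG(FATAL).
// Hypothetical example (page_begin, num_pages, kPageSize and GetName() are placeholders):
//   CHECK_MEMORY_CALL(mprotect, (page_begin, num_pages * kPageSize, PROT_NONE), GetName());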

// A common parent of DlMallocSpace and RosAllocSpace.
class MallocSpace : public ContinuousMemMapAllocSpace {
 public:
  typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);

  SpaceType GetType() const {
    return kSpaceTypeMallocSpace;
  }

  // Allocate num_bytes allowing the underlying space to grow.
  virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes,
                                          size_t* bytes_allocated, size_t* usable_size,
                                          size_t* bytes_tl_bulk_allocated) = 0;
  // Allocate num_bytes without allowing the underlying space to grow.
  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                size_t* usable_size, size_t* bytes_tl_bulk_allocated) = 0;
  // Return the storage space required by obj. If usable_size isn't nullptr then it is set to the
  // amount of the storage space that may be used by obj.
  virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) = 0;
  virtual size_t Free(Thread* self, mirror::Object* ptr)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
  virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;

  // Returns the maximum number of bytes that could be allocated for the given size in bulk, that
  // is, the maximum value of the bytes_tl_bulk_allocated out param returned by
  // MallocSpace::Alloc().
  virtual size_t MaxBytesBulkAllocatedFor(size_t num_bytes) = 0;
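  // Illustrative caller pattern for the out params above (a sketch, not part of this API):
  //   size_t bytes_allocated, usable_size, bytes_tl_bulk_allocated;
  //   mirror::Object* obj = malloc_space->Alloc(self, num_bytes, &bytes_allocated, &usable_size,
  //                                             &bytes_tl_bulk_allocated);
  //   // bytes_allocated: bytes charged to the space for obj; usable_size: bytes usable by obj;
  //   // bytes_tl_bulk_allocated: bytes taken from the space in bulk, bounded above by
  //   // MaxBytesBulkAllocatedFor(num_bytes).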

#ifndef NDEBUG
  virtual void CheckMoreCoreForPrecondition() {}  // to be overridden in the debug build.
#else
  void CheckMoreCoreForPrecondition() {}  // no-op in the non-debug build.
#endif

  void* MoreCore(intptr_t increment);

  // Hands unused pages back to the system.
  virtual size_t Trim() = 0;

  // Perform an mspace_inspect_all which calls back for each allocation chunk. The chunk may not
  // be in use, indicated by num_bytes equaling zero.
  virtual void Walk(WalkCallback callback, void* arg) = 0;

  // Returns the number of bytes that the space has currently obtained from the system. This is
  // greater than or equal to the amount of live data in the space.
  virtual size_t GetFootprint() = 0;

  // Returns the number of bytes that the heap is allowed to obtain from the system via MoreCore.
  virtual size_t GetFootprintLimit() = 0;

  // Set the maximum number of bytes that the heap is allowed to obtain from the system via
  // MoreCore. Note this is used to stop the mspace growing beyond the limit given by Capacity().
  // When allocations fail we GC before increasing the footprint limit and allowing the mspace to
  // grow.
  virtual void SetFootprintLimit(size_t limit) = 0;
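  // Sketch of the footprint-limit pattern described above (hypothetical caller code with a
  // made-up CollectGarbage() helper; not code from this file):
  //   if (malloc_space->Alloc(self, num_bytes, ...) == nullptr) {
  //     CollectGarbage();                                             // reclaim memory first
  //     malloc_space->SetFootprintLimit(malloc_space->GetFootprint() + num_bytes);
  //     obj = malloc_space->AllocWithGrowth(self, num_bytes, ...);    // now let the mspace grow
  //   }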

  // Removes the fork time growth limit on capacity, allowing the application to allocate up to
  // the maximum reserved size of the heap.
  void ClearGrowthLimit() {
    growth_limit_ = NonGrowthLimitCapacity();
  }

  // Override capacity so that we only return the possibly limited capacity.
  size_t Capacity() const {
    return growth_limit_;
  }

  // The total amount of memory reserved for the alloc space.
  size_t NonGrowthLimitCapacity() const {
    return GetMemMap()->Size();
  }

  // Change the non-growth-limit capacity by shrinking or expanding the map. Currently, only
  // shrinking is supported.
  void ClampGrowthLimit();

  void Dump(std::ostream& os) const;

  void SetGrowthLimit(size_t growth_limit);

  virtual MallocSpace* CreateInstance(MemMap* mem_map, const std::string& name, void* allocator,
                                      uint8_t* begin, uint8_t* end, uint8_t* limit,
                                      size_t growth_limit, bool can_move_objects) = 0;

  // Splits this space into a zygote space and a new malloc space containing the unused memory.
  // When true, the low_memory_mode argument specifies that the heap wishes the created space to
  // be more aggressive in releasing unused pages. Invalidates the space it is called on.
  ZygoteSpace* CreateZygoteSpace(const char* alloc_space_name, bool low_memory_mode,
                                 MallocSpace** out_malloc_space) NO_THREAD_SAFETY_ANALYSIS;
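  // Illustrative use of the out parameter (a sketch, not code from this file):
  //   MallocSpace* new_malloc_space = nullptr;
  //   ZygoteSpace* zygote_space = old_space->CreateZygoteSpace("alloc space", low_memory_mode,
  //                                                            &new_malloc_space);
  //   // old_space is now invalid; zygote_space holds the already-used pages and
  //   // new_malloc_space serves subsequent allocations.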
  virtual uint64_t GetBytesAllocated() = 0;
  virtual uint64_t GetObjectsAllocated() = 0;

  // Returns the class of a recently freed object.
  mirror::Class* FindRecentFreedObject(const mirror::Object* obj);

  bool CanMoveObjects() const OVERRIDE {
    return can_move_objects_;
  }

  void DisableMovingObjects() {
    can_move_objects_ = false;
  }

 protected:
  MallocSpace(const std::string& name, MemMap* mem_map, uint8_t* begin, uint8_t* end,
              uint8_t* limit, size_t growth_limit, bool create_bitmaps, bool can_move_objects,
              size_t starting_size, size_t initial_size);

  static MemMap* CreateMemMap(const std::string& name, size_t starting_size, size_t* initial_size,
                              size_t* growth_limit, size_t* capacity, uint8_t* requested_begin);

  // When true, the low_memory_mode argument specifies that the heap wishes the created allocator
  // to be more aggressive in releasing unused pages.
  virtual void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size,
                                size_t maximum_size, bool low_memory_mode) = 0;

  virtual void RegisterRecentFree(mirror::Object* ptr)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(lock_);

  virtual accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() {
    return &SweepCallback;
  }

  // Buffer of recently freed objects and their classes, used by FindRecentFreedObject for
  // debugging. Only sized when kDebugSpaces is true.
  static constexpr size_t kRecentFreeCount = kDebugSpaces ? (1 << 16) : 0;
  static constexpr size_t kRecentFreeMask = kRecentFreeCount - 1;
  std::pair<const mirror::Object*, mirror::Class*> recent_freed_objects_[kRecentFreeCount];
  size_t recent_free_pos_;
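  // Sketch of the expected ring-buffer update in RegisterRecentFree (illustrative only, assuming
  // kRecentFreeCount is a non-zero power of two):
  //   recent_freed_objects_[recent_free_pos_] = std::make_pair(ptr, ptr->GetClass());
  //   recent_free_pos_ = (recent_free_pos_ + 1) & kRecentFreeMask;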

  static size_t bitmap_index_;

  // Used to ensure mutual exclusion when the allocation space's data structures are being
  // modified.
  Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  // The capacity of the alloc space until such time that ClearGrowthLimit is called.
  // The underlying mem_map_ controls the maximum size we allow the heap to grow to. The growth
  // limit is a value <= the mem_map_ capacity, used for ergonomic reasons because of the zygote.
  // Prior to forking the zygote the heap will have a maximally sized mem_map_, but the
  // growth_limit_ will be set to a lower value. The growth_limit_ is used as the capacity of the
  // alloc_space_; however, capacity normally can't vary. In the case of the growth_limit_ it can
  // be cleared one time by a call to ClearGrowthLimit.
  size_t growth_limit_;

  // True if objects in the space are movable.
  bool can_move_objects_;

  // Starting and initial sizes, used when the space is reset.
  const size_t starting_size_;
  const size_t initial_size_;

 private:
  static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  DISALLOW_COPY_AND_ASSIGN(MallocSpace);
};

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_MALLOC_SPACE_H_