/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_DLMALLOC_SPACE_H_
#define ART_RUNTIME_GC_SPACE_DLMALLOC_SPACE_H_

#include "gc/allocator/dlmalloc.h"
#include "space.h"

namespace art {
namespace gc {

namespace collector {
  class MarkSweep;
}  // namespace collector

namespace space {

// An alloc space is a space where objects may be allocated and garbage collected.
class DlMallocSpace : public ContinuousMemMapAllocSpace {
 public:
  typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);

  SpaceType GetType() const {
    if (GetGcRetentionPolicy() == kGcRetentionPolicyFullCollect) {
      return kSpaceTypeZygoteSpace;
    } else {
      return kSpaceTypeAllocSpace;
    }
  }

  // Create an AllocSpace with the requested sizes. The requested
  // base address is not guaranteed to be granted; if it is required,
  // the caller should call Begin on the returned space to confirm
  // the request was granted.
  static DlMallocSpace* Create(const std::string& name, size_t initial_size, size_t growth_limit,
                               size_t capacity, byte* requested_begin);
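  //
  // A minimal usage sketch (illustrative only; the name, sizes and the null
  // requested_begin below are arbitrary values, not taken from the runtime):
  //
  //   DlMallocSpace* space = DlMallocSpace::Create(
  //       "alloc space", 4 * 1024 * 1024, 64 * 1024 * 1024, 64 * 1024 * 1024, nullptr);
  //   CHECK(space != nullptr);
  //   byte* begin = space->Begin();  // Where the space was actually placed.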

  // Allocate num_bytes allowing the underlying mspace to grow.
  virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes,
                                          size_t* bytes_allocated) LOCKS_EXCLUDED(lock_);

  // Allocate num_bytes without allowing the underlying mspace to grow.
  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated);
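  //
  // A possible call pattern (sketch only; space, self and num_bytes are assumed
  // to be in scope, and the retry-after-GC step is the caller's policy, not
  // something these declarations guarantee):
  //
  //   size_t bytes_allocated = 0;
  //   mirror::Object* obj = space->Alloc(self, num_bytes, &bytes_allocated);
  //   if (obj == nullptr) {
  //     // e.g. run a collection first, then retry with growth permitted.
  //     obj = space->AllocWithGrowth(self, num_bytes, &bytes_allocated);
  //   }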

  // Return the storage space required by obj.
  virtual size_t AllocationSize(const mirror::Object* obj);
  virtual size_t Free(Thread* self, mirror::Object* ptr);
  virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs);

  mirror::Object* AllocNonvirtual(Thread* self, size_t num_bytes, size_t* bytes_allocated);

  size_t AllocationSizeNonvirtual(const mirror::Object* obj) {
    return mspace_usable_size(const_cast<void*>(reinterpret_cast<const void*>(obj))) +
        kChunkOverhead;
  }

  void* MoreCore(intptr_t increment);

  void* GetMspace() const {
    return mspace_;
  }

  // Hands unused pages back to the system.
  size_t Trim();

  // Perform an mspace_inspect_all which calls back for each allocation chunk. The chunk may not be
  // in use, indicated by num_bytes equaling zero.
  void Walk(WalkCallback callback, void* arg) LOCKS_EXCLUDED(lock_);
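  //
  // Sketch of a callback (illustrative; CountUsedBytes is a hypothetical helper,
  // not part of this class): sum the bytes of chunks that are in use and skip
  // free chunks, which are reported with num_bytes == 0.
  //
  //   static void CountUsedBytes(void* start, void* end, size_t num_bytes, void* arg) {
  //     if (num_bytes != 0) {
  //       *reinterpret_cast<size_t*>(arg) += num_bytes;
  //     }
  //   }
  //
  //   size_t used = 0;
  //   space->Walk(CountUsedBytes, &used);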

  // Returns the number of bytes that the space has currently obtained from the system. This is
  // greater than or equal to the amount of live data in the space.
  size_t GetFootprint();

  // Returns the number of bytes that the heap is allowed to obtain from the system via MoreCore.
  size_t GetFootprintLimit();

  // Set the maximum number of bytes that the heap is allowed to obtain from the system via
  // MoreCore. Note this is used to stop the mspace from growing beyond the limit to Capacity. When
  // allocations fail we GC before increasing the footprint limit and allowing the mspace to grow.
  void SetFootprintLimit(size_t limit);

  // Removes the fork time growth limit on capacity, allowing the application to allocate up to the
  // maximum reserved size of the heap.
  void ClearGrowthLimit() {
    growth_limit_ = NonGrowthLimitCapacity();
  }

  // Override capacity so that we only return the possibly limited capacity.
  size_t Capacity() const {
    return growth_limit_;
  }

  // The total amount of memory reserved for the alloc space.
  size_t NonGrowthLimitCapacity() const {
    return GetMemMap()->Size();
  }

  accounting::SpaceBitmap* GetLiveBitmap() const {
    return live_bitmap_.get();
  }

  accounting::SpaceBitmap* GetMarkBitmap() const {
    return mark_bitmap_.get();
  }

  void Dump(std::ostream& os) const;

  void SetGrowthLimit(size_t growth_limit);

  // Swap the live and mark bitmaps of this space. This is used by the GC for concurrent sweeping.
  void SwapBitmaps();

  // Turn ourselves into a zygote space and return a new alloc space which has our unused memory.
  DlMallocSpace* CreateZygoteSpace(const char* alloc_space_name);
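  //
  // Sketch of the intended call pattern at fork time (illustrative; "alloc space"
  // is an arbitrary name): the receiver keeps the memory used so far and becomes
  // the zygote space, while new allocations go to the returned space.
  //
  //   DlMallocSpace* new_alloc_space = old_space->CreateZygoteSpace("alloc space");
  //   // From here on, allocate from new_alloc_space; old_space holds the zygote objects.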

  uint64_t GetBytesAllocated();
  uint64_t GetObjectsAllocated();
  uint64_t GetTotalBytesAllocated() {
    return GetBytesAllocated() + total_bytes_freed_;
  }
  uint64_t GetTotalObjectsAllocated() {
    return GetObjectsAllocated() + total_objects_freed_;
  }

  // Returns the old mark bitmap.
  accounting::SpaceBitmap* BindLiveToMarkBitmap();
  bool HasBoundBitmaps() const;
  void UnBindBitmaps();

  // Returns the class of a recently freed object.
  mirror::Class* FindRecentFreedObject(const mirror::Object* obj);

  // Used to ensure that failure happens when you free / allocate into an invalidated space. If we
  // don't do this we may get heap corruption instead of a segfault at null.
  void InvalidateMSpace() {
    mspace_ = nullptr;
  }

 protected:
  DlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin, byte* end,
                byte* limit, size_t growth_limit);

 private:
  size_t InternalAllocationSize(const mirror::Object* obj);
  mirror::Object* AllocWithoutGrowthLocked(size_t num_bytes, size_t* bytes_allocated)
      EXCLUSIVE_LOCKS_REQUIRED(lock_);
  bool Init(size_t initial_size, size_t maximum_size, size_t growth_size, byte* requested_base);
  void RegisterRecentFree(mirror::Object* ptr) EXCLUSIVE_LOCKS_REQUIRED(lock_);
  static void* CreateMallocSpace(void* base, size_t morecore_start, size_t initial_size);

  UniquePtr<accounting::SpaceBitmap> live_bitmap_;
  UniquePtr<accounting::SpaceBitmap> mark_bitmap_;
  UniquePtr<accounting::SpaceBitmap> temp_bitmap_;

  // Recent allocation buffer.
  static constexpr size_t kRecentFreeCount = kDebugSpaces ? (1 << 16) : 0;
  static constexpr size_t kRecentFreeMask = kRecentFreeCount - 1;
  std::pair<const mirror::Object*, mirror::Class*> recent_freed_objects_[kRecentFreeCount];
  size_t recent_free_pos_;

  // Approximate number of bytes and objects which have been deallocated in the space.
  size_t total_bytes_freed_;
  size_t total_objects_freed_;

  static size_t bitmap_index_;

  // The boundary tag overhead.
  static const size_t kChunkOverhead = kWordSize;

  // Used to ensure mutual exclusion when the allocation space's data structures are being modified.
  Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  // Underlying malloc space.
  void* mspace_;

  // The capacity of the alloc space until such time that ClearGrowthLimit is called.
  // The underlying mem_map_ controls the maximum size we allow the heap to grow to. The growth
  // limit is a value <= to the mem_map_ capacity used for ergonomic reasons because of the zygote.
  // Prior to forking the zygote the heap will have a maximally sized mem_map_ but the growth_limit_
  // will be set to a lower value. The growth_limit_ is used as the capacity of the alloc_space_;
  // however, capacity normally can't vary. In the case of the growth_limit_ it can be cleared
  // one time by a call to ClearGrowthLimit.
  size_t growth_limit_;

  friend class collector::MarkSweep;

  DISALLOW_COPY_AND_ASSIGN(DlMallocSpace);
};

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_DLMALLOC_SPACE_H_