/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_SRC_GC_LARGE_OBJECT_SPACE_H_
#define ART_SRC_GC_LARGE_OBJECT_SPACE_H_

#include "space.h"
#include "safe_map.h"

#include <set>
#include <vector>

namespace art {
class SpaceSetMap;

// Abstraction implemented by all large object spaces.
class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
 public:
  virtual bool CanAllocateInto() const {
    return true;
  }

  virtual bool IsCompactible() const {
    return true;
  }

  virtual SpaceType GetType() const {
    return kSpaceTypeLargeObjectSpace;
  }

  virtual SpaceSetMap* GetLiveObjects() const {
    return live_objects_.get();
  }

  virtual SpaceSetMap* GetMarkObjects() const {
    return mark_objects_.get();
  }

  virtual void SwapBitmaps();
  virtual void CopyLiveToMarked();
  virtual void Walk(DlMallocSpace::WalkCallback, void* arg) = 0;
  virtual ~LargeObjectSpace() {}

  uint64_t GetNumBytesAllocated() const {
    return num_bytes_allocated_;
  }

  uint64_t GetNumObjectsAllocated() const {
    return num_objects_allocated_;
  }

  uint64_t GetTotalBytesAllocated() const {
    return total_bytes_allocated_;
  }

  uint64_t GetTotalObjectsAllocated() const {
    return total_objects_allocated_;
  }

  // Frees each of the num_ptrs objects in ptrs, returning the total number of bytes freed.
  size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs);

 protected:
  explicit LargeObjectSpace(const std::string& name);

  // Approximate number of bytes and objects currently allocated into the space.
  size_t num_bytes_allocated_;
  size_t num_objects_allocated_;
  // Cumulative totals over the lifetime of the space.
  size_t total_bytes_allocated_;
  size_t total_objects_allocated_;

  UniquePtr<SpaceSetMap> live_objects_;
  UniquePtr<SpaceSetMap> mark_objects_;

  friend class Space;
};
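
// A sketch of how a collector typically drives the live/mark sets above
// (assuming the usual mark-sweep flow; "space", "self", and "unreached" are
// illustrative names, not part of this header):
//
//   space->CopyLiveToMarked();             // seed the mark set from the live set
//   // ... marking populates space->GetMarkObjects() ...
//   space->SwapBitmaps();                  // the mark set becomes the new live set
//   space->FreeList(self, n, unreached);   // bulk-free objects that were not marked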

// A discontinuous large object space implemented by individual mmap/munmap calls.
class LargeObjectMapSpace : public LargeObjectSpace {
 public:
  // Creates a large object space. Allocations into the large object space use memory maps instead
  // of malloc.
  static LargeObjectMapSpace* Create(const std::string& name);

  // Returns the storage space required by obj.
  virtual size_t AllocationSize(const mirror::Object* obj);
  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes);
  size_t Free(Thread* self, mirror::Object* ptr);
  virtual void Walk(DlMallocSpace::WalkCallback, void* arg);
  // TODO: disabling thread safety analysis as this may be called when we already hold lock_.
  virtual bool Contains(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS;

 private:
  explicit LargeObjectMapSpace(const std::string& name);
  virtual ~LargeObjectMapSpace() {}

  // Used to ensure mutual exclusion when the allocation space's data structures are being modified.
  mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::vector<mirror::Object*> large_objects_ GUARDED_BY(lock_);
  typedef SafeMap<mirror::Object*, MemMap*> MemMaps;
  MemMaps mem_maps_ GUARDED_BY(lock_);
};
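
// A minimal usage sketch for the map-based space (hypothetical call site;
// "self" and "num_bytes" are illustrative, not part of this header):
//
//   LargeObjectMapSpace* los = LargeObjectMapSpace::Create("large object space");
//   mirror::Object* obj = los->Alloc(self, num_bytes);  // backed by its own MemMap
//   CHECK(los->Contains(obj));
//   size_t usable = los->AllocationSize(obj);  // storage actually reserved for obj
//   los->Free(self, obj);                      // releases the object's MemMap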

// A continuous large object space with a free-list to handle holes.
class FreeListSpace : public LargeObjectSpace {
 public:
  virtual ~FreeListSpace();
  static FreeListSpace* Create(const std::string& name, byte* requested_begin, size_t capacity);

  size_t AllocationSize(const mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(lock_);
  mirror::Object* Alloc(Thread* self, size_t num_bytes);
  size_t Free(Thread* self, mirror::Object* obj);
  bool Contains(const mirror::Object* obj) const;
  void Walk(DlMallocSpace::WalkCallback callback, void* arg);

  // Address at which the space begins.
  byte* Begin() const {
    return begin_;
  }

  // Address at which the space ends, which may vary as the space is filled.
  byte* End() const {
    return end_;
  }

  // Current size of the space.
  size_t Size() const {
    return End() - Begin();
  }

  virtual void Dump(std::ostream& os) const;

 private:
  static const size_t kAlignment = kPageSize;

  class Chunk {
   public:
    // The top bit of m_size marks a chunk as free; the remaining bits hold its size.
    static const size_t kFreeFlag = 0x80000000;

    struct SortBySize {
      bool operator()(const Chunk* a, const Chunk* b) const {
        return a->GetSize() < b->GetSize();
      }
    };

    bool IsFree() const {
      return (m_size & kFreeFlag) != 0;
    }

    void SetSize(size_t size, bool is_free = false) {
      m_size = size | (is_free ? kFreeFlag : 0);
    }

    size_t GetSize() const {
      return m_size & (kFreeFlag - 1);
    }

    Chunk* GetPrevious() {
      return m_previous;
    }

    void SetPrevious(Chunk* previous) {
      m_previous = previous;
      DCHECK(m_previous == NULL ||
             m_previous + m_previous->GetSize() / kAlignment == this);
    }

   private:
    size_t m_size;
    Chunk* m_previous;
  };
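
  // Worked example of the size/flag encoding above (values are illustrative):
  //
  //   Chunk c;
  //   c.SetSize(4096, true);  // m_size == 0x80001000 (kFreeFlag | 4096)
  //   c.IsFree();             // true: the kFreeFlag bit is set
  //   c.GetSize();            // 4096: masking with (kFreeFlag - 1) strips the flag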

  FreeListSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end);
  void AddFreeChunk(void* address, size_t size, Chunk* previous) EXCLUSIVE_LOCKS_REQUIRED(lock_);
  Chunk* ChunkFromAddr(void* address) EXCLUSIVE_LOCKS_REQUIRED(lock_);
  void* AddrFromChunk(Chunk* chunk) EXCLUSIVE_LOCKS_REQUIRED(lock_);
  void RemoveFreeChunk(Chunk* chunk) EXCLUSIVE_LOCKS_REQUIRED(lock_);
  Chunk* GetNextChunk(Chunk* chunk);

  typedef std::multiset<Chunk*, Chunk::SortBySize> FreeChunks;
  byte* const begin_;
  byte* const end_;
  UniquePtr<MemMap> mem_map_;
  Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::vector<Chunk> chunks_ GUARDED_BY(lock_);
  // Free chunks ordered by size.
  FreeChunks free_chunks_ GUARDED_BY(lock_);
};
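
// A minimal usage sketch (hypothetical call site; "self" and the requested
// sizes are illustrative, not part of this header):
//
//   FreeListSpace* space = FreeListSpace::Create("free list space", NULL, 16 * MB);
//   mirror::Object* a = space->Alloc(self, 128 * KB);
//   mirror::Object* b = space->Alloc(self, 256 * KB);
//   space->Free(self, a);                              // records a hole in free_chunks_
//   mirror::Object* c = space->Alloc(self, 64 * KB);   // may be carved from the hole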

}  // namespace art

#endif  // ART_SRC_GC_LARGE_OBJECT_SPACE_H_