blob: 8444a70b9cb747d24346d3bed1a1438891f75c3e [file] [log] [blame]
Ian Rogers1d54e732013-05-02 21:10:01 -07001/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
Brian Carlstromfc0e3212013-07-17 14:40:12 -070017#ifndef ART_RUNTIME_GC_SPACE_SPACE_H_
18#define ART_RUNTIME_GC_SPACE_SPACE_H_
Ian Rogers1d54e732013-05-02 21:10:01 -070019
Ian Rogers700a4022014-05-19 16:49:03 -070020#include <memory>
Ian Rogers1d54e732013-05-02 21:10:01 -070021#include <string>
22
Ian Rogers1d54e732013-05-02 21:10:01 -070023#include "base/macros.h"
24#include "base/mutex.h"
25#include "gc/accounting/space_bitmap.h"
Mathieu Chartier10fb83a2014-06-15 15:15:43 -070026#include "gc/collector/garbage_collector.h"
Ian Rogers1d54e732013-05-02 21:10:01 -070027#include "globals.h"
28#include "image.h"
29#include "mem_map.h"
30
31namespace art {
32namespace mirror {
33 class Object;
34} // namespace mirror
35
36namespace gc {
37
Ian Rogers1d54e732013-05-02 21:10:01 -070038class Heap;
39
40namespace space {
41
// Forward declarations of the concrete space types declared in sibling headers; the casts and
// predicates on Space below return/test these types.
class AllocSpace;
class BumpPointerSpace;
class ContinuousMemMapAllocSpace;
class ContinuousSpace;
class DiscontinuousSpace;
class MallocSpace;
class DlMallocSpace;
class RosAllocSpace;
class ImageSpace;
class LargeObjectSpace;
class ZygoteSpace;

// Enables extra space sanity checking in debug builds only.
static constexpr bool kDebugSpaces = kIsDebugBuild;
Ian Rogers1d54e732013-05-02 21:10:01 -070055
// When objects within a space may be reclaimed. See Space::GetGcRetentionPolicy.
enum GcRetentionPolicy {
  // Objects are retained forever with this policy for a space.
  kGcRetentionPolicyNeverCollect,
  // Every GC cycle will attempt to collect objects in this space.
  kGcRetentionPolicyAlwaysCollect,
  // Objects will be considered for collection only in "full" GC cycles, ie faster partial
  // collections won't scan these areas such as the Zygote.
  kGcRetentionPolicyFullCollect,
};
std::ostream& operator<<(std::ostream& os, const GcRetentionPolicy& policy);
67
// Discriminates the concrete kind of a Space. See Space::GetType.
enum SpaceType {
  kSpaceTypeImageSpace,
  kSpaceTypeMallocSpace,
  kSpaceTypeZygoteSpace,
  kSpaceTypeBumpPointerSpace,
  kSpaceTypeLargeObjectSpace,
};
std::ostream& operator<<(std::ostream& os, const SpaceType& space_type);
76
// A space contains memory allocated for managed objects.
class Space {
 public:
  // Dump space. Also key method for C++ vtables.
  virtual void Dump(std::ostream& os) const;

  // Name of the space. May vary, for example before/after the Zygote fork.
  const char* GetName() const {
    return name_.c_str();
  }

  // The policy of when objects are collected associated with this space.
  GcRetentionPolicy GetGcRetentionPolicy() const {
    return gc_retention_policy_;
  }

  // Is the given object contained within this space?
  virtual bool Contains(const mirror::Object* obj) const = 0;

  // The kind of space this is: image, alloc, zygote, large object.
  virtual SpaceType GetType() const = 0;

  // Is this an image space, ie one backed by a memory mapped image file.
  bool IsImageSpace() const {
    return GetType() == kSpaceTypeImageSpace;
  }
  ImageSpace* AsImageSpace();

  // Is this a dlmalloc backed allocation space?
  bool IsMallocSpace() const {
    SpaceType type = GetType();
    return type == kSpaceTypeMallocSpace;
  }
  MallocSpace* AsMallocSpace();

  // Is this specifically a DlMallocSpace? Defaults to false; overridden by that subclass.
  virtual bool IsDlMallocSpace() const {
    return false;
  }
  virtual DlMallocSpace* AsDlMallocSpace();

  // Is this specifically a RosAllocSpace? Defaults to false; overridden by that subclass.
  virtual bool IsRosAllocSpace() const {
    return false;
  }
  virtual RosAllocSpace* AsRosAllocSpace();

  // Is this the space allocated into by the Zygote and no-longer in use for allocation?
  bool IsZygoteSpace() const {
    return GetType() == kSpaceTypeZygoteSpace;
  }
  virtual ZygoteSpace* AsZygoteSpace();

  // Is this space a bump pointer space?
  bool IsBumpPointerSpace() const {
    return GetType() == kSpaceTypeBumpPointerSpace;
  }
  virtual BumpPointerSpace* AsBumpPointerSpace();

  // Does this space hold large objects and implement the large object space abstraction?
  bool IsLargeObjectSpace() const {
    return GetType() == kSpaceTypeLargeObjectSpace;
  }
  LargeObjectSpace* AsLargeObjectSpace();

  // Does this space implement the ContinuousSpace interface (contiguous address range)?
  virtual bool IsContinuousSpace() const {
    return false;
  }
  ContinuousSpace* AsContinuousSpace();

  // Does this space implement the DiscontinuousSpace interface?
  virtual bool IsDiscontinuousSpace() const {
    return false;
  }
  DiscontinuousSpace* AsDiscontinuousSpace();

  // Does this space implement the AllocSpace interface (supports Alloc/Free)?
  virtual bool IsAllocSpace() const {
    return false;
  }
  virtual AllocSpace* AsAllocSpace();

  // Is this a ContinuousMemMapAllocSpace (continuous, mem-map backed, allocatable)?
  virtual bool IsContinuousMemMapAllocSpace() const {
    return false;
  }
  virtual ContinuousMemMapAllocSpace* AsContinuousMemMapAllocSpace();

  // Returns true if objects in the space are movable.
  virtual bool CanMoveObjects() const = 0;

  virtual ~Space() {}

 protected:
  Space(const std::string& name, GcRetentionPolicy gc_retention_policy);

  // Update the retention policy; used when a space's role changes (e.g. Zygote fork).
  void SetGcRetentionPolicy(GcRetentionPolicy gc_retention_policy) {
    gc_retention_policy_ = gc_retention_policy;
  }

  // Name of the space that may vary due to the Zygote fork.
  std::string name_;

 protected:
  // When should objects within this space be reclaimed? Not constant as we vary it in the case
  // of Zygote forking.
  GcRetentionPolicy gc_retention_policy_;

 private:
  friend class art::gc::Heap;
  DISALLOW_COPY_AND_ASSIGN(Space);
};
std::ostream& operator<<(std::ostream& os, const Space& space);
185
// AllocSpace interface: implemented by spaces that support object allocation and freeing.
class AllocSpace {
 public:
  // Number of bytes currently allocated.
  virtual uint64_t GetBytesAllocated() = 0;
  // Number of objects currently allocated.
  virtual uint64_t GetObjectsAllocated() = 0;

  // Allocate num_bytes without allowing growth. If the allocation
  // succeeds, the output parameter bytes_allocated will be set to the
  // actually allocated bytes which is >= num_bytes.
  // Alloc can be called from multiple threads at the same time and must be thread-safe.
  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                size_t* usable_size) = 0;

  // Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
  // The default implementation simply forwards to the thread-safe Alloc.
  virtual mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                            size_t* usable_size)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return Alloc(self, num_bytes, bytes_allocated, usable_size);
  }

  // Return the storage space required by obj.
  virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) = 0;

  // Free a single object. Returns how many bytes were freed.
  virtual size_t Free(Thread* self, mirror::Object* ptr) = 0;

  // Free a list of num_ptrs objects. Returns how many bytes were freed.
  virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) = 0;

  // Revoke any sort of thread-local buffers that are used to speed up allocations for the given
  // thread, if the alloc space implementation uses any.
  virtual void RevokeThreadLocalBuffers(Thread* thread) = 0;

  // Revoke any sort of thread-local buffers that are used to speed up allocations for all the
  // threads, if the alloc space implementation uses any.
  virtual void RevokeAllThreadLocalBuffers() = 0;

 protected:
  // Shared state passed to sweep callbacks; records the freed object/byte totals.
  struct SweepCallbackContext {
    SweepCallbackContext(bool swap_bitmaps, space::Space* space);
    const bool swap_bitmaps;
    space::Space* const space;
    Thread* const self;
    collector::ObjectBytePair freed;
  };

  AllocSpace() {}
  virtual ~AllocSpace() {}

 private:
  DISALLOW_COPY_AND_ASSIGN(AllocSpace);
};
240
// Continuous spaces have bitmaps, and an address range. Although not required, objects within
// continuous spaces can be marked in the card table.
class ContinuousSpace : public Space {
 public:
  // Address at which the space begins.
  byte* Begin() const {
    return begin_;
  }

  // Current address at which the space ends, which may vary as the space is filled.
  byte* End() const {
    return end_;
  }

  // The end of the address range covered by the space.
  byte* Limit() const {
    return limit_;
  }

  // Change the end of the space. Be careful with use since changing the end of a space to an
  // invalid value may break the GC.
  void SetEnd(byte* end) {
    end_ = end;
  }

  // Change the upper bound of the address range covered by the space; see Limit().
  void SetLimit(byte* limit) {
    limit_ = limit;
  }

  // Current size of space
  size_t Size() const {
    return End() - Begin();
  }

  // Bitmaps tracking live and marked objects; provided by concrete subclasses.
  virtual accounting::ContinuousSpaceBitmap* GetLiveBitmap() const = 0;
  virtual accounting::ContinuousSpaceBitmap* GetMarkBitmap() const = 0;

  // Maximum which the mapped space can grow to.
  virtual size_t Capacity() const {
    return Limit() - Begin();
  }

  // Is object within this space? We check to see if the pointer is beyond the end first as
  // continuous spaces are iterated over from low to high.
  bool HasAddress(const mirror::Object* obj) const {
    const byte* byte_ptr = reinterpret_cast<const byte*>(obj);
    return byte_ptr >= Begin() && byte_ptr < Limit();
  }

  bool Contains(const mirror::Object* obj) const {
    return HasAddress(obj);
  }

  virtual bool IsContinuousSpace() const {
    return true;
  }

  virtual ~ContinuousSpace() {}

 protected:
  ContinuousSpace(const std::string& name, GcRetentionPolicy gc_retention_policy,
                  byte* begin, byte* end, byte* limit) :
      Space(name, gc_retention_policy), begin_(begin), end_(end), limit_(limit) {
  }

  // The beginning of the storage for fast access.
  byte* begin_;

  // Current end of the space. NOTE(review): declared volatile, presumably because it is
  // read/written concurrently by mutators and the GC — confirm before changing.
  byte* volatile end_;

  // Limit of the space.
  byte* limit_;

 private:
  DISALLOW_COPY_AND_ASSIGN(ContinuousSpace);
};
318
// A space where objects may be allocated higgledy-piggledy throughout virtual memory. Currently
// the card table can't cover these objects and so the write barrier shouldn't be triggered. This
// is suitable for use for large primitive arrays.
class DiscontinuousSpace : public Space {
 public:
  // Bitmap tracking live objects in this space.
  accounting::LargeObjectBitmap* GetLiveBitmap() const {
    return live_bitmap_.get();
  }

  // Bitmap tracking marked objects, used during collection.
  accounting::LargeObjectBitmap* GetMarkBitmap() const {
    return mark_bitmap_.get();
  }

  virtual bool IsDiscontinuousSpace() const OVERRIDE {
    return true;
  }

  virtual ~DiscontinuousSpace() {}

 protected:
  DiscontinuousSpace(const std::string& name, GcRetentionPolicy gc_retention_policy);

  std::unique_ptr<accounting::LargeObjectBitmap> live_bitmap_;
  std::unique_ptr<accounting::LargeObjectBitmap> mark_bitmap_;

 private:
  DISALLOW_COPY_AND_ASSIGN(DiscontinuousSpace);
};
347
// A continuous space whose underlying storage is owned by a MemMap.
class MemMapSpace : public ContinuousSpace {
 public:
  // Size of the space without a limit on its growth. By default this is just the Capacity, but
  // for the allocation space we support starting with a small heap and then extending it.
  virtual size_t NonGrowthLimitCapacity() const {
    return Capacity();
  }

  MemMap* GetMemMap() {
    return mem_map_.get();
  }

  const MemMap* GetMemMap() const {
    return mem_map_.get();
  }

  // Release ownership of the underlying mem map to the caller, who becomes responsible for it.
  MemMap* ReleaseMemMap() {
    return mem_map_.release();
  }

 protected:
  MemMapSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end, byte* limit,
              GcRetentionPolicy gc_retention_policy)
      : ContinuousSpace(name, gc_retention_policy, begin, end, limit),
        mem_map_(mem_map) {
  }

  // Underlying storage of the space
  std::unique_ptr<MemMap> mem_map_;

 private:
  DISALLOW_COPY_AND_ASSIGN(MemMapSpace);
};
381
// Used by the heap compaction interface to enable copying from one type of alloc space to another.
class ContinuousMemMapAllocSpace : public MemMapSpace, public AllocSpace {
 public:
  bool IsAllocSpace() const OVERRIDE {
    return true;
  }
  AllocSpace* AsAllocSpace() OVERRIDE {
    return this;
  }

  bool IsContinuousMemMapAllocSpace() const OVERRIDE {
    return true;
  }
  ContinuousMemMapAllocSpace* AsContinuousMemMapAllocSpace() {
    return this;
  }

  // Bitmap binding/unbinding; all require the heap bitmap lock to be held exclusively.
  bool HasBoundBitmaps() const EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
  void BindLiveToMarkBitmap()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
  void UnBindBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
  // Swap the live and mark bitmaps of this space. This is used by the GC for concurrent sweeping.
  void SwapBitmaps();

  // Clear the space back to an empty space.
  virtual void Clear() = 0;

  accounting::ContinuousSpaceBitmap* GetLiveBitmap() const {
    return live_bitmap_.get();
  }

  accounting::ContinuousSpaceBitmap* GetMarkBitmap() const {
    return mark_bitmap_.get();
  }

  // Sweep dead objects, returning the freed object/byte totals.
  collector::ObjectBytePair Sweep(bool swap_bitmaps);
  // Callback invoked on swept objects; supplied by concrete subclasses.
  virtual accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() = 0;

 protected:
  std::unique_ptr<accounting::ContinuousSpaceBitmap> live_bitmap_;
  std::unique_ptr<accounting::ContinuousSpaceBitmap> mark_bitmap_;
  std::unique_ptr<accounting::ContinuousSpaceBitmap> temp_bitmap_;

  ContinuousMemMapAllocSpace(const std::string& name, MemMap* mem_map, byte* begin,
                             byte* end, byte* limit, GcRetentionPolicy gc_retention_policy)
      : MemMapSpace(name, mem_map, begin, end, limit, gc_retention_policy) {
  }

 private:
  friend class gc::Heap;
  DISALLOW_COPY_AND_ASSIGN(ContinuousMemMapAllocSpace);
};
434
Ian Rogers1d54e732013-05-02 21:10:01 -0700435} // namespace space
436} // namespace gc
437} // namespace art
438
Brian Carlstromfc0e3212013-07-17 14:40:12 -0700439#endif // ART_RUNTIME_GC_SPACE_SPACE_H_