Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (C) 2011 The Android Open Source Project |
| 3 | * |
| 4 | * Licensed under the Apache License, Version 2.0 (the "License"); |
| 5 | * you may not use this file except in compliance with the License. |
| 6 | * You may obtain a copy of the License at |
| 7 | * |
| 8 | * http://www.apache.org/licenses/LICENSE-2.0 |
| 9 | * |
| 10 | * Unless required by applicable law or agreed to in writing, software |
| 11 | * distributed under the License is distributed on an "AS IS" BASIS, |
| 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 13 | * See the License for the specific language governing permissions and |
| 14 | * limitations under the License. |
| 15 | */ |
| 16 | |
Brian Carlstrom | fc0e321 | 2013-07-17 14:40:12 -0700 | [diff] [blame] | 17 | #ifndef ART_RUNTIME_GC_SPACE_SPACE_H_ |
| 18 | #define ART_RUNTIME_GC_SPACE_SPACE_H_ |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 19 | |
| 20 | #include <string> |
| 21 | |
| 22 | #include "UniquePtr.h" |
| 23 | #include "base/macros.h" |
| 24 | #include "base/mutex.h" |
| 25 | #include "gc/accounting/space_bitmap.h" |
| 26 | #include "globals.h" |
| 27 | #include "image.h" |
| 28 | #include "mem_map.h" |
| 29 | |
| 30 | namespace art { |
| 31 | namespace mirror { |
| 32 | class Object; |
| 33 | } // namespace mirror |
| 34 | |
| 35 | namespace gc { |
| 36 | |
| 37 | namespace accounting { |
| 38 | class SpaceBitmap; |
Brian Carlstrom | 7934ac2 | 2013-07-26 10:54:15 -0700 | [diff] [blame] | 39 | } // namespace accounting |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 40 | |
| 41 | class Heap; |
| 42 | |
| 43 | namespace space { |
| 44 | |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 45 | class AllocSpace; |
Mathieu Chartier | 7410f29 | 2013-11-24 13:17:35 -0800 | [diff] [blame] | 46 | class BumpPointerSpace; |
Mathieu Chartier | a1602f2 | 2014-01-13 17:19:19 -0800 | [diff] [blame] | 47 | class ContinuousMemMapAllocSpace; |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 48 | class ContinuousSpace; |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 49 | class DiscontinuousSpace; |
Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 50 | class MallocSpace; |
| 51 | class DlMallocSpace; |
| 52 | class RosAllocSpace; |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 53 | class ImageSpace; |
| 54 | class LargeObjectSpace; |
Mathieu Chartier | a1602f2 | 2014-01-13 17:19:19 -0800 | [diff] [blame] | 55 | class ZygoteSpace; |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 56 | |
// When true (debug builds only), spaces perform extra (more expensive) validity checks.
static constexpr bool kDebugSpaces = kIsDebugBuild;
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 58 | |
// See Space::GetGcRetentionPolicy.
enum GcRetentionPolicy {
  // Objects in a space with this policy are retained forever (never collected).
  kGcRetentionPolicyNeverCollect = 0,
  // Objects in a space with this policy are considered on every GC cycle.
  kGcRetentionPolicyAlwaysCollect = 1,
  // Objects are considered for collection only during "full" GC cycles; faster partial
  // collections skip such areas (for example the Zygote space).
  kGcRetentionPolicyFullCollect = 2,
};
std::ostream& operator<<(std::ostream& os, const GcRetentionPolicy& policy);
| 70 | |
// Discriminator returned by Space::GetType(); used by the Is*/As* helpers below.
enum SpaceType {
  kSpaceTypeImageSpace = 0,
  kSpaceTypeMallocSpace = 1,
  kSpaceTypeZygoteSpace = 2,
  kSpaceTypeBumpPointerSpace = 3,
  kSpaceTypeLargeObjectSpace = 4,
};
std::ostream& operator<<(std::ostream& os, const SpaceType& space_type);
| 79 | |
// A space contains memory allocated for managed objects.
class Space {
 public:
  // Dump space. Also key method for C++ vtables.
  virtual void Dump(std::ostream& os) const;

  // Name of the space. May vary, for example before/after the Zygote fork.
  const char* GetName() const {
    return name_.c_str();
  }

  // The policy of when objects are collected associated with this space.
  GcRetentionPolicy GetGcRetentionPolicy() const {
    return gc_retention_policy_;
  }

  // Is the given object contained within this space?
  virtual bool Contains(const mirror::Object* obj) const = 0;

  // The kind of space this: image, alloc, zygote, large object.
  virtual SpaceType GetType() const = 0;

  // Is this an image space, ie one backed by a memory mapped image file.
  bool IsImageSpace() const {
    return GetType() == kSpaceTypeImageSpace;
  }
  // Down-cast to ImageSpace (defined out of line; presumably only valid when
  // IsImageSpace() is true — confirm against image_space definition).
  ImageSpace* AsImageSpace();

  // Is this a dlmalloc backed allocation space?
  bool IsMallocSpace() const {
    SpaceType type = GetType();
    return type == kSpaceTypeMallocSpace;
  }
  // Down-cast to MallocSpace (defined out of line).
  MallocSpace* AsMallocSpace();

  // The Is*/As* pairs below default to false / fatal abort; the concrete subclass
  // overrides both so the down-cast is only reachable after a successful Is* check.
  virtual bool IsDlMallocSpace() const {
    return false;
  }
  virtual DlMallocSpace* AsDlMallocSpace() {
    LOG(FATAL) << "Unreachable";
    return nullptr;
  }
  virtual bool IsRosAllocSpace() const {
    return false;
  }
  virtual RosAllocSpace* AsRosAllocSpace() {
    LOG(FATAL) << "Unreachable";
    return nullptr;
  }

  // Is this the space allocated into by the Zygote and no-longer in use?
  bool IsZygoteSpace() const {
    return GetType() == kSpaceTypeZygoteSpace;
  }
  virtual ZygoteSpace* AsZygoteSpace() {
    LOG(FATAL) << "Unreachable";
    return nullptr;
  }

  // Is this space a bump pointer space?
  bool IsBumpPointerSpace() const {
    return GetType() == kSpaceTypeBumpPointerSpace;
  }
  virtual BumpPointerSpace* AsBumpPointerSpace() {
    LOG(FATAL) << "Unreachable";
    return nullptr;
  }

  // Does this space hold large objects and implement the large object space abstraction?
  bool IsLargeObjectSpace() const {
    return GetType() == kSpaceTypeLargeObjectSpace;
  }
  LargeObjectSpace* AsLargeObjectSpace();

  // Continuous spaces cover a single address range; see ContinuousSpace below.
  virtual bool IsContinuousSpace() const {
    return false;
  }
  ContinuousSpace* AsContinuousSpace();

  // Discontinuous spaces track individual objects; see DiscontinuousSpace below.
  virtual bool IsDiscontinuousSpace() const {
    return false;
  }
  DiscontinuousSpace* AsDiscontinuousSpace();

  // Does this space implement the AllocSpace allocation interface?
  virtual bool IsAllocSpace() const {
    return false;
  }
  virtual AllocSpace* AsAllocSpace() {
    LOG(FATAL) << "Unimplemented";
    return nullptr;
  }

  virtual bool IsContinuousMemMapAllocSpace() const {
    return false;
  }
  virtual ContinuousMemMapAllocSpace* AsContinuousMemMapAllocSpace() {
    LOG(FATAL) << "Unimplemented";
    return nullptr;
  }

  virtual ~Space() {}

 protected:
  Space(const std::string& name, GcRetentionPolicy gc_retention_policy);

  // Change the retention policy, e.g. when the Zygote fork turns the current
  // alloc space into a Zygote space.
  void SetGcRetentionPolicy(GcRetentionPolicy gc_retention_policy) {
    gc_retention_policy_ = gc_retention_policy;
  }

  // Name of the space that may vary due to the Zygote fork.
  std::string name_;

 protected:
  // Context passed to sweep callbacks; aggregates the GC state a sweep needs plus
  // the freed-object/byte counters the callback accumulates into.
  struct SweepCallbackContext {
    bool swap_bitmaps;       // Whether live/mark bitmaps are swapped for this sweep.
    Heap* heap;
    space::Space* space;     // The space being swept.
    Thread* self;
    size_t freed_objects;    // Accumulated count of objects freed.
    size_t freed_bytes;      // Accumulated bytes freed.
  };

  // When should objects within this space be reclaimed? Not constant as we vary it in the case
  // of Zygote forking.
  GcRetentionPolicy gc_retention_policy_;

 private:
  friend class art::gc::Heap;
  DISALLOW_COPY_AND_ASSIGN(Space);
};
std::ostream& operator<<(std::ostream& os, const Space& space);
| 211 | |
// AllocSpace interface: the allocation/free contract implemented by spaces that
// objects can be allocated into.
class AllocSpace {
 public:
  // Number of bytes currently allocated.
  virtual uint64_t GetBytesAllocated() = 0;
  // Number of objects currently allocated.
  virtual uint64_t GetObjectsAllocated() = 0;

  // Allocate num_bytes without allowing growth. If the allocation
  // succeeds, the output parameter bytes_allocated will be set to the
  // actually allocated bytes which is >= num_bytes.
  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated) = 0;

  // Return the storage space required by obj.
  virtual size_t AllocationSize(mirror::Object* obj) = 0;

  // Free a single object. Returns how many bytes were freed.
  virtual size_t Free(Thread* self, mirror::Object* ptr) = 0;

  // Free the num_ptrs objects in the ptrs array. Returns how many bytes were freed.
  virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) = 0;

  // Revoke any sort of thread-local buffers that are used to speed up
  // allocations for the given thread, if the alloc space
  // implementation uses any. No-op by default.
  virtual void RevokeThreadLocalBuffers(Thread* /*thread*/) {}

  // Revoke any sort of thread-local buffers that are used to speed up
  // allocations for all the threads, if the alloc space
  // implementation uses any. No-op by default.
  virtual void RevokeAllThreadLocalBuffers() {}

 protected:
  // Interface class: only constructible/destructible via subclasses.
  AllocSpace() {}
  virtual ~AllocSpace() {}

 private:
  DISALLOW_COPY_AND_ASSIGN(AllocSpace);
};
| 251 | |
| 252 | // Continuous spaces have bitmaps, and an address range. Although not required, objects within |
| 253 | // continuous spaces can be marked in the card table. |
| 254 | class ContinuousSpace : public Space { |
| 255 | public: |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 256 | // Address at which the space begins. |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 257 | byte* Begin() const { |
| 258 | return begin_; |
| 259 | } |
| 260 | |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 261 | // Current address at which the space ends, which may vary as the space is filled. |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 262 | byte* End() const { |
| 263 | return end_; |
| 264 | } |
| 265 | |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 266 | // The end of the address range covered by the space. |
| 267 | byte* Limit() const { |
| 268 | return limit_; |
| 269 | } |
| 270 | |
| 271 | // Change the end of the space. Be careful with use since changing the end of a space to an |
| 272 | // invalid value may break the GC. |
| 273 | void SetEnd(byte* end) { |
| 274 | end_ = end; |
| 275 | } |
| 276 | |
| 277 | void SetLimit(byte* limit) { |
| 278 | limit_ = limit; |
| 279 | } |
| 280 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 281 | // Current size of space |
| 282 | size_t Size() const { |
| 283 | return End() - Begin(); |
| 284 | } |
| 285 | |
| 286 | virtual accounting::SpaceBitmap* GetLiveBitmap() const = 0; |
| 287 | virtual accounting::SpaceBitmap* GetMarkBitmap() const = 0; |
| 288 | |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 289 | // Maximum which the mapped space can grow to. |
| 290 | virtual size_t Capacity() const { |
| 291 | return Limit() - Begin(); |
| 292 | } |
| 293 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 294 | // Is object within this space? We check to see if the pointer is beyond the end first as |
| 295 | // continuous spaces are iterated over from low to high. |
| 296 | bool HasAddress(const mirror::Object* obj) const { |
| 297 | const byte* byte_ptr = reinterpret_cast<const byte*>(obj); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 298 | return byte_ptr >= Begin() && byte_ptr < Limit(); |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 299 | } |
| 300 | |
| 301 | bool Contains(const mirror::Object* obj) const { |
| 302 | return HasAddress(obj); |
| 303 | } |
| 304 | |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 305 | virtual bool IsContinuousSpace() const { |
| 306 | return true; |
| 307 | } |
| 308 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 309 | virtual ~ContinuousSpace() {} |
| 310 | |
| 311 | protected: |
| 312 | ContinuousSpace(const std::string& name, GcRetentionPolicy gc_retention_policy, |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 313 | byte* begin, byte* end, byte* limit) : |
| 314 | Space(name, gc_retention_policy), begin_(begin), end_(end), limit_(limit) { |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 315 | } |
| 316 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 317 | // The beginning of the storage for fast access. |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 318 | byte* begin_; |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 319 | |
| 320 | // Current end of the space. |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 321 | byte* volatile end_; |
| 322 | |
| 323 | // Limit of the space. |
| 324 | byte* limit_; |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 325 | |
| 326 | private: |
| 327 | DISALLOW_COPY_AND_ASSIGN(ContinuousSpace); |
| 328 | }; |
| 329 | |
// A space where objects may be allocated higgledy-piggledy throughout virtual memory. Currently
// the card table can't cover these objects and so the write barrier shouldn't be triggered. This
// is suitable for use for large primitive arrays.
class DiscontinuousSpace : public Space {
 public:
  // Set of live objects in this space (used instead of a bitmap, since the
  // objects are not contained in a single contiguous range).
  accounting::ObjectSet* GetLiveObjects() const {
    return live_objects_.get();
  }

  // Set of marked objects in this space.
  accounting::ObjectSet* GetMarkObjects() const {
    return mark_objects_.get();
  }

  virtual bool IsDiscontinuousSpace() const {
    return true;
  }

  virtual ~DiscontinuousSpace() {}

 protected:
  DiscontinuousSpace(const std::string& name, GcRetentionPolicy gc_retention_policy);

  UniquePtr<accounting::ObjectSet> live_objects_;
  UniquePtr<accounting::ObjectSet> mark_objects_;

 private:
  DISALLOW_COPY_AND_ASSIGN(DiscontinuousSpace);
};
| 358 | |
// A continuous space whose storage is backed by an owned MemMap.
class MemMapSpace : public ContinuousSpace {
 public:
  // Size of the space without a limit on its growth. By default this is just the Capacity, but
  // for the allocation space we support starting with a small heap and then extending it.
  virtual size_t NonGrowthLimitCapacity() const {
    return Capacity();
  }

  MemMap* GetMemMap() {
    return mem_map_.get();
  }

  const MemMap* GetMemMap() const {
    return mem_map_.get();
  }

  // Transfer ownership of the underlying MemMap to the caller; the space no
  // longer owns its storage afterwards.
  MemMap* ReleaseMemMap() {
    return mem_map_.release();
  }

 protected:
  // Takes ownership of mem_map.
  MemMapSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end, byte* limit,
              GcRetentionPolicy gc_retention_policy)
      : ContinuousSpace(name, gc_retention_policy, begin, end, limit),
        mem_map_(mem_map) {
  }

  // Underlying storage of the space.
  UniquePtr<MemMap> mem_map_;

 private:
  DISALLOW_COPY_AND_ASSIGN(MemMapSpace);
};
| 392 | |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 393 | // Used by the heap compaction interface to enable copying from one type of alloc space to another. |
| 394 | class ContinuousMemMapAllocSpace : public MemMapSpace, public AllocSpace { |
| 395 | public: |
| 396 | virtual bool IsAllocSpace() const { |
| 397 | return true; |
| 398 | } |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 399 | virtual AllocSpace* AsAllocSpace() { |
| 400 | return this; |
| 401 | } |
| 402 | |
Mathieu Chartier | a1602f2 | 2014-01-13 17:19:19 -0800 | [diff] [blame] | 403 | virtual bool IsContinuousMemMapAllocSpace() const { |
| 404 | return true; |
| 405 | } |
| 406 | virtual ContinuousMemMapAllocSpace* AsContinuousMemMapAllocSpace() { |
| 407 | return this; |
| 408 | } |
| 409 | |
| 410 | bool HasBoundBitmaps() const EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); |
| 411 | void BindLiveToMarkBitmap() |
| 412 | EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); |
| 413 | void UnBindBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); |
Mathieu Chartier | 1f3b535 | 2014-02-03 14:00:42 -0800 | [diff] [blame] | 414 | // Swap the live and mark bitmaps of this space. This is used by the GC for concurrent sweeping. |
| 415 | void SwapBitmaps(); |
Mathieu Chartier | a1602f2 | 2014-01-13 17:19:19 -0800 | [diff] [blame] | 416 | |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 417 | virtual void Clear() { |
| 418 | LOG(FATAL) << "Unimplemented"; |
| 419 | } |
| 420 | |
Mathieu Chartier | a1602f2 | 2014-01-13 17:19:19 -0800 | [diff] [blame] | 421 | virtual accounting::SpaceBitmap* GetLiveBitmap() const { |
| 422 | return live_bitmap_.get(); |
| 423 | } |
| 424 | virtual accounting::SpaceBitmap* GetMarkBitmap() const { |
| 425 | return mark_bitmap_.get(); |
| 426 | } |
| 427 | |
| 428 | virtual void Sweep(bool swap_bitmaps, size_t* freed_objects, size_t* freed_bytes); |
| 429 | virtual accounting::SpaceBitmap::SweepCallback* GetSweepCallback() { |
| 430 | LOG(FATAL) << "Unimplemented"; |
| 431 | return nullptr; |
| 432 | } |
| 433 | |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 434 | protected: |
Mathieu Chartier | a1602f2 | 2014-01-13 17:19:19 -0800 | [diff] [blame] | 435 | UniquePtr<accounting::SpaceBitmap> live_bitmap_; |
| 436 | UniquePtr<accounting::SpaceBitmap> mark_bitmap_; |
| 437 | UniquePtr<accounting::SpaceBitmap> temp_bitmap_; |
| 438 | |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 439 | ContinuousMemMapAllocSpace(const std::string& name, MemMap* mem_map, byte* begin, |
| 440 | byte* end, byte* limit, GcRetentionPolicy gc_retention_policy) |
| 441 | : MemMapSpace(name, mem_map, begin, end, limit, gc_retention_policy) { |
| 442 | } |
| 443 | |
| 444 | private: |
Mathieu Chartier | a1602f2 | 2014-01-13 17:19:19 -0800 | [diff] [blame] | 445 | friend class gc::Heap; |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 446 | DISALLOW_COPY_AND_ASSIGN(ContinuousMemMapAllocSpace); |
| 447 | }; |
| 448 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 449 | } // namespace space |
| 450 | } // namespace gc |
| 451 | } // namespace art |
| 452 | |
Brian Carlstrom | fc0e321 | 2013-07-17 14:40:12 -0700 | [diff] [blame] | 453 | #endif // ART_RUNTIME_GC_SPACE_SPACE_H_ |