/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_REGION_SPACE_H_
#define ART_RUNTIME_GC_SPACE_REGION_SPACE_H_

#include "gc/accounting/read_barrier_table.h"
#include "object_callbacks.h"
#include "space.h"
#include "thread.h"

namespace art {
namespace gc {
namespace space {

// A space that consists of equal-sized regions.
class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
 public:
  typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);

  SpaceType GetType() const OVERRIDE {
    return kSpaceTypeRegionSpace;
  }

  // Create a region space with the requested size. The requested base address is not
  // guaranteed to be granted; if it is required, the caller should call Begin on the returned
  // space to confirm that the request was granted.
  static RegionSpace* Create(const std::string& name, size_t capacity, uint8_t* requested_begin);
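  // Illustrative use of Create() (not part of this header): a caller that needs a specific
  // base address would pass it as requested_begin and then verify it via Begin(); otherwise
  // something like the following is enough (the name and capacity are made up):
  //
  //   RegionSpace* space = RegionSpace::Create("region space", 256 * MB, /* requested_begin */ nullptr);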

  // Allocate num_bytes, returns null if the space is full.
  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                        size_t* usable_size, size_t* bytes_tl_bulk_allocated)
      OVERRIDE REQUIRES(!region_lock_);
  // Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
  mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                    size_t* usable_size, size_t* bytes_tl_bulk_allocated)
      OVERRIDE REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_);
  // The main allocation routine.
  template<bool kForEvac>
  ALWAYS_INLINE mirror::Object* AllocNonvirtual(size_t num_bytes, size_t* bytes_allocated,
                                                size_t* usable_size,
                                                size_t* bytes_tl_bulk_allocated)
      REQUIRES(!region_lock_);
  // Allocate/free large objects (objects that are larger than the region size).
  template<bool kForEvac>
  mirror::Object* AllocLarge(size_t num_bytes, size_t* bytes_allocated, size_t* usable_size,
                             size_t* bytes_tl_bulk_allocated) REQUIRES(!region_lock_);
  void FreeLarge(mirror::Object* large_obj, size_t bytes_allocated) REQUIRES(!region_lock_);
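  // Note: per the RegionState values below, a large allocation spans multiple contiguous
  // regions; the first region is marked kRegionStateLarge and the following ones
  // kRegionStateLargeTail.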

  // Return the storage space required by obj.
  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!region_lock_) {
    return AllocationSizeNonvirtual(obj, usable_size);
  }
  size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!region_lock_);

  size_t Free(Thread*, mirror::Object*) OVERRIDE {
    UNIMPLEMENTED(FATAL);
    return 0;
  }
  size_t FreeList(Thread*, size_t, mirror::Object**) OVERRIDE {
    UNIMPLEMENTED(FATAL);
    return 0;
  }
  accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
    return mark_bitmap_.get();
  }
  accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
    return mark_bitmap_.get();
  }
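
  // Note: both accessors above return the same bitmap; this space keeps a single mark bitmap
  // (mark_bitmap_ below) and also uses it as its live bitmap.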

  void Clear() OVERRIDE REQUIRES(!region_lock_);

  void Dump(std::ostream& os) const;
  void DumpRegions(std::ostream& os) REQUIRES(!region_lock_);
  void DumpNonFreeRegions(std::ostream& os) REQUIRES(!region_lock_);

  size_t RevokeThreadLocalBuffers(Thread* thread) REQUIRES(!region_lock_);
  void RevokeThreadLocalBuffersLocked(Thread* thread) REQUIRES(region_lock_);
  size_t RevokeAllThreadLocalBuffers()
      REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !region_lock_);
  void AssertThreadLocalBuffersAreRevoked(Thread* thread) REQUIRES(!region_lock_);
  void AssertAllThreadLocalBuffersAreRevoked()
      REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !region_lock_);

  enum class RegionType : uint8_t {
    kRegionTypeAll,              // All types.
    kRegionTypeFromSpace,        // From-space. To be evacuated.
    kRegionTypeUnevacFromSpace,  // Unevacuated from-space. Not to be evacuated.
    kRegionTypeToSpace,          // To-space.
    kRegionTypeNone,             // None.
  };

  enum class RegionState : uint8_t {
    kRegionStateFree,       // Free region.
    kRegionStateAllocated,  // Allocated region.
    kRegionStateLarge,      // Large allocated (allocation larger than the region size).
    kRegionStateLargeTail,  // Large tail (non-first regions of a large allocation).
  };
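
  // A rough sketch of how a region moves through these enums (see the Region methods further
  // below for the actual transitions): a region starts Free/None, becomes
  // Allocated/Large/LargeTail with type ToSpace when it is unfreed for allocation
  // (Unfree*()), may be retyped to FromSpace or UnevacFromSpace for a collection
  // (SetFromSpace()), and is afterwards either cleared back to Free/None or retyped back to
  // ToSpace (SetUnevacFromSpaceAsToSpace()).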

  template<RegionType kRegionType> uint64_t GetBytesAllocatedInternal() REQUIRES(!region_lock_);
  template<RegionType kRegionType> uint64_t GetObjectsAllocatedInternal() REQUIRES(!region_lock_);
  uint64_t GetBytesAllocated() REQUIRES(!region_lock_) {
    return GetBytesAllocatedInternal<RegionType::kRegionTypeAll>();
  }
  uint64_t GetObjectsAllocated() REQUIRES(!region_lock_) {
    return GetObjectsAllocatedInternal<RegionType::kRegionTypeAll>();
  }
  uint64_t GetBytesAllocatedInFromSpace() REQUIRES(!region_lock_) {
    return GetBytesAllocatedInternal<RegionType::kRegionTypeFromSpace>();
  }
  uint64_t GetObjectsAllocatedInFromSpace() REQUIRES(!region_lock_) {
    return GetObjectsAllocatedInternal<RegionType::kRegionTypeFromSpace>();
  }
  uint64_t GetBytesAllocatedInUnevacFromSpace() REQUIRES(!region_lock_) {
    return GetBytesAllocatedInternal<RegionType::kRegionTypeUnevacFromSpace>();
  }
  uint64_t GetObjectsAllocatedInUnevacFromSpace() REQUIRES(!region_lock_) {
    return GetObjectsAllocatedInternal<RegionType::kRegionTypeUnevacFromSpace>();
  }

  bool CanMoveObjects() const OVERRIDE {
    return true;
  }

  bool Contains(const mirror::Object* obj) const {
    const uint8_t* byte_obj = reinterpret_cast<const uint8_t*>(obj);
    return byte_obj >= Begin() && byte_obj < Limit();
  }

  RegionSpace* AsRegionSpace() OVERRIDE {
    return this;
  }

  // Go through all of the blocks and visit the continuous objects.
  void Walk(ObjectCallback* callback, void* arg)
      REQUIRES(Locks::mutator_lock_) {
    WalkInternal<false>(callback, arg);
  }

  void WalkToSpace(ObjectCallback* callback, void* arg)
      REQUIRES(Locks::mutator_lock_) {
    WalkInternal<true>(callback, arg);
  }

  accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() OVERRIDE {
    return nullptr;
  }
  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!region_lock_);

  // Object alignment within the space.
  static constexpr size_t kAlignment = kObjectAlignment;
  // The region size.
  static constexpr size_t kRegionSize = 1 * MB;
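  // For illustration: with kRegionSize == 1 MB, a space created with, say, 256 * MB of
  // capacity is carved into 256 regions, and any single allocation larger than 1 MB goes
  // through AllocLarge() above.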

  bool IsInFromSpace(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->IsInFromSpace();
    }
    return false;
  }

  bool IsInUnevacFromSpace(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->IsInUnevacFromSpace();
    }
    return false;
  }

  bool IsInToSpace(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->IsInToSpace();
    }
    return false;
  }

  RegionType GetRegionType(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->Type();
    }
    return RegionType::kRegionTypeNone;
  }
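
  // The queries above deliberately use RefToRegionUnlocked() (see its comment below): they
  // are called frequently, and reading the region type without the lock is safe because the
  // type only changes at well-defined points in the GC.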

  void SetFromSpace(accounting::ReadBarrierTable* rb_table, bool force_evacuate_all)
      REQUIRES(!region_lock_);

  size_t FromSpaceSize() REQUIRES(!region_lock_);
  size_t UnevacFromSpaceSize() REQUIRES(!region_lock_);
  size_t ToSpaceSize() REQUIRES(!region_lock_);
  void ClearFromSpace() REQUIRES(!region_lock_);

  void AddLiveBytes(mirror::Object* ref, size_t alloc_size) {
    Region* reg = RefToRegionUnlocked(ref);
    reg->AddLiveBytes(alloc_size);
  }
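
  // Live bytes are only tracked for unevacuated from-space regions: Region::AddLiveBytes()
  // below DCHECKs this, and SetAsUnevacFromSpace()/SetAsFromSpace() set the counter to 0 or
  // to -1 (meaning "not tracked") respectively.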

  void AssertAllRegionLiveBytesZeroOrCleared() REQUIRES(!region_lock_) {
    if (kIsDebugBuild) {
      MutexLock mu(Thread::Current(), region_lock_);
      for (size_t i = 0; i < num_regions_; ++i) {
        Region* r = &regions_[i];
        size_t live_bytes = r->LiveBytes();
        CHECK(live_bytes == 0U || live_bytes == static_cast<size_t>(-1)) << live_bytes;
      }
    }
  }

  void RecordAlloc(mirror::Object* ref) REQUIRES(!region_lock_);
  bool AllocNewTlab(Thread* self) REQUIRES(!region_lock_);

  uint32_t Time() {
    return time_;
  }

 private:
  RegionSpace(const std::string& name, MemMap* mem_map);

  template<bool kToSpaceOnly>
  void WalkInternal(ObjectCallback* callback, void* arg) NO_THREAD_SAFETY_ANALYSIS;

  class Region {
   public:
    Region()
        : idx_(static_cast<size_t>(-1)),
          begin_(nullptr), top_(nullptr), end_(nullptr),
          state_(RegionState::kRegionStateAllocated), type_(RegionType::kRegionTypeToSpace),
          objects_allocated_(0), alloc_time_(0), live_bytes_(static_cast<size_t>(-1)),
          is_newly_allocated_(false), is_a_tlab_(false), thread_(nullptr) {}

    void Init(size_t idx, uint8_t* begin, uint8_t* end) {
      idx_ = idx;
      begin_ = begin;
      top_.StoreRelaxed(begin);
      end_ = end;
      state_ = RegionState::kRegionStateFree;
      type_ = RegionType::kRegionTypeNone;
      objects_allocated_.StoreRelaxed(0);
      alloc_time_ = 0;
      live_bytes_ = static_cast<size_t>(-1);
      is_newly_allocated_ = false;
      is_a_tlab_ = false;
      thread_ = nullptr;
      DCHECK_LT(begin, end);
      DCHECK_EQ(static_cast<size_t>(end - begin), kRegionSize);
    }

    RegionState State() const {
      return state_;
    }

    RegionType Type() const {
      return type_;
    }

    void Clear() {
      top_.StoreRelaxed(begin_);
      state_ = RegionState::kRegionStateFree;
      type_ = RegionType::kRegionTypeNone;
      objects_allocated_.StoreRelaxed(0);
      alloc_time_ = 0;
      live_bytes_ = static_cast<size_t>(-1);
      ZeroAndReleasePages(begin_, end_ - begin_);
      is_newly_allocated_ = false;
      is_a_tlab_ = false;
      thread_ = nullptr;
    }

    ALWAYS_INLINE mirror::Object* Alloc(size_t num_bytes, size_t* bytes_allocated,
                                        size_t* usable_size,
                                        size_t* bytes_tl_bulk_allocated);

    bool IsFree() const {
      bool is_free = state_ == RegionState::kRegionStateFree;
      if (is_free) {
        DCHECK(IsInNoSpace());
        DCHECK_EQ(begin_, Top());
        DCHECK_EQ(objects_allocated_.LoadRelaxed(), 0U);
      }
      return is_free;
    }

    // Given a free region, declare it non-free (allocated).
    void Unfree(uint32_t alloc_time) {
      DCHECK(IsFree());
      state_ = RegionState::kRegionStateAllocated;
      type_ = RegionType::kRegionTypeToSpace;
      alloc_time_ = alloc_time;
    }

    void UnfreeLarge(uint32_t alloc_time) {
      DCHECK(IsFree());
      state_ = RegionState::kRegionStateLarge;
      type_ = RegionType::kRegionTypeToSpace;
      alloc_time_ = alloc_time;
    }

    void UnfreeLargeTail(uint32_t alloc_time) {
      DCHECK(IsFree());
      state_ = RegionState::kRegionStateLargeTail;
      type_ = RegionType::kRegionTypeToSpace;
      alloc_time_ = alloc_time;
    }

    void SetNewlyAllocated() {
      is_newly_allocated_ = true;
    }

    // Non-large, non-large-tail allocated.
    bool IsAllocated() const {
      return state_ == RegionState::kRegionStateAllocated;
    }

    // Large allocated.
    bool IsLarge() const {
      bool is_large = state_ == RegionState::kRegionStateLarge;
      if (is_large) {
        DCHECK_LT(begin_ + 1 * MB, Top());
      }
      return is_large;
    }

    // Large-tail allocated.
    bool IsLargeTail() const {
      bool is_large_tail = state_ == RegionState::kRegionStateLargeTail;
      if (is_large_tail) {
        DCHECK_EQ(begin_, Top());
      }
      return is_large_tail;
    }

    size_t Idx() const {
      return idx_;
    }

    bool IsInFromSpace() const {
      return type_ == RegionType::kRegionTypeFromSpace;
    }

    bool IsInToSpace() const {
      return type_ == RegionType::kRegionTypeToSpace;
    }

    bool IsInUnevacFromSpace() const {
      return type_ == RegionType::kRegionTypeUnevacFromSpace;
    }

    bool IsInNoSpace() const {
      return type_ == RegionType::kRegionTypeNone;
    }

    void SetAsFromSpace() {
      DCHECK(!IsFree() && IsInToSpace());
      type_ = RegionType::kRegionTypeFromSpace;
      live_bytes_ = static_cast<size_t>(-1);
    }

    void SetAsUnevacFromSpace() {
      DCHECK(!IsFree() && IsInToSpace());
      type_ = RegionType::kRegionTypeUnevacFromSpace;
      live_bytes_ = 0U;
    }

    void SetUnevacFromSpaceAsToSpace() {
      DCHECK(!IsFree() && IsInUnevacFromSpace());
      type_ = RegionType::kRegionTypeToSpace;
    }

    ALWAYS_INLINE bool ShouldBeEvacuated();

    void AddLiveBytes(size_t live_bytes) {
      DCHECK(IsInUnevacFromSpace());
      DCHECK(!IsLargeTail());
      DCHECK_NE(live_bytes_, static_cast<size_t>(-1));
      live_bytes_ += live_bytes;
      DCHECK_LE(live_bytes_, BytesAllocated());
    }

    size_t LiveBytes() const {
      return live_bytes_;
    }

    size_t BytesAllocated() const {
      if (IsLarge()) {
        DCHECK_LT(begin_ + kRegionSize, Top());
        return static_cast<size_t>(Top() - begin_);
      } else if (IsLargeTail()) {
        DCHECK_EQ(begin_, Top());
        return 0;
      } else {
        DCHECK(IsAllocated()) << static_cast<uint>(state_);
        DCHECK_LE(begin_, Top());
        size_t bytes = static_cast<size_t>(Top() - begin_);
        DCHECK_LE(bytes, kRegionSize);
        return bytes;
      }
    }

    size_t ObjectsAllocated() const {
      if (IsLarge()) {
        DCHECK_LT(begin_ + 1 * MB, Top());
        DCHECK_EQ(objects_allocated_.LoadRelaxed(), 0U);
        return 1;
      } else if (IsLargeTail()) {
        DCHECK_EQ(begin_, Top());
        DCHECK_EQ(objects_allocated_.LoadRelaxed(), 0U);
        return 0;
      } else {
        DCHECK(IsAllocated()) << static_cast<uint>(state_);
        return objects_allocated_;
      }
    }

    uint8_t* Begin() const {
      return begin_;
    }

    ALWAYS_INLINE uint8_t* Top() const {
      return top_.LoadRelaxed();
    }

    void SetTop(uint8_t* new_top) {
      top_.StoreRelaxed(new_top);
    }

    uint8_t* End() const {
      return end_;
    }

    bool Contains(mirror::Object* ref) const {
      return begin_ <= reinterpret_cast<uint8_t*>(ref) && reinterpret_cast<uint8_t*>(ref) < end_;
    }

    void Dump(std::ostream& os) const;

    void RecordThreadLocalAllocations(size_t num_objects, size_t num_bytes) {
      DCHECK(IsAllocated());
      DCHECK_EQ(objects_allocated_.LoadRelaxed(), 0U);
      DCHECK_EQ(Top(), end_);
      objects_allocated_.StoreRelaxed(num_objects);
      top_.StoreRelaxed(begin_ + num_bytes);
      DCHECK_EQ(Top(), end_);
    }
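    // Note: the DCHECKs above imply that a region handed out as a thread-local buffer has its
    // top already at end_ when the thread's allocations are recorded here, and that num_bytes
    // is expected to account for the whole region (Top() is still end_ after the store).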

   private:
    size_t idx_;                        // The region's index in the region space.
    uint8_t* begin_;                    // The begin address of the region.
    Atomic<uint8_t*> top_;              // The current position of the allocation.
    uint8_t* end_;                      // The end address of the region.
    RegionState state_;                 // The region state (see RegionState).
    RegionType type_;                   // The region type (see RegionType).
    Atomic<size_t> objects_allocated_;  // The number of objects allocated.
    uint32_t alloc_time_;               // The allocation time of the region.
    size_t live_bytes_;                 // The live bytes. Used to compute the live percent.
    bool is_newly_allocated_;           // True if it's allocated after the last collection.
    bool is_a_tlab_;                    // True if it's a tlab.
    Thread* thread_;                    // The owning thread if it's a tlab.

    friend class RegionSpace;
  };

  Region* RefToRegion(mirror::Object* ref) REQUIRES(!region_lock_) {
    MutexLock mu(Thread::Current(), region_lock_);
    return RefToRegionLocked(ref);
  }

  Region* RefToRegionUnlocked(mirror::Object* ref) NO_THREAD_SAFETY_ANALYSIS {
    // For performance (this is frequently called via IsInFromSpace() etc.) we avoid taking a
    // lock here. Note that since we only change a region from to-space to from-space during a
    // pause (in SetFromSpace()) and from from-space to free only after the GC is done, it is
    // safe to access the region state without the lock as long as ref is a valid reference
    // into an allocated region.
    return RefToRegionLocked(ref);
  }

  Region* RefToRegionLocked(mirror::Object* ref) REQUIRES(region_lock_) {
    DCHECK(HasAddress(ref));
    uintptr_t offset = reinterpret_cast<uintptr_t>(ref) - reinterpret_cast<uintptr_t>(Begin());
    size_t reg_idx = offset / kRegionSize;
    DCHECK_LT(reg_idx, num_regions_);
    Region* reg = &regions_[reg_idx];
    DCHECK_EQ(reg->Idx(), reg_idx);
    DCHECK(reg->Contains(ref));
    return reg;
  }
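  // For example (illustrative numbers only): if Begin() is 0x70000000 and kRegionSize is
  // 1 MB, a ref at 0x70234560 has offset 0x234560 and maps to regions_[0x234560 / (1 * MB)],
  // i.e. regions_[2].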

  mirror::Object* GetNextObject(mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_);

  Mutex region_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  uint32_t time_;                  // The time as the number of collections since the startup.
  size_t num_regions_;             // The number of regions in this space.
  size_t num_non_free_regions_;    // The number of non-free regions in this space.
  std::unique_ptr<Region[]> regions_ GUARDED_BY(region_lock_);
                                   // The pointer to the region array.
  Region* current_region_;         // The region that's being allocated currently.
  Region* evac_region_;            // The region that's being evacuated to currently.
  Region full_region_;             // The dummy/sentinel region that looks full.

  // Mark bitmap used by the GC.
  std::unique_ptr<accounting::ContinuousSpaceBitmap> mark_bitmap_;

  DISALLOW_COPY_AND_ASSIGN(RegionSpace);
};

std::ostream& operator<<(std::ostream& os, const RegionSpace::RegionState& value);
std::ostream& operator<<(std::ostream& os, const RegionSpace::RegionType& value);

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_REGION_SPACE_H_