/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_REGION_SPACE_H_
#define ART_RUNTIME_GC_SPACE_REGION_SPACE_H_

#include "gc/accounting/read_barrier_table.h"
#include "object_callbacks.h"
#include "space.h"
#include "thread.h"

namespace art {
namespace gc {
namespace space {

// A space that consists of equal-sized regions.
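// Each region is kRegionSize bytes; during a collection, individual regions are classified
// as from-space (to be evacuated), unevacuated from-space, or to-space (see RegionType below).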
class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
 public:
  typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);

  SpaceType GetType() const OVERRIDE {
    return kSpaceTypeRegionSpace;
  }

  // Create a region space with the requested sizes. The requested base address is not
  // guaranteed to be granted; if it is required, the caller should call Begin on the
  // returned space to confirm the request was granted.
  static RegionSpace* Create(const std::string& name, size_t capacity, uint8_t* requested_begin);
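  // Usage sketch (illustrative only; the sizes and variable names below are made up):
  //
  //   uint8_t* requested_begin = ...;  // Preferred base address, may be nullptr.
  //   RegionSpace* space = RegionSpace::Create("region space", 256 * MB, requested_begin);
  //   if (space->Begin() != requested_begin) {
  //     // The requested base address was not granted.
  //   }
  //   size_t bytes_allocated, usable_size, bytes_tl_bulk_allocated;
  //   mirror::Object* obj = space->Alloc(Thread::Current(), num_bytes, &bytes_allocated,
  //                                      &usable_size, &bytes_tl_bulk_allocated);
  //   if (obj == nullptr) {
  //     // The space is full.
  //   }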

  // Allocate num_bytes, returns nullptr if the space is full.
  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                        size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE;
  // Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
  mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                    size_t* usable_size, size_t* bytes_tl_bulk_allocated)
      OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  // The main allocation routine.
  template<bool kForEvac>
  ALWAYS_INLINE mirror::Object* AllocNonvirtual(size_t num_bytes, size_t* bytes_allocated,
                                                size_t* usable_size,
                                                size_t* bytes_tl_bulk_allocated);
  // Allocate/free large objects (objects that are larger than the region size).
  template<bool kForEvac>
  mirror::Object* AllocLarge(size_t num_bytes, size_t* bytes_allocated, size_t* usable_size,
                             size_t* bytes_tl_bulk_allocated);
  void FreeLarge(mirror::Object* large_obj, size_t bytes_allocated);

  // Return the storage space required by obj.
  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return AllocationSizeNonvirtual(obj, usable_size);
  }
  size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  size_t Free(Thread*, mirror::Object*) OVERRIDE {
    UNIMPLEMENTED(FATAL);
    return 0;
  }
  size_t FreeList(Thread*, size_t, mirror::Object**) OVERRIDE {
    UNIMPLEMENTED(FATAL);
    return 0;
  }
  accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
    // No live bitmap.
    return nullptr;
  }
  accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
    // No mark bitmap.
    return nullptr;
  }

  void Clear() OVERRIDE LOCKS_EXCLUDED(region_lock_);

  void Dump(std::ostream& os) const;
  void DumpRegions(std::ostream& os);
  void DumpNonFreeRegions(std::ostream& os);

  size_t RevokeThreadLocalBuffers(Thread* thread) LOCKS_EXCLUDED(region_lock_);
  void RevokeThreadLocalBuffersLocked(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(region_lock_);
  size_t RevokeAllThreadLocalBuffers() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_,
                                                      Locks::thread_list_lock_);
  void AssertThreadLocalBuffersAreRevoked(Thread* thread) LOCKS_EXCLUDED(region_lock_);
  void AssertAllThreadLocalBuffersAreRevoked() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_,
                                                              Locks::thread_list_lock_);

  enum class RegionType : uint8_t {
    kRegionTypeAll,              // All types.
    kRegionTypeFromSpace,        // From-space. To be evacuated.
    kRegionTypeUnevacFromSpace,  // Unevacuated from-space. Not to be evacuated.
    kRegionTypeToSpace,          // To-space.
    kRegionTypeNone,             // None.
  };

  enum class RegionState : uint8_t {
    kRegionStateFree,            // Free region.
    kRegionStateAllocated,       // Allocated region.
    kRegionStateLarge,           // Large allocated (allocation larger than the region size).
    kRegionStateLargeTail,       // Large tail (non-first regions of a large allocation).
  };
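
  // For example (illustrative): a single allocation of roughly 2.5 * MB is larger than
  // kRegionSize, so it is placed in three contiguous regions; the first becomes
  // kRegionStateLarge and the remaining two become kRegionStateLargeTail. A region moves
  // from kRegionStateFree to an allocated state via Unfree(), UnfreeLarge() or
  // UnfreeLargeTail(), and back to kRegionStateFree via Clear().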

  template<RegionType kRegionType> uint64_t GetBytesAllocatedInternal();
  template<RegionType kRegionType> uint64_t GetObjectsAllocatedInternal();
  uint64_t GetBytesAllocated() {
    return GetBytesAllocatedInternal<RegionType::kRegionTypeAll>();
  }
  uint64_t GetObjectsAllocated() {
    return GetObjectsAllocatedInternal<RegionType::kRegionTypeAll>();
  }
  uint64_t GetBytesAllocatedInFromSpace() {
    return GetBytesAllocatedInternal<RegionType::kRegionTypeFromSpace>();
  }
  uint64_t GetObjectsAllocatedInFromSpace() {
    return GetObjectsAllocatedInternal<RegionType::kRegionTypeFromSpace>();
  }
  uint64_t GetBytesAllocatedInUnevacFromSpace() {
    return GetBytesAllocatedInternal<RegionType::kRegionTypeUnevacFromSpace>();
  }
  uint64_t GetObjectsAllocatedInUnevacFromSpace() {
    return GetObjectsAllocatedInternal<RegionType::kRegionTypeUnevacFromSpace>();
  }

  bool CanMoveObjects() const OVERRIDE {
    return true;
  }

  bool Contains(const mirror::Object* obj) const {
    const uint8_t* byte_obj = reinterpret_cast<const uint8_t*>(obj);
    return byte_obj >= Begin() && byte_obj < Limit();
  }

  RegionSpace* AsRegionSpace() OVERRIDE {
    return this;
  }

  // Go through all of the blocks and visit the continuous objects.
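  // Walk() visits every region (WalkInternal<false>); WalkToSpace() below visits only
  // to-space regions (WalkInternal<true>).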
  void Walk(ObjectCallback* callback, void* arg)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
    WalkInternal<false>(callback, arg);
  }

  void WalkToSpace(ObjectCallback* callback, void* arg)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
    WalkInternal<true>(callback, arg);
  }

  accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() OVERRIDE {
    return nullptr;
  }
  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Object alignment within the space.
  static constexpr size_t kAlignment = kObjectAlignment;
  // The region size.
  static constexpr size_t kRegionSize = 1 * MB;

  bool IsInFromSpace(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->IsInFromSpace();
    }
    return false;
  }

  bool IsInUnevacFromSpace(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->IsInUnevacFromSpace();
    }
    return false;
  }

  bool IsInToSpace(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->IsInToSpace();
    }
    return false;
  }

  RegionType GetRegionType(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->Type();
    }
    return RegionType::kRegionTypeNone;
  }

  void SetFromSpace(accounting::ReadBarrierTable* rb_table, bool force_evacuate_all)
      LOCKS_EXCLUDED(region_lock_);

  size_t FromSpaceSize();
  size_t UnevacFromSpaceSize();
  size_t ToSpaceSize();
  void ClearFromSpace();

  void AddLiveBytes(mirror::Object* ref, size_t alloc_size) {
    Region* reg = RefToRegionUnlocked(ref);
    reg->AddLiveBytes(alloc_size);
  }

  void AssertAllRegionLiveBytesZeroOrCleared();

  void RecordAlloc(mirror::Object* ref);
  bool AllocNewTlab(Thread* self);

  uint32_t Time() {
    return time_;
  }

 private:
  RegionSpace(const std::string& name, MemMap* mem_map);

  template<bool kToSpaceOnly>
  void WalkInternal(ObjectCallback* callback, void* arg) NO_THREAD_SAFETY_ANALYSIS;

  class Region {
   public:
    Region()
        : idx_(static_cast<size_t>(-1)),
          begin_(nullptr), top_(nullptr), end_(nullptr),
          state_(RegionState::kRegionStateAllocated), type_(RegionType::kRegionTypeToSpace),
          objects_allocated_(0), alloc_time_(0), live_bytes_(static_cast<size_t>(-1)),
          is_newly_allocated_(false), is_a_tlab_(false), thread_(nullptr) {}

    Region(size_t idx, uint8_t* begin, uint8_t* end)
        : idx_(idx), begin_(begin), top_(begin), end_(end),
          state_(RegionState::kRegionStateFree), type_(RegionType::kRegionTypeNone),
          objects_allocated_(0), alloc_time_(0), live_bytes_(static_cast<size_t>(-1)),
          is_newly_allocated_(false), is_a_tlab_(false), thread_(nullptr) {
      DCHECK_LT(begin, end);
      DCHECK_EQ(static_cast<size_t>(end - begin), kRegionSize);
    }

    RegionState State() const {
      return state_;
    }

    RegionType Type() const {
      return type_;
    }

    void Clear() {
      top_ = begin_;
      state_ = RegionState::kRegionStateFree;
      type_ = RegionType::kRegionTypeNone;
      objects_allocated_ = 0;
      alloc_time_ = 0;
      live_bytes_ = static_cast<size_t>(-1);
      if (!kMadviseZeroes) {
        memset(begin_, 0, end_ - begin_);
      }
      madvise(begin_, end_ - begin_, MADV_DONTNEED);
      is_newly_allocated_ = false;
      is_a_tlab_ = false;
      thread_ = nullptr;
    }

    ALWAYS_INLINE mirror::Object* Alloc(size_t num_bytes, size_t* bytes_allocated,
                                        size_t* usable_size,
                                        size_t* bytes_tl_bulk_allocated);

    bool IsFree() const {
      bool is_free = state_ == RegionState::kRegionStateFree;
      if (is_free) {
        DCHECK(IsInNoSpace());
        DCHECK_EQ(begin_, top_);
        DCHECK_EQ(objects_allocated_, 0U);
      }
      return is_free;
    }

    // Given a free region, declare it non-free (allocated).
    void Unfree(uint32_t alloc_time) {
      DCHECK(IsFree());
      state_ = RegionState::kRegionStateAllocated;
      type_ = RegionType::kRegionTypeToSpace;
      alloc_time_ = alloc_time;
    }

    void UnfreeLarge(uint32_t alloc_time) {
      DCHECK(IsFree());
      state_ = RegionState::kRegionStateLarge;
      type_ = RegionType::kRegionTypeToSpace;
      alloc_time_ = alloc_time;
    }

    void UnfreeLargeTail(uint32_t alloc_time) {
      DCHECK(IsFree());
      state_ = RegionState::kRegionStateLargeTail;
      type_ = RegionType::kRegionTypeToSpace;
      alloc_time_ = alloc_time;
    }

    void SetNewlyAllocated() {
      is_newly_allocated_ = true;
    }

    // Non-large, non-large-tail allocated.
    bool IsAllocated() const {
      return state_ == RegionState::kRegionStateAllocated;
    }

    // Large allocated.
    bool IsLarge() const {
      bool is_large = state_ == RegionState::kRegionStateLarge;
      if (is_large) {
        DCHECK_LT(begin_ + 1 * MB, top_);
      }
      return is_large;
    }

    // Large-tail allocated.
    bool IsLargeTail() const {
      bool is_large_tail = state_ == RegionState::kRegionStateLargeTail;
      if (is_large_tail) {
        DCHECK_EQ(begin_, top_);
      }
      return is_large_tail;
    }

    size_t Idx() const {
      return idx_;
    }

    bool IsInFromSpace() const {
      return type_ == RegionType::kRegionTypeFromSpace;
    }

    bool IsInToSpace() const {
      return type_ == RegionType::kRegionTypeToSpace;
    }

    bool IsInUnevacFromSpace() const {
      return type_ == RegionType::kRegionTypeUnevacFromSpace;
    }

    bool IsInNoSpace() const {
      return type_ == RegionType::kRegionTypeNone;
    }

    void SetAsFromSpace() {
      DCHECK(!IsFree() && IsInToSpace());
      type_ = RegionType::kRegionTypeFromSpace;
      live_bytes_ = static_cast<size_t>(-1);
    }

    void SetAsUnevacFromSpace() {
      DCHECK(!IsFree() && IsInToSpace());
      type_ = RegionType::kRegionTypeUnevacFromSpace;
      live_bytes_ = 0U;
    }

    void SetUnevacFromSpaceAsToSpace() {
      DCHECK(!IsFree() && IsInUnevacFromSpace());
      type_ = RegionType::kRegionTypeToSpace;
    }

    ALWAYS_INLINE bool ShouldBeEvacuated();

    void AddLiveBytes(size_t live_bytes) {
      DCHECK(IsInUnevacFromSpace());
      DCHECK(!IsLargeTail());
      DCHECK_NE(live_bytes_, static_cast<size_t>(-1));
      live_bytes_ += live_bytes;
      DCHECK_LE(live_bytes_, BytesAllocated());
    }

    size_t LiveBytes() const {
      return live_bytes_;
    }

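    // Returns the percentage of allocated bytes in this region that are live, with the
    // denominator rounded up to a whole region. For example (illustrative): a fully
    // allocated 1 MB region with 256 KB of live data yields (256 KB * 100) / 1 MB = 25.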
    uint GetLivePercent() const {
      DCHECK(IsInToSpace());
      DCHECK(!IsLargeTail());
      DCHECK_NE(live_bytes_, static_cast<size_t>(-1));
      DCHECK_LE(live_bytes_, BytesAllocated());
      size_t bytes_allocated = RoundUp(BytesAllocated(), kRegionSize);
      DCHECK_GE(bytes_allocated, 0U);
      uint result = (live_bytes_ * 100U) / bytes_allocated;
      DCHECK_LE(result, 100U);
      return result;
    }

    size_t BytesAllocated() const {
      if (IsLarge()) {
        DCHECK_LT(begin_ + kRegionSize, top_);
        return static_cast<size_t>(top_ - begin_);
      } else if (IsLargeTail()) {
        DCHECK_EQ(begin_, top_);
        return 0;
      } else {
        DCHECK(IsAllocated()) << static_cast<uint>(state_);
        DCHECK_LE(begin_, top_);
        size_t bytes = static_cast<size_t>(top_ - begin_);
        DCHECK_LE(bytes, kRegionSize);
        return bytes;
      }
    }

    size_t ObjectsAllocated() const {
      if (IsLarge()) {
        DCHECK_LT(begin_ + 1 * MB, top_);
        DCHECK_EQ(objects_allocated_, 0U);
        return 1;
      } else if (IsLargeTail()) {
        DCHECK_EQ(begin_, top_);
        DCHECK_EQ(objects_allocated_, 0U);
        return 0;
      } else {
        DCHECK(IsAllocated()) << static_cast<uint>(state_);
        return objects_allocated_;
      }
    }

    uint8_t* Begin() const {
      return begin_;
    }

    uint8_t* Top() const {
      return top_;
    }

    void SetTop(uint8_t* new_top) {
      top_ = new_top;
    }

    uint8_t* End() const {
      return end_;
    }

    bool Contains(mirror::Object* ref) const {
      return begin_ <= reinterpret_cast<uint8_t*>(ref) && reinterpret_cast<uint8_t*>(ref) < end_;
    }

    void Dump(std::ostream& os) const;

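    // Record the allocations a thread made in this region while it was serving as that
    // thread's thread-local allocation buffer (presumably called when the buffer is
    // revoked; see RevokeThreadLocalBuffers above).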
    void RecordThreadLocalAllocations(size_t num_objects, size_t num_bytes) {
      DCHECK(IsAllocated());
      DCHECK_EQ(objects_allocated_, 0U);
      DCHECK_EQ(top_, end_);
      objects_allocated_ = num_objects;
      top_ = begin_ + num_bytes;
      DCHECK_EQ(top_, end_);
    }

   private:
    size_t idx_;                  // The region's index in the region space.
    uint8_t* begin_;              // The begin address of the region.
    // Can't use Atomic<uint8_t*> as Atomic's copy operator is implicitly deleted.
    uint8_t* top_;                // The current position of the allocation.
    uint8_t* end_;                // The end address of the region.
    RegionState state_;           // The region state (see RegionState).
    RegionType type_;             // The region type (see RegionType).
    uint64_t objects_allocated_;  // The number of objects allocated.
    uint32_t alloc_time_;         // The allocation time of the region.
    size_t live_bytes_;           // The live bytes. Used to compute the live percent.
    bool is_newly_allocated_;     // True if it's allocated after the last collection.
    bool is_a_tlab_;              // True if it's a tlab.
    Thread* thread_;              // The owning thread if it's a tlab.

    friend class RegionSpace;
  };

  Region* RefToRegion(mirror::Object* ref) LOCKS_EXCLUDED(region_lock_) {
    MutexLock mu(Thread::Current(), region_lock_);
    return RefToRegionLocked(ref);
  }

  Region* RefToRegionUnlocked(mirror::Object* ref) NO_THREAD_SAFETY_ANALYSIS {
    // For performance reasons (this is frequently called via IsInFromSpace() etc.) we
    // avoid taking a lock here. Note that since a region only changes from to-space to
    // from-space during a pause (SetFromSpace()) and from from-space to free after the
    // GC is done, it's safe to access the region state without the lock, as long as ref
    // is a valid reference into an allocated region.
    return RefToRegionLocked(ref);
  }

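  // The containing region is found by simple address arithmetic: for example
  // (illustrative), a ref at Begin() + 5 * MB + 8 falls in region index 5, since
  // kRegionSize is 1 MB.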
  Region* RefToRegionLocked(mirror::Object* ref) EXCLUSIVE_LOCKS_REQUIRED(region_lock_) {
    DCHECK(HasAddress(ref));
    uintptr_t offset = reinterpret_cast<uintptr_t>(ref) - reinterpret_cast<uintptr_t>(Begin());
    size_t reg_idx = offset / kRegionSize;
    DCHECK_LT(reg_idx, num_regions_);
    Region* reg = &regions_[reg_idx];
    DCHECK_EQ(reg->Idx(), reg_idx);
    DCHECK(reg->Contains(ref));
    return reg;
  }

  mirror::Object* GetNextObject(mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  Mutex region_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  uint32_t time_;                  // The time, measured as the number of collections since startup.
  size_t num_regions_;             // The number of regions in this space.
  size_t num_non_free_regions_;    // The number of non-free regions in this space.
  // The pointer to the region array.
  std::unique_ptr<Region[]> regions_ GUARDED_BY(region_lock_);
  Region* current_region_;         // The region that's being allocated currently.
  Region* evac_region_;            // The region that's being evacuated to currently.
  Region full_region_;             // The dummy/sentinel region that looks full.

  DISALLOW_COPY_AND_ASSIGN(RegionSpace);
};

std::ostream& operator<<(std::ostream& os, const RegionSpace::RegionState& value);
std::ostream& operator<<(std::ostream& os, const RegionSpace::RegionType& value);

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_REGION_SPACE_H_