/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_REGION_SPACE_H_
#define ART_RUNTIME_GC_SPACE_REGION_SPACE_H_

#include "gc/accounting/read_barrier_table.h"
#include "object_callbacks.h"
#include "space.h"
#include "thread.h"

namespace art {
namespace gc {
namespace space {

// A space that consists of equal-sized regions.
class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
 public:
  typedef void(*WalkCallback)(void* start, void* end, size_t num_bytes, void* callback_arg);

  SpaceType GetType() const OVERRIDE {
    return kSpaceTypeRegionSpace;
  }

  // Create a region space with the requested sizes. The requested base address is not
  // guaranteed to be granted; if it is required, the caller should call Begin on the returned
  // space to confirm that the request was granted.
  static RegionSpace* Create(const std::string& name, size_t capacity, uint8_t* requested_begin);

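  // A minimal usage sketch, with illustrative values (the name, capacity, and
  // request size below are made up, not prescribed by this API):
  //
  //   RegionSpace* space = RegionSpace::Create("region space", 256 * MB, nullptr);
  //   size_t bytes_allocated, usable_size;
  //   mirror::Object* obj =
  //       space->Alloc(Thread::Current(), 64, &bytes_allocated, &usable_size);
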
  // Allocate num_bytes, returns nullptr if the space is full.
  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                        size_t* usable_size) OVERRIDE;
  // Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
  mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                    size_t* usable_size)
      OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  // The main allocation routine.
  template<bool kForEvac>
  ALWAYS_INLINE mirror::Object* AllocNonvirtual(size_t num_bytes, size_t* bytes_allocated,
                                                size_t* usable_size);
  // Allocate/free large objects (objects that are larger than the region size).
  template<bool kForEvac>
  mirror::Object* AllocLarge(size_t num_bytes, size_t* bytes_allocated, size_t* usable_size);
  void FreeLarge(mirror::Object* large_obj, size_t bytes_allocated);

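  // For example, with kRegionSize == 1 * MB, a 2.5 MB allocation via AllocLarge
  // occupies three contiguous regions: one "large" region, which accounts for
  // the whole allocation, followed by two "large tail" regions.
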
  // Return the storage space required by obj.
  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return AllocationSizeNonvirtual(obj, usable_size);
  }
  size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  size_t Free(Thread*, mirror::Object*) OVERRIDE {
    UNIMPLEMENTED(FATAL);
    return 0;
  }
  size_t FreeList(Thread*, size_t, mirror::Object**) OVERRIDE {
    UNIMPLEMENTED(FATAL);
    return 0;
  }
  accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
    // No live bitmap.
    return nullptr;
  }
  accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
    // No mark bitmap.
    return nullptr;
  }

  void Clear() OVERRIDE LOCKS_EXCLUDED(region_lock_);

  void Dump(std::ostream& os) const;
  void DumpRegions(std::ostream& os);
  void DumpNonFreeRegions(std::ostream& os);

  void RevokeThreadLocalBuffers(Thread* thread) LOCKS_EXCLUDED(region_lock_);
  void RevokeThreadLocalBuffersLocked(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(region_lock_);
  void RevokeAllThreadLocalBuffers() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_,
                                                    Locks::thread_list_lock_);
  void AssertThreadLocalBuffersAreRevoked(Thread* thread) LOCKS_EXCLUDED(region_lock_);
  void AssertAllThreadLocalBuffersAreRevoked() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_,
                                                              Locks::thread_list_lock_);

  enum class RegionType : uint8_t {
    kRegionTypeAll,              // All types.
    kRegionTypeFromSpace,        // From-space. To be evacuated.
    kRegionTypeUnevacFromSpace,  // Unevacuated from-space. Not to be evacuated.
    kRegionTypeToSpace,          // To-space.
    kRegionTypeNone,             // None.
  };

  enum class RegionState : uint8_t {
    kRegionStateFree,            // Free region.
    kRegionStateAllocated,       // Allocated region.
    kRegionStateLarge,           // Large allocated (allocation larger than the region size).
    kRegionStateLargeTail,       // Large tail (non-first regions of a large allocation).
  };

  template<RegionType kRegionType> uint64_t GetBytesAllocatedInternal();
  template<RegionType kRegionType> uint64_t GetObjectsAllocatedInternal();
  uint64_t GetBytesAllocated() {
    return GetBytesAllocatedInternal<RegionType::kRegionTypeAll>();
  }
  uint64_t GetObjectsAllocated() {
    return GetObjectsAllocatedInternal<RegionType::kRegionTypeAll>();
  }
  uint64_t GetBytesAllocatedInFromSpace() {
    return GetBytesAllocatedInternal<RegionType::kRegionTypeFromSpace>();
  }
  uint64_t GetObjectsAllocatedInFromSpace() {
    return GetObjectsAllocatedInternal<RegionType::kRegionTypeFromSpace>();
  }
  uint64_t GetBytesAllocatedInUnevacFromSpace() {
    return GetBytesAllocatedInternal<RegionType::kRegionTypeUnevacFromSpace>();
  }
  uint64_t GetObjectsAllocatedInUnevacFromSpace() {
    return GetObjectsAllocatedInternal<RegionType::kRegionTypeUnevacFromSpace>();
  }

  bool CanMoveObjects() const OVERRIDE {
    return true;
  }

  bool Contains(const mirror::Object* obj) const {
    const uint8_t* byte_obj = reinterpret_cast<const uint8_t*>(obj);
    return byte_obj >= Begin() && byte_obj < Limit();
  }

  RegionSpace* AsRegionSpace() OVERRIDE {
    return this;
  }

  // Go through all of the regions and visit the contiguous objects.
  void Walk(ObjectCallback* callback, void* arg)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
    WalkInternal<false>(callback, arg);
  }

  void WalkToSpace(ObjectCallback* callback, void* arg)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
    WalkInternal<true>(callback, arg);
  }
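
  // A sketch of a caller, assuming the ObjectCallback signature from
  // object_callbacks.h (mirror::Object* obj, void* arg); illustrative only,
  // and per the annotations above the mutator lock must be exclusively held:
  //
  //   static void CountObject(mirror::Object* obj, void* arg) {
  //     ++*reinterpret_cast<size_t*>(arg);
  //   }
  //   ...
  //   size_t count = 0;
  //   region_space->Walk(&CountObject, &count);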

  accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() OVERRIDE {
    return nullptr;
  }
  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Object alignment within the space.
  static constexpr size_t kAlignment = kObjectAlignment;
  // The region size.
  static constexpr size_t kRegionSize = 1 * MB;
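  // For example, a 256 MB region space consists of 256 regions of 1 MB each
  // (num_regions_ below corresponds to capacity / kRegionSize).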

  bool IsInFromSpace(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->IsInFromSpace();
    }
    return false;
  }

  bool IsInUnevacFromSpace(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->IsInUnevacFromSpace();
    }
    return false;
  }

  bool IsInToSpace(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->IsInToSpace();
    }
    return false;
  }

  RegionType GetRegionType(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->Type();
    }
    return RegionType::kRegionTypeNone;
  }

  void SetFromSpace(accounting::ReadBarrierTable* rb_table, bool force_evacuate_all)
      LOCKS_EXCLUDED(region_lock_);

  size_t FromSpaceSize();
  size_t UnevacFromSpaceSize();
  size_t ToSpaceSize();
  void ClearFromSpace();

  void AddLiveBytes(mirror::Object* ref, size_t alloc_size) {
    Region* reg = RefToRegionUnlocked(ref);
    reg->AddLiveBytes(alloc_size);
  }

  void AssertAllRegionLiveBytesZeroOrCleared();

  // Record an allocation in the region that contains ref.
  void RecordAlloc(mirror::Object* ref);
  // Allocate a new region as a thread-local allocation buffer for self.
  bool AllocNewTlab(Thread* self);

  uint32_t Time() {
    return time_;
  }

 private:
  RegionSpace(const std::string& name, MemMap* mem_map);

  template<bool kToSpaceOnly>
  void WalkInternal(ObjectCallback* callback, void* arg) NO_THREAD_SAFETY_ANALYSIS;

  class Region {
   public:
    Region()
        : idx_(static_cast<size_t>(-1)),
          begin_(nullptr), top_(nullptr), end_(nullptr),
          state_(RegionState::kRegionStateAllocated), type_(RegionType::kRegionTypeToSpace),
          objects_allocated_(0), alloc_time_(0), live_bytes_(static_cast<size_t>(-1)),
          is_newly_allocated_(false), is_a_tlab_(false), thread_(nullptr) {}

    Region(size_t idx, uint8_t* begin, uint8_t* end)
        : idx_(idx), begin_(begin), top_(begin), end_(end),
          state_(RegionState::kRegionStateFree), type_(RegionType::kRegionTypeNone),
          objects_allocated_(0), alloc_time_(0), live_bytes_(static_cast<size_t>(-1)),
          is_newly_allocated_(false), is_a_tlab_(false), thread_(nullptr) {
      DCHECK_LT(begin, end);
      DCHECK_EQ(static_cast<size_t>(end - begin), kRegionSize);
    }

    RegionState State() const {
      return state_;
    }

    RegionType Type() const {
      return type_;
    }

    void Clear() {
      top_ = begin_;
      state_ = RegionState::kRegionStateFree;
      type_ = RegionType::kRegionTypeNone;
      objects_allocated_ = 0;
      alloc_time_ = 0;
      live_bytes_ = static_cast<size_t>(-1);
      if (!kMadviseZeroes) {
        // On kernels where MADV_DONTNEED does not zero-fill anonymous pages,
        // clear the region contents explicitly.
        memset(begin_, 0, end_ - begin_);
      }
      madvise(begin_, end_ - begin_, MADV_DONTNEED);
      is_newly_allocated_ = false;
      is_a_tlab_ = false;
      thread_ = nullptr;
    }

    ALWAYS_INLINE mirror::Object* Alloc(size_t num_bytes, size_t* bytes_allocated,
                                        size_t* usable_size);

    bool IsFree() const {
      bool is_free = state_ == RegionState::kRegionStateFree;
      if (is_free) {
        DCHECK(IsInNoSpace());
        DCHECK_EQ(begin_, top_);
        DCHECK_EQ(objects_allocated_, 0U);
      }
      return is_free;
    }

    // Given a free region, declare it non-free (allocated).
    void Unfree(uint32_t alloc_time) {
      DCHECK(IsFree());
      state_ = RegionState::kRegionStateAllocated;
      type_ = RegionType::kRegionTypeToSpace;
      alloc_time_ = alloc_time;
    }

    void UnfreeLarge(uint32_t alloc_time) {
      DCHECK(IsFree());
      state_ = RegionState::kRegionStateLarge;
      type_ = RegionType::kRegionTypeToSpace;
      alloc_time_ = alloc_time;
    }

    void UnfreeLargeTail(uint32_t alloc_time) {
      DCHECK(IsFree());
      state_ = RegionState::kRegionStateLargeTail;
      type_ = RegionType::kRegionTypeToSpace;
      alloc_time_ = alloc_time;
    }

    void SetNewlyAllocated() {
      is_newly_allocated_ = true;
    }

    // Non-large, non-large-tail allocated.
    bool IsAllocated() const {
      return state_ == RegionState::kRegionStateAllocated;
    }

    // Large allocated.
    bool IsLarge() const {
      bool is_large = state_ == RegionState::kRegionStateLarge;
      if (is_large) {
        DCHECK_LT(begin_ + kRegionSize, top_);
      }
      return is_large;
    }

    // Large-tail allocated.
    bool IsLargeTail() const {
      bool is_large_tail = state_ == RegionState::kRegionStateLargeTail;
      if (is_large_tail) {
        DCHECK_EQ(begin_, top_);
      }
      return is_large_tail;
    }

    size_t Idx() const {
      return idx_;
    }

    bool IsInFromSpace() const {
      return type_ == RegionType::kRegionTypeFromSpace;
    }

    bool IsInToSpace() const {
      return type_ == RegionType::kRegionTypeToSpace;
    }

    bool IsInUnevacFromSpace() const {
      return type_ == RegionType::kRegionTypeUnevacFromSpace;
    }

    bool IsInNoSpace() const {
      return type_ == RegionType::kRegionTypeNone;
    }

    void SetAsFromSpace() {
      DCHECK(!IsFree() && IsInToSpace());
      type_ = RegionType::kRegionTypeFromSpace;
      live_bytes_ = static_cast<size_t>(-1);
    }

    void SetAsUnevacFromSpace() {
      DCHECK(!IsFree() && IsInToSpace());
      type_ = RegionType::kRegionTypeUnevacFromSpace;
      live_bytes_ = 0U;
    }

    void SetUnevacFromSpaceAsToSpace() {
      DCHECK(!IsFree() && IsInUnevacFromSpace());
      type_ = RegionType::kRegionTypeToSpace;
    }

    ALWAYS_INLINE bool ShouldBeEvacuated();

    void AddLiveBytes(size_t live_bytes) {
      DCHECK(IsInUnevacFromSpace());
      DCHECK(!IsLargeTail());
      DCHECK_NE(live_bytes_, static_cast<size_t>(-1));
      live_bytes_ += live_bytes;
      DCHECK_LE(live_bytes_, BytesAllocated());
    }

    size_t LiveBytes() const {
      return live_bytes_;
    }

    uint GetLivePercent() const {
      DCHECK(IsInToSpace());
      DCHECK(!IsLargeTail());
      DCHECK_NE(live_bytes_, static_cast<size_t>(-1));
      DCHECK_LE(live_bytes_, BytesAllocated());
      size_t bytes_allocated = RoundUp(BytesAllocated(), kRegionSize);
      DCHECK_GT(bytes_allocated, 0U);
      uint result = (live_bytes_ * 100U) / bytes_allocated;
      DCHECK_LE(result, 100U);
      return result;
    }
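    // Worked example: with live_bytes_ == 300 * KB in an allocated 1 MB region,
    // bytes_allocated rounds up to 1 * MB, so the result is
    // (300 * KB * 100) / MB == 29 in integer division.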

    size_t BytesAllocated() const {
      if (IsLarge()) {
        DCHECK_LT(begin_ + kRegionSize, top_);
        return static_cast<size_t>(top_ - begin_);
      } else if (IsLargeTail()) {
        DCHECK_EQ(begin_, top_);
        return 0;
      } else {
        DCHECK(IsAllocated()) << static_cast<uint>(state_);
        DCHECK_LE(begin_, top_);
        size_t bytes = static_cast<size_t>(top_ - begin_);
        DCHECK_LE(bytes, kRegionSize);
        return bytes;
      }
    }

    size_t ObjectsAllocated() const {
      if (IsLarge()) {
        DCHECK_LT(begin_ + kRegionSize, top_);
        DCHECK_EQ(objects_allocated_, 0U);
        return 1;
      } else if (IsLargeTail()) {
        DCHECK_EQ(begin_, top_);
        DCHECK_EQ(objects_allocated_, 0U);
        return 0;
      } else {
        DCHECK(IsAllocated()) << static_cast<uint>(state_);
        return objects_allocated_;
      }
    }

    uint8_t* Begin() const {
      return begin_;
    }

    uint8_t* Top() const {
      return top_;
    }

    void SetTop(uint8_t* new_top) {
      top_ = new_top;
    }

    uint8_t* End() const {
      return end_;
    }

    bool Contains(mirror::Object* ref) const {
      return begin_ <= reinterpret_cast<uint8_t*>(ref) && reinterpret_cast<uint8_t*>(ref) < end_;
    }

    void Dump(std::ostream& os) const;

    void RecordThreadLocalAllocations(size_t num_objects, size_t num_bytes) {
      DCHECK(IsAllocated());
      DCHECK_EQ(objects_allocated_, 0U);
      DCHECK_EQ(top_, end_);
      objects_allocated_ = num_objects;
      top_ = begin_ + num_bytes;
      DCHECK_EQ(top_, end_);
    }

   private:
    size_t idx_;                  // The region's index in the region space.
    uint8_t* begin_;              // The begin address of the region.
    // Can't use Atomic<uint8_t*> as Atomic's copy operator is implicitly deleted.
    uint8_t* top_;                // The current position of the allocation.
    uint8_t* end_;                // The end address of the region.
    RegionState state_;           // The region state (see RegionState).
    RegionType type_;             // The region type (see RegionType).
    uint64_t objects_allocated_;  // The number of objects allocated.
    uint32_t alloc_time_;         // The allocation time of the region.
    size_t live_bytes_;           // The live bytes. Used to compute the live percent.
    bool is_newly_allocated_;     // True if it's allocated after the last collection.
    bool is_a_tlab_;              // True if it's a tlab.
    Thread* thread_;              // The owning thread if it's a tlab.

    friend class RegionSpace;
  };

  Region* RefToRegion(mirror::Object* ref) LOCKS_EXCLUDED(region_lock_) {
    MutexLock mu(Thread::Current(), region_lock_);
    return RefToRegionLocked(ref);
  }

  Region* RefToRegionUnlocked(mirror::Object* ref) NO_THREAD_SAFETY_ANALYSIS {
    // For performance reasons (this is frequently called via IsInFromSpace()
    // etc.) we avoid taking a lock here. Note that a region changes from
    // to-space to from-space only during a pause (SetFromSpace()) and from
    // from-space to free only after the GC is done, so as long as ref is a
    // valid reference into an allocated region, it is safe to access the
    // region state without the lock.
    return RefToRegionLocked(ref);
  }

  Region* RefToRegionLocked(mirror::Object* ref) EXCLUSIVE_LOCKS_REQUIRED(region_lock_) {
    DCHECK(HasAddress(ref));
    uintptr_t offset = reinterpret_cast<uintptr_t>(ref) - reinterpret_cast<uintptr_t>(Begin());
    size_t reg_idx = offset / kRegionSize;
    DCHECK_LT(reg_idx, num_regions_);
    Region* reg = &regions_[reg_idx];
    DCHECK_EQ(reg->Idx(), reg_idx);
    DCHECK(reg->Contains(ref));
    return reg;
  }
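
  // E.g. a ref at Begin() + 5 * MB + 128 yields offset == 5 * MB + 128 and
  // reg_idx == 5; every offset in [5 * MB, 6 * MB) maps to regions_[5].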

  // Return the object immediately following obj, taking object alignment into account.
  mirror::Object* GetNextObject(mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  Mutex region_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  uint32_t time_;                // The time, measured as the number of collections since startup.
  size_t num_regions_;           // The number of regions in this space.
  size_t num_non_free_regions_;  // The number of non-free regions in this space.
  // The pointer to the region array.
  std::unique_ptr<Region[]> regions_ GUARDED_BY(region_lock_);
  Region* current_region_;       // The region currently being allocated into.
  Region* evac_region_;          // The region currently being evacuated into.
  Region full_region_;           // The dummy/sentinel region that looks full.

  DISALLOW_COPY_AND_ASSIGN(RegionSpace);
};

std::ostream& operator<<(std::ostream& os, const RegionSpace::RegionState& value);
std::ostream& operator<<(std::ostream& os, const RegionSpace::RegionType& value);

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_REGION_SPACE_H_