/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_REGION_SPACE_H_
#define ART_RUNTIME_GC_SPACE_REGION_SPACE_H_

#include "object_callbacks.h"
#include "space.h"
#include "gc/accounting/read_barrier_table.h"

namespace art {
namespace gc {
namespace space {

// A space that consists of equal-sized regions.
class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
 public:
  typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);

  SpaceType GetType() const OVERRIDE {
    return kSpaceTypeRegionSpace;
  }

  // Create a region space with the requested sizes. The requested base address is not
  // guaranteed to be granted; if it is required, the caller should call Begin on the returned
  // space to confirm that the request was granted.
  static RegionSpace* Create(const std::string& name, size_t capacity, uint8_t* requested_begin);

  // Allocate num_bytes, returns nullptr if the space is full.
  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                        size_t* usable_size) OVERRIDE;
  // Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
  mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                    size_t* usable_size)
      OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  // The main allocation routine.
  template<bool kForEvac>
  ALWAYS_INLINE mirror::Object* AllocNonvirtual(size_t num_bytes, size_t* bytes_allocated,
                                                size_t* usable_size);
  // Allocate/free large objects (objects that are larger than the region size).
  template<bool kForEvac>
  mirror::Object* AllocLarge(size_t num_bytes, size_t* bytes_allocated, size_t* usable_size);
  void FreeLarge(mirror::Object* large_obj, size_t bytes_allocated);

  // Return the storage space required by obj.
  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return AllocationSizeNonvirtual(obj, usable_size);
  }
  size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  size_t Free(Thread*, mirror::Object*) OVERRIDE {
    UNIMPLEMENTED(FATAL);
    return 0;
  }
  size_t FreeList(Thread*, size_t, mirror::Object**) OVERRIDE {
    UNIMPLEMENTED(FATAL);
    return 0;
  }
  accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
    // No live bitmap.
    return nullptr;
  }
  accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
    // No mark bitmap.
    return nullptr;
  }

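  // Reset all regions to the free state, releasing their memory.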
  void Clear() OVERRIDE LOCKS_EXCLUDED(region_lock_);

  void Dump(std::ostream& os) const;
  void DumpRegions(std::ostream& os);
  void DumpNonFreeRegions(std::ostream& os);

  void RevokeThreadLocalBuffers(Thread* thread) LOCKS_EXCLUDED(region_lock_);
  void RevokeThreadLocalBuffersLocked(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(region_lock_);
  void RevokeAllThreadLocalBuffers() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_,
                                                    Locks::thread_list_lock_);
  void AssertThreadLocalBuffersAreRevoked(Thread* thread) LOCKS_EXCLUDED(region_lock_);
  void AssertAllThreadLocalBuffersAreRevoked() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_,
                                                              Locks::thread_list_lock_);

  enum SubSpaceType {
    kAllSpaces,        // All spaces.
    kFromSpace,        // From-space. To be evacuated.
    kUnevacFromSpace,  // Unevacuated from-space. Not to be evacuated.
    kToSpace,          // To-space.
  };

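  // Accounting helpers, parameterized by the subspace whose bytes/objects are counted.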
  template<SubSpaceType kSubSpaceType> uint64_t GetBytesAllocatedInternal();
  template<SubSpaceType kSubSpaceType> uint64_t GetObjectsAllocatedInternal();
  uint64_t GetBytesAllocated() {
    return GetBytesAllocatedInternal<kAllSpaces>();
  }
  uint64_t GetObjectsAllocated() {
    return GetObjectsAllocatedInternal<kAllSpaces>();
  }
  uint64_t GetBytesAllocatedInFromSpace() {
    return GetBytesAllocatedInternal<kFromSpace>();
  }
  uint64_t GetObjectsAllocatedInFromSpace() {
    return GetObjectsAllocatedInternal<kFromSpace>();
  }
  uint64_t GetBytesAllocatedInUnevacFromSpace() {
    return GetBytesAllocatedInternal<kUnevacFromSpace>();
  }
  uint64_t GetObjectsAllocatedInUnevacFromSpace() {
    return GetObjectsAllocatedInternal<kUnevacFromSpace>();
  }

  bool CanMoveObjects() const OVERRIDE {
    return true;
  }

  bool Contains(const mirror::Object* obj) const {
    const uint8_t* byte_obj = reinterpret_cast<const uint8_t*>(obj);
    return byte_obj >= Begin() && byte_obj < Limit();
  }

  RegionSpace* AsRegionSpace() OVERRIDE {
    return this;
  }

  // Go through all of the regions and visit the objects they contain.
  void Walk(ObjectCallback* callback, void* arg)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
    WalkInternal<false>(callback, arg);
  }

  void WalkToSpace(ObjectCallback* callback, void* arg)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
    WalkInternal<true>(callback, arg);
  }

  accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() OVERRIDE {
    return nullptr;
  }
  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Object alignment within the space.
  static constexpr size_t kAlignment = kObjectAlignment;
  // The region size.
  static constexpr size_t kRegionSize = 1 * MB;

  bool IsInFromSpace(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->IsInFromSpace();
    }
    return false;
  }

  bool IsInUnevacFromSpace(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->IsInUnevacFromSpace();
    }
    return false;
  }

  bool IsInToSpace(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->IsInToSpace();
    }
    return false;
  }

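  // Decide, for each non-free region, whether it will be evacuated (from-space) or left in place
  // (unevacuated from-space); if force_evacuate_all is true, every non-free region is marked for
  // evacuation.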
  void SetFromSpace(accounting::ReadBarrierTable* rb_table, bool force_evacuate_all)
      LOCKS_EXCLUDED(region_lock_);

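  // The number of bytes currently in each subspace.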
  size_t FromSpaceSize();
  size_t UnevacFromSpaceSize();
  size_t ToSpaceSize();
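  // Reclaim the from-space regions once the collection is done.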
  void ClearFromSpace();

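  // Add alloc_size to the live byte count of the region containing ref (only valid for
  // unevacuated from-space regions).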
  void AddLiveBytes(mirror::Object* ref, size_t alloc_size) {
    Region* reg = RefToRegion(ref);
    reg->AddLiveBytes(alloc_size);
  }

  void AssertAllRegionLiveBytesZeroOrCleared();

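  // Record an allocation of ref in the owning region's accounting.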
  void RecordAlloc(mirror::Object* ref);
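  // Try to allocate a new thread-local allocation buffer for self; returns false if no space is
  // available.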
  bool AllocNewTlab(Thread* self);

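  // The current time stamp, used to mark regions at allocation (see time_ below).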
  uint32_t Time() {
    return time_;
  }

 private:
  RegionSpace(const std::string& name, MemMap* mem_map);

  template<bool kToSpaceOnly>
  void WalkInternal(ObjectCallback* callback, void* arg) NO_THREAD_SAFETY_ANALYSIS;

  enum RegionState {
    kRegionFree,                      // Free region.
    kRegionToSpace,                   // To-space region.
    kRegionFromSpace,                 // From-space region. To be evacuated.
    kRegionUnevacFromSpace,           // Unevacuated from-space region. Not to be evacuated.
    kRegionLargeToSpace,              // Large (allocation larger than the region size) to-space.
    kRegionLargeFromSpace,            // Large from-space. To be evacuated.
    kRegionLargeUnevacFromSpace,      // Large unevacuated from-space.
    kRegionLargeTailToSpace,          // Large tail (non-first regions of a large allocation).
    kRegionLargeTailFromSpace,        // Large tail from-space.
    kRegionLargeTailUnevacFromSpace,  // Large tail unevacuated from-space.
  };

  class Region {
   public:
    Region()
        : idx_(static_cast<size_t>(-1)),
          begin_(nullptr), top_(nullptr), end_(nullptr), state_(kRegionToSpace),
          objects_allocated_(0), alloc_time_(0), live_bytes_(static_cast<size_t>(-1)),
          is_newly_allocated_(false), is_a_tlab_(false), thread_(nullptr) {}

    Region(size_t idx, uint8_t* begin, uint8_t* end)
        : idx_(idx), begin_(begin), top_(begin), end_(end), state_(kRegionFree),
          objects_allocated_(0), alloc_time_(0), live_bytes_(static_cast<size_t>(-1)),
          is_newly_allocated_(false), is_a_tlab_(false), thread_(nullptr) {
      DCHECK_LT(begin, end);
      DCHECK_EQ(static_cast<size_t>(end - begin), kRegionSize);
    }

    void Clear() {
      top_ = begin_;
      state_ = kRegionFree;
      objects_allocated_ = 0;
      alloc_time_ = 0;
      live_bytes_ = static_cast<size_t>(-1);
      if (!kMadviseZeroes) {
        memset(begin_, 0, end_ - begin_);
      }
      madvise(begin_, end_ - begin_, MADV_DONTNEED);
      is_newly_allocated_ = false;
      is_a_tlab_ = false;
      thread_ = nullptr;
    }

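    // Bump-pointer allocation of num_bytes within this region; returns nullptr if the region
    // does not have enough space left.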
    ALWAYS_INLINE mirror::Object* Alloc(size_t num_bytes, size_t* bytes_allocated,
                                        size_t* usable_size);

    bool IsFree() const {
      bool is_free = state_ == kRegionFree;
      if (is_free) {
        DCHECK_EQ(begin_, top_);
        DCHECK_EQ(objects_allocated_, 0U);
      }
      return is_free;
    }

    // Given a free region, declare it non-free (allocated).
    void Unfree(uint32_t alloc_time) {
      DCHECK(IsFree());
      state_ = kRegionToSpace;
      alloc_time_ = alloc_time;
    }

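    // Given a free region, declare it the first region of a large allocation.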
    void UnfreeLarge(uint32_t alloc_time) {
      DCHECK(IsFree());
      state_ = kRegionLargeToSpace;
      alloc_time_ = alloc_time;
    }

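    // Given a free region, declare it a non-first (tail) region of a large allocation.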
    void UnfreeLargeTail(uint32_t alloc_time) {
      DCHECK(IsFree());
      state_ = kRegionLargeTailToSpace;
      alloc_time_ = alloc_time;
    }

    void SetNewlyAllocated() {
      is_newly_allocated_ = true;
    }

    // Non-large, non-large-tail.
    bool IsNormal() const {
      return state_ == kRegionToSpace || state_ == kRegionFromSpace ||
          state_ == kRegionUnevacFromSpace;
    }

    bool IsLarge() const {
      bool is_large = state_ == kRegionLargeToSpace || state_ == kRegionLargeFromSpace ||
          state_ == kRegionLargeUnevacFromSpace;
      if (is_large) {
        DCHECK_LT(begin_ + 1 * MB, top_);
      }
      return is_large;
    }

    bool IsLargeTail() const {
      bool is_large_tail = state_ == kRegionLargeTailToSpace ||
          state_ == kRegionLargeTailFromSpace ||
          state_ == kRegionLargeTailUnevacFromSpace;
      if (is_large_tail) {
        DCHECK_EQ(begin_, top_);
      }
      return is_large_tail;
    }

    size_t Idx() const {
      return idx_;
    }

    bool IsInFromSpace() const {
      return state_ == kRegionFromSpace || state_ == kRegionLargeFromSpace ||
          state_ == kRegionLargeTailFromSpace;
    }

    bool IsInToSpace() const {
      return state_ == kRegionToSpace || state_ == kRegionLargeToSpace ||
          state_ == kRegionLargeTailToSpace;
    }

    bool IsInUnevacFromSpace() const {
      return state_ == kRegionUnevacFromSpace || state_ == kRegionLargeUnevacFromSpace ||
          state_ == kRegionLargeTailUnevacFromSpace;
    }

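    // Flip this to-space region to from-space (it will be evacuated); the live byte count
    // becomes undefined.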
    void SetAsFromSpace() {
      switch (state_) {
        case kRegionToSpace:
          state_ = kRegionFromSpace;
          break;
        case kRegionLargeToSpace:
          state_ = kRegionLargeFromSpace;
          break;
        case kRegionLargeTailToSpace:
          state_ = kRegionLargeTailFromSpace;
          break;
        default:
          LOG(FATAL) << "Unexpected region state : " << static_cast<uint>(state_)
                     << " idx=" << idx_;
      }
      live_bytes_ = static_cast<size_t>(-1);
    }

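    // Flip this to-space region to unevacuated from-space (it stays in place); the live byte
    // count starts at zero.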
    void SetAsUnevacFromSpace() {
      switch (state_) {
        case kRegionToSpace:
          state_ = kRegionUnevacFromSpace;
          break;
        case kRegionLargeToSpace:
          state_ = kRegionLargeUnevacFromSpace;
          break;
        case kRegionLargeTailToSpace:
          state_ = kRegionLargeTailUnevacFromSpace;
          break;
        default:
          LOG(FATAL) << "Unexpected region state : " << static_cast<uint>(state_)
                     << " idx=" << idx_;
      }
      live_bytes_ = 0U;
    }

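    // Turn an unevacuated from-space region back into a to-space region.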
    void SetUnevacFromSpaceAsToSpace() {
      switch (state_) {
        case kRegionUnevacFromSpace:
          state_ = kRegionToSpace;
          break;
        case kRegionLargeUnevacFromSpace:
          state_ = kRegionLargeToSpace;
          break;
        case kRegionLargeTailUnevacFromSpace:
          state_ = kRegionLargeTailToSpace;
          break;
        default:
          LOG(FATAL) << "Unexpected region state : " << static_cast<uint>(state_)
                     << " idx=" << idx_;
      }
    }

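    // Returns whether this region should be evacuated in the upcoming collection.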
    ALWAYS_INLINE bool ShouldBeEvacuated();

    void AddLiveBytes(size_t live_bytes) {
      DCHECK(IsInUnevacFromSpace());
      DCHECK(!IsLargeTail());
      DCHECK_NE(live_bytes_, static_cast<size_t>(-1));
      live_bytes_ += live_bytes;
      DCHECK_LE(live_bytes_, BytesAllocated());
    }

    size_t LiveBytes() const {
      return live_bytes_;
    }

    uint GetLivePercent() const {
      DCHECK(IsInToSpace());
      DCHECK(!IsLargeTail());
      DCHECK_NE(live_bytes_, static_cast<size_t>(-1));
      DCHECK_LE(live_bytes_, BytesAllocated());
      size_t bytes_allocated = RoundUp(BytesAllocated(), kRegionSize);
      DCHECK_GE(bytes_allocated, 0U);
      uint result = (live_bytes_ * 100U) / bytes_allocated;
      DCHECK_LE(result, 100U);
      return result;
    }

    size_t BytesAllocated() const {
      if (IsLarge()) {
        DCHECK_LT(begin_ + kRegionSize, top_);
        return static_cast<size_t>(top_ - begin_);
      } else if (IsLargeTail()) {
        DCHECK_EQ(begin_, top_);
        return 0;
      } else {
        DCHECK(IsNormal()) << static_cast<uint>(state_);
        DCHECK_LE(begin_, top_);
        size_t bytes = static_cast<size_t>(top_ - begin_);
        DCHECK_LE(bytes, kRegionSize);
        return bytes;
      }
    }

    size_t ObjectsAllocated() const {
      if (IsLarge()) {
        DCHECK_LT(begin_ + 1 * MB, top_);
        DCHECK_EQ(objects_allocated_, 0U);
        return 1;
      } else if (IsLargeTail()) {
        DCHECK_EQ(begin_, top_);
        DCHECK_EQ(objects_allocated_, 0U);
        return 0;
      } else {
        DCHECK(IsNormal()) << static_cast<uint>(state_);
        return objects_allocated_;
      }
    }

    uint8_t* Begin() const {
      return begin_;
    }

    uint8_t* Top() const {
      return top_;
    }

    void SetTop(uint8_t* new_top) {
      top_ = new_top;
    }

    uint8_t* End() const {
      return end_;
    }

    bool Contains(mirror::Object* ref) const {
      return begin_ <= reinterpret_cast<uint8_t*>(ref) && reinterpret_cast<uint8_t*>(ref) < end_;
    }

    void Dump(std::ostream& os) const;

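    // Record the objects and bytes that were allocated in this region while it was used as a
    // thread-local allocation buffer.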
    void RecordThreadLocalAllocations(size_t num_objects, size_t num_bytes) {
      DCHECK(IsNormal());
      DCHECK_EQ(objects_allocated_, 0U);
      DCHECK_EQ(top_, end_);
      objects_allocated_ = num_objects;
      top_ = begin_ + num_bytes;
      DCHECK_EQ(top_, end_);
    }

   private:
    size_t idx_;                  // The region's index in the region space.
    uint8_t* begin_;              // The begin address of the region.
    // Can't use Atomic<uint8_t*> as Atomic's copy operator is implicitly deleted.
    uint8_t* top_;                // The current position of the allocation.
    uint8_t* end_;                // The end address of the region.
    uint8_t state_;               // The region state (see RegionState).
    uint64_t objects_allocated_;  // The number of objects allocated.
    uint32_t alloc_time_;         // The allocation time of the region.
    size_t live_bytes_;           // The live bytes. Used to compute the live percent.
    bool is_newly_allocated_;     // True if it's allocated after the last collection.
    bool is_a_tlab_;              // True if it's a tlab.
    Thread* thread_;              // The owning thread if it's a tlab.

    friend class RegionSpace;
  };

  Region* RefToRegion(mirror::Object* ref) LOCKS_EXCLUDED(region_lock_) {
    MutexLock mu(Thread::Current(), region_lock_);
    return RefToRegionLocked(ref);
  }

  Region* RefToRegionUnlocked(mirror::Object* ref) NO_THREAD_SAFETY_ANALYSIS {
    // For performance (this is frequently called via IsInFromSpace() etc.), we avoid taking a
    // lock here. Note that a region changes from to-space to from-space only during a pause
    // (SetFromSpace()) and from from-space to free only after the GC is done, so as long as ref
    // is a valid reference into an allocated region, it's safe to read the region state without
    // holding the lock.
    return RefToRegionLocked(ref);
  }

  Region* RefToRegionLocked(mirror::Object* ref) EXCLUSIVE_LOCKS_REQUIRED(region_lock_) {
    DCHECK(HasAddress(ref));
    uintptr_t offset = reinterpret_cast<uintptr_t>(ref) - reinterpret_cast<uintptr_t>(Begin());
    size_t reg_idx = offset / kRegionSize;
    DCHECK_LT(reg_idx, num_regions_);
    Region* reg = &regions_[reg_idx];
    DCHECK_EQ(reg->Idx(), reg_idx);
    DCHECK(reg->Contains(ref));
    return reg;
  }

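  // Return the object that immediately follows obj in its region.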
  mirror::Object* GetNextObject(mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  Mutex region_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  uint32_t time_;                  // The time, measured as the number of collections since startup.
  size_t num_regions_;             // The number of regions in this space.
  size_t num_non_free_regions_;    // The number of non-free regions in this space.
  // The pointer to the region array.
  std::unique_ptr<Region[]> regions_ GUARDED_BY(region_lock_);
  Region* current_region_;         // The region that's being allocated currently.
  Region* evac_region_;            // The region that's being evacuated to currently.
  Region full_region_;             // The dummy/sentinel region that looks full.

  DISALLOW_COPY_AND_ASSIGN(RegionSpace);
};

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_REGION_SPACE_H_