/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_H_
#define ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_H_

#include <cstddef>
#include <cstdint>
#include <iosfwd>
#include <string>

#include "object_callbacks.h"
#include "space.h"

23namespace art {
24namespace gc {
25
26namespace collector {
27 class MarkSweep;
28} // namespace collector
29
30namespace space {
31
Ian Rogers6fac4472014-02-25 17:01:10 -080032// A bump pointer space allocates by incrementing a pointer, it doesn't provide a free
33// implementation as its intended to be evacuated.
34class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
Mathieu Chartier590fee92013-09-13 13:46:47 -070035 public:
36 typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);
37
Ian Rogers6fac4472014-02-25 17:01:10 -080038 SpaceType GetType() const OVERRIDE {
Mathieu Chartier590fee92013-09-13 13:46:47 -070039 return kSpaceTypeBumpPointerSpace;
40 }
41
42 // Create a bump pointer space with the requested sizes. The requested base address is not
43 // guaranteed to be granted, if it is required, the caller should call Begin on the returned
44 // space to confirm the request was granted.
Ian Rogers13735952014-10-08 12:43:28 -070045 static BumpPointerSpace* Create(const std::string& name, size_t capacity, uint8_t* requested_begin);
Mathieu Chartier31f44142014-04-08 14:40:03 -070046 static BumpPointerSpace* CreateFromMemMap(const std::string& name, MemMap* mem_map);
Mathieu Chartier590fee92013-09-13 13:46:47 -070047
Mathieu Chartier2cebb242015-04-21 16:50:40 -070048 // Allocate num_bytes, returns null if the space is full.
Ian Rogers6fac4472014-02-25 17:01:10 -080049 mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -070050 size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE;
Mathieu Chartier0651d412014-04-29 14:37:57 -070051 // Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
52 mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -070053 size_t* usable_size, size_t* bytes_tl_bulk_allocated)
Mathieu Chartier90443472015-07-16 20:32:27 -070054 OVERRIDE REQUIRES(Locks::mutator_lock_);
Mathieu Chartier0651d412014-04-29 14:37:57 -070055
Mathieu Chartier590fee92013-09-13 13:46:47 -070056 mirror::Object* AllocNonvirtual(size_t num_bytes);
Mathieu Chartier692fafd2013-11-29 17:24:40 -080057 mirror::Object* AllocNonvirtualWithoutAccounting(size_t num_bytes);
Mathieu Chartier590fee92013-09-13 13:46:47 -070058
59 // Return the storage space required by obj.
Ian Rogers6fac4472014-02-25 17:01:10 -080060 size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
Mathieu Chartier90443472015-07-16 20:32:27 -070061 SHARED_REQUIRES(Locks::mutator_lock_) {
Ian Rogers6fac4472014-02-25 17:01:10 -080062 return AllocationSizeNonvirtual(obj, usable_size);
63 }
Mathieu Chartier590fee92013-09-13 13:46:47 -070064
Mathieu Chartier692fafd2013-11-29 17:24:40 -080065 // NOPS unless we support free lists.
Ian Rogers6fac4472014-02-25 17:01:10 -080066 size_t Free(Thread*, mirror::Object*) OVERRIDE {
Mathieu Chartier590fee92013-09-13 13:46:47 -070067 return 0;
68 }
69
Ian Rogers6fac4472014-02-25 17:01:10 -080070 size_t FreeList(Thread*, size_t, mirror::Object**) OVERRIDE {
71 return 0;
Mathieu Chartier590fee92013-09-13 13:46:47 -070072 }
73
Ian Rogers6fac4472014-02-25 17:01:10 -080074 size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
Mathieu Chartier90443472015-07-16 20:32:27 -070075 SHARED_REQUIRES(Locks::mutator_lock_);
Ian Rogers6fac4472014-02-25 17:01:10 -080076
Mathieu Chartier590fee92013-09-13 13:46:47 -070077 // Removes the fork time growth limit on capacity, allowing the application to allocate up to the
78 // maximum reserved size of the heap.
79 void ClearGrowthLimit() {
80 growth_end_ = Limit();
81 }
82
83 // Override capacity so that we only return the possibly limited capacity
84 size_t Capacity() const {
85 return growth_end_ - begin_;
86 }
87
88 // The total amount of memory reserved for the space.
89 size_t NonGrowthLimitCapacity() const {
90 return GetMemMap()->Size();
91 }
92
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -070093 accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
Mathieu Chartier590fee92013-09-13 13:46:47 -070094 return nullptr;
95 }
96
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -070097 accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
Mathieu Chartier590fee92013-09-13 13:46:47 -070098 return nullptr;
99 }
100
Mathieu Chartier31f44142014-04-08 14:40:03 -0700101 // Reset the space to empty.
Mathieu Chartier90443472015-07-16 20:32:27 -0700102 void Clear() OVERRIDE REQUIRES(!block_lock_);
Mathieu Chartier590fee92013-09-13 13:46:47 -0700103
104 void Dump(std::ostream& os) const;
105
Mathieu Chartier90443472015-07-16 20:32:27 -0700106 size_t RevokeThreadLocalBuffers(Thread* thread) REQUIRES(!block_lock_);
107 size_t RevokeAllThreadLocalBuffers()
108 REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !block_lock_);
109 void AssertThreadLocalBuffersAreRevoked(Thread* thread) REQUIRES(!block_lock_);
110 void AssertAllThreadLocalBuffersAreRevoked()
111 REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !block_lock_);
Mathieu Chartier590fee92013-09-13 13:46:47 -0700112
Mathieu Chartier90443472015-07-16 20:32:27 -0700113 uint64_t GetBytesAllocated() SHARED_REQUIRES(Locks::mutator_lock_)
114 REQUIRES(!*Locks::runtime_shutdown_lock_, !*Locks::thread_list_lock_, !block_lock_);
115 uint64_t GetObjectsAllocated() SHARED_REQUIRES(Locks::mutator_lock_)
116 REQUIRES(!*Locks::runtime_shutdown_lock_, !*Locks::thread_list_lock_, !block_lock_);
Ian Rogers6fac4472014-02-25 17:01:10 -0800117 bool IsEmpty() const {
118 return Begin() == End();
119 }
120
Mathieu Chartier31f44142014-04-08 14:40:03 -0700121 bool CanMoveObjects() const OVERRIDE {
122 return true;
123 }
Mathieu Chartier590fee92013-09-13 13:46:47 -0700124
125 bool Contains(const mirror::Object* obj) const {
Ian Rogers13735952014-10-08 12:43:28 -0700126 const uint8_t* byte_obj = reinterpret_cast<const uint8_t*>(obj);
Mathieu Chartier590fee92013-09-13 13:46:47 -0700127 return byte_obj >= Begin() && byte_obj < End();
128 }
129
130 // TODO: Change this? Mainly used for compacting to a particular region of memory.
Ian Rogers13735952014-10-08 12:43:28 -0700131 BumpPointerSpace(const std::string& name, uint8_t* begin, uint8_t* limit);
Mathieu Chartier590fee92013-09-13 13:46:47 -0700132
133 // Return the object which comes after obj, while ensuring alignment.
134 static mirror::Object* GetNextObject(mirror::Object* obj)
Mathieu Chartier90443472015-07-16 20:32:27 -0700135 SHARED_REQUIRES(Locks::mutator_lock_);
Mathieu Chartier590fee92013-09-13 13:46:47 -0700136
Mathieu Chartier692fafd2013-11-29 17:24:40 -0800137 // Allocate a new TLAB, returns false if the allocation failed.
Mathieu Chartier90443472015-07-16 20:32:27 -0700138 bool AllocNewTlab(Thread* self, size_t bytes) REQUIRES(!block_lock_);
Mathieu Chartier692fafd2013-11-29 17:24:40 -0800139
Ian Rogers6fac4472014-02-25 17:01:10 -0800140 BumpPointerSpace* AsBumpPointerSpace() OVERRIDE {
Mathieu Chartier7410f292013-11-24 13:17:35 -0800141 return this;
142 }
143
Mathieu Chartier692fafd2013-11-29 17:24:40 -0800144 // Go through all of the blocks and visit the continuous objects.
Mathieu Chartier83c8ee02014-01-28 14:50:23 -0800145 void Walk(ObjectCallback* callback, void* arg)
Mathieu Chartier90443472015-07-16 20:32:27 -0700146 SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!block_lock_);
Mathieu Chartier692fafd2013-11-29 17:24:40 -0800147
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -0700148 accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() OVERRIDE;
Ian Rogers6fac4472014-02-25 17:01:10 -0800149
Mathieu Chartier52e4b432014-06-10 11:22:31 -0700150 // Record objects / bytes freed.
151 void RecordFree(int32_t objects, int32_t bytes) {
152 objects_allocated_.FetchAndSubSequentiallyConsistent(objects);
153 bytes_allocated_.FetchAndSubSequentiallyConsistent(bytes);
154 }
155
Mathieu Chartierb363f662014-07-16 13:28:58 -0700156 void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
Mathieu Chartier90443472015-07-16 20:32:27 -0700157 SHARED_REQUIRES(Locks::mutator_lock_);
Mathieu Chartierb363f662014-07-16 13:28:58 -0700158
Mathieu Chartier7410f292013-11-24 13:17:35 -0800159 // Object alignment within the space.
Mathieu Chartiercbb2d202013-11-14 17:45:16 -0800160 static constexpr size_t kAlignment = 8;
161
Mathieu Chartier590fee92013-09-13 13:46:47 -0700162 protected:
163 BumpPointerSpace(const std::string& name, MemMap* mem_map);
164
Mathieu Chartier692fafd2013-11-29 17:24:40 -0800165 // Allocate a raw block of bytes.
Mathieu Chartier90443472015-07-16 20:32:27 -0700166 uint8_t* AllocBlock(size_t bytes) REQUIRES(block_lock_);
167 void RevokeThreadLocalBuffersLocked(Thread* thread) REQUIRES(block_lock_);
Mathieu Chartier692fafd2013-11-29 17:24:40 -0800168
Mathieu Chartier692fafd2013-11-29 17:24:40 -0800169 // The main block is an unbounded block where objects go when there are no other blocks. This
170 // enables us to maintain tightly packed objects when you are not using thread local buffers for
Mathieu Chartierfc4c27e2014-02-11 11:05:41 -0800171 // allocation. The main block starts at the space Begin().
Mathieu Chartier90443472015-07-16 20:32:27 -0700172 void UpdateMainBlock() REQUIRES(block_lock_);
Mathieu Chartier590fee92013-09-13 13:46:47 -0700173
Ian Rogers13735952014-10-08 12:43:28 -0700174 uint8_t* growth_end_;
Mathieu Chartier692fafd2013-11-29 17:24:40 -0800175 AtomicInteger objects_allocated_; // Accumulated from revoked thread local regions.
176 AtomicInteger bytes_allocated_; // Accumulated from revoked thread local regions.
Mathieu Chartiere6da9af2013-12-16 11:54:42 -0800177 Mutex block_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
Mathieu Chartierfc4c27e2014-02-11 11:05:41 -0800178 // The objects at the start of the space are stored in the main block. The main block doesn't
179 // have a header, this lets us walk empty spaces which are mprotected.
180 size_t main_block_size_ GUARDED_BY(block_lock_);
Mathieu Chartier692fafd2013-11-29 17:24:40 -0800181 // The number of blocks in the space, if it is 0 then the space has one long continuous block
182 // which doesn't have an updated header.
183 size_t num_blocks_ GUARDED_BY(block_lock_);
Mathieu Chartier590fee92013-09-13 13:46:47 -0700184
185 private:
Mathieu Chartier692fafd2013-11-29 17:24:40 -0800186 struct BlockHeader {
187 size_t size_; // Size of the block in bytes, does not include the header.
188 size_t unused_; // Ensures alignment of kAlignment.
189 };
190
Andreas Gampe575e78c2014-11-03 23:41:03 -0800191 static_assert(sizeof(BlockHeader) % kAlignment == 0,
192 "continuous block must be kAlignment aligned");
Mathieu Chartier692fafd2013-11-29 17:24:40 -0800193
Mathieu Chartier590fee92013-09-13 13:46:47 -0700194 friend class collector::MarkSweep;
195 DISALLOW_COPY_AND_ASSIGN(BumpPointerSpace);
196};
197
198} // namespace space
199} // namespace gc
200} // namespace art
201
202#endif // ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_H_