blob: d73fe3bdd1d63b0ef5d5d708654125dce63699ec [file] [log] [blame]
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_H_
#define ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_H_

#include "root_visitor.h"
#include "space.h"

namespace art {
namespace gc {

namespace collector {
  class MarkSweep;
}  // namespace collector

namespace space {

// A bump pointer space is a space where objects may be allocated and garbage collected.
// Allocation can also be served from per-thread buffers (TLABs) which are revoked back into
// the space-wide accounting; see RevokeThreadLocalBuffers/AllocNewTlab below.
class BumpPointerSpace : public ContinuousMemMapAllocSpace {
 public:
  // Visitor callback used when walking the space: receives the [start, end) address range of
  // an object, its size in bytes, and the opaque callback_arg supplied by the caller.
  typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);

  SpaceType GetType() const {
    return kSpaceTypeBumpPointerSpace;
  }

  // Create a bump pointer space with the requested sizes. The requested base address is not
  // guaranteed to be granted, if it is required, the caller should call Begin on the returned
  // space to confirm the request was granted.
  static BumpPointerSpace* Create(const std::string& name, size_t capacity, byte* requested_begin);

  // Allocate num_bytes, returns nullptr if the space is full.
  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated);
  // Non-virtual variant of Alloc, avoids the virtual dispatch.
  mirror::Object* AllocNonvirtual(size_t num_bytes);
  // As AllocNonvirtual but skips updating the objects_allocated_/bytes_allocated_ counters —
  // NOTE(review): presumably the caller accounts for the allocation itself; confirm at call sites.
  mirror::Object* AllocNonvirtualWithoutAccounting(size_t num_bytes);

  // Return the storage space required by obj.
  virtual size_t AllocationSize(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // NOPS unless we support free lists.
  virtual size_t Free(Thread*, mirror::Object*) {
    return 0;
  }
  virtual size_t FreeList(Thread*, size_t, mirror::Object**) {
    return 0;
  }

  // Non-virtual variant of AllocationSize: the stored size is simply the object's own size.
  size_t AllocationSizeNonvirtual(mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return obj->SizeOf();
  }

  // Removes the fork time growth limit on capacity, allowing the application to allocate up to the
  // maximum reserved size of the heap.
  void ClearGrowthLimit() {
    growth_end_ = Limit();
  }

  // Override capacity so that we only return the possibly limited capacity.
  size_t Capacity() const {
    return growth_end_ - begin_;
  }

  // The total amount of memory reserved for the space.
  size_t NonGrowthLimitCapacity() const {
    return GetMemMap()->Size();
  }

  // Bump pointer spaces keep no live bitmap.
  accounting::SpaceBitmap* GetLiveBitmap() const {
    return nullptr;
  }

  // Bump pointer spaces keep no mark bitmap.
  accounting::SpaceBitmap* GetMarkBitmap() const {
    return nullptr;
  }

  // Clear the memory and reset the pointer to the start of the space.
  void Clear() LOCKS_EXCLUDED(block_lock_);

  // Print diagnostic state to os.
  void Dump(std::ostream& os) const;

  // Revoke the thread-local buffer of one thread (resp. all threads); revoked counts are folded
  // into objects_allocated_/bytes_allocated_ (see the member comments below).
  void RevokeThreadLocalBuffers(Thread* thread) LOCKS_EXCLUDED(block_lock_);
  void RevokeAllThreadLocalBuffers() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_,
                                                    Locks::thread_list_lock_);

  // Space-wide allocation statistics.
  uint64_t GetBytesAllocated() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  uint64_t GetObjectsAllocated() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  bool IsEmpty() const;

  // Returns true if obj's address lies within [Begin(), End()).
  bool Contains(const mirror::Object* obj) const {
    const byte* byte_obj = reinterpret_cast<const byte*>(obj);
    return byte_obj >= Begin() && byte_obj < End();
  }

  // TODO: Change this? Mainly used for compacting to a particular region of memory.
  BumpPointerSpace(const std::string& name, byte* begin, byte* limit);

  // Return the object which comes after obj, while ensuring alignment.
  static mirror::Object* GetNextObject(mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Allocate a new TLAB, returns false if the allocation failed.
  bool AllocNewTlab(Thread* self, size_t bytes);

  virtual BumpPointerSpace* AsBumpPointerSpace() {
    return this;
  }

  // Go through all of the blocks and visit the continuous objects.
  void Walk(ObjectVisitorCallback callback, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Object alignment within the space.
  static constexpr size_t kAlignment = 8;

 protected:
  BumpPointerSpace(const std::string& name, MemMap* mem_map);

  // Allocate a raw block of bytes.
  byte* AllocBlock(size_t bytes) EXCLUSIVE_LOCKS_REQUIRED(block_lock_);
  void RevokeThreadLocalBuffersLocked(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(block_lock_);

  mirror::Object* AllocWithoutGrowthLocked(size_t num_bytes, size_t* bytes_allocated)
      EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // The main block is an unbounded block where objects go when there are no other blocks. This
  // enables us to maintain tightly packed objects when you are not using thread local buffers for
  // allocation.
  // The main block is also the block which starts at address 0.
  void UpdateMainBlock() EXCLUSIVE_LOCKS_REQUIRED(block_lock_);

  // Address allocation may currently grow to; set to Limit() once ClearGrowthLimit() is called.
  byte* growth_end_;
  AtomicInteger objects_allocated_;  // Accumulated from revoked thread local regions.
  AtomicInteger bytes_allocated_;  // Accumulated from revoked thread local regions.
  // Guards the block bookkeeping (num_blocks_ and the *Locked block operations above).
  Mutex block_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  // The number of blocks in the space, if it is 0 then the space has one long continuous block
  // which doesn't have an updated header.
  size_t num_blocks_ GUARDED_BY(block_lock_);

 private:
  // Header placed in front of a block of memory within the space.
  struct BlockHeader {
    size_t size_;  // Size of the block in bytes, does not include the header.
    size_t unused_;  // Ensures alignment of kAlignment.
  };

  // The header must not disturb kAlignment-alignment of the objects that follow it.
  COMPILE_ASSERT(sizeof(BlockHeader) % kAlignment == 0,
                 continuous_block_must_be_kAlignment_aligned);

  friend class collector::MarkSweep;
  DISALLOW_COPY_AND_ASSIGN(BumpPointerSpace);
};

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_H_