blob: 9dc6f31e6054222b02b21e2c092f94afecf91ea3 [file] [log] [blame]
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
#ifndef ART_RUNTIME_GC_SPACE_ROSALLOC_SPACE_H_
#define ART_RUNTIME_GC_SPACE_ROSALLOC_SPACE_H_

#include "gc/allocator/rosalloc.h"
#include "malloc_space.h"
#include "space.h"

namespace art {
namespace gc {

namespace collector {
  class MarkSweep;
}  // namespace collector

namespace space {

// An alloc space implemented using a runs-of-slots memory allocator. Not final as may be
// overridden by a MemoryToolMallocSpace.
class RosAllocSpace : public MallocSpace {
 public:
  // Create a RosAllocSpace with the requested sizes. The requested
  // base address is not guaranteed to be granted, if it is required,
  // the caller should call Begin on the returned space to confirm the
  // request was granted.
  static RosAllocSpace* Create(const std::string& name, size_t initial_size, size_t growth_limit,
                               size_t capacity, uint8_t* requested_begin, bool low_memory_mode,
                               bool can_move_objects);
  // Create a RosAllocSpace that manages an existing, already-reserved memory mapping.
  static RosAllocSpace* CreateFromMemMap(MemMap* mem_map, const std::string& name,
                                         size_t starting_size, size_t initial_size,
                                         size_t growth_limit, size_t capacity,
                                         bool low_memory_mode, bool can_move_objects);

  // Allocation path that may also grow the space's footprint (see MallocSpace);
  // defined out of line. Must not be called with lock_ held.
  mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                  size_t* usable_size, size_t* bytes_tl_bulk_allocated)
      OVERRIDE LOCKS_EXCLUDED(lock_);
  // Virtual allocation entry point; simply forwards to the non-virtual fast path.
  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                        size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE {
    return AllocNonvirtual(self, num_bytes, bytes_allocated, usable_size,
                           bytes_tl_bulk_allocated);
  }
  // Allocation without internal locking. Callers must hold the mutator lock
  // exclusively (i.e. all other mutators are suspended).
  mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                    size_t* usable_size, size_t* bytes_tl_bulk_allocated)
      OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return AllocNonvirtualThreadUnsafe(self, num_bytes, bytes_allocated, usable_size,
                                       bytes_tl_bulk_allocated);
  }
  // Virtual size query; forwards to the template version with the
  // memory-tool-aware path enabled (<true>).
  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE {
    return AllocationSizeNonvirtual<true>(obj, usable_size);
  }
  // Free a single object / a list of objects; return the number of bytes freed.
  size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Non-virtual fast-path allocation, callable directly to avoid virtual dispatch.
  mirror::Object* AllocNonvirtual(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                  size_t* usable_size, size_t* bytes_tl_bulk_allocated) {
    // RosAlloc zeroes memory internally.
    return AllocCommon(self, num_bytes, bytes_allocated, usable_size,
                       bytes_tl_bulk_allocated);
  }
  // Non-virtual thread-unsafe allocation (AllocCommon<false> skips thread-safe handling).
  mirror::Object* AllocNonvirtualThreadUnsafe(Thread* self, size_t num_bytes,
                                              size_t* bytes_allocated, size_t* usable_size,
                                              size_t* bytes_tl_bulk_allocated) {
    // RosAlloc zeroes memory internally. Pass in false for thread unsafe.
    return AllocCommon<false>(self, num_bytes, bytes_allocated, usable_size,
                              bytes_tl_bulk_allocated);
  }

  // Returns true if the given allocation request can be allocated in
  // an existing thread local run without allocating a new run.
  ALWAYS_INLINE bool CanAllocThreadLocal(Thread* self, size_t num_bytes);
  // Allocate the given allocation request in an existing thread local
  // run without allocating a new run.
  ALWAYS_INLINE mirror::Object* AllocThreadLocal(Thread* self, size_t num_bytes,
                                                 size_t* bytes_allocated);
  // Upper bound on the bytes a bulk (thread-local run) allocation of num_bytes
  // may consume; forwards to the non-virtual implementation.
  size_t MaxBytesBulkAllocatedFor(size_t num_bytes) OVERRIDE {
    return MaxBytesBulkAllocatedForNonvirtual(num_bytes);
  }
  ALWAYS_INLINE size_t MaxBytesBulkAllocatedForNonvirtual(size_t num_bytes);

  // TODO: NO_THREAD_SAFETY_ANALYSIS because SizeOf() requires that mutator_lock is held.
  template<bool kMaybeIsRunningOnMemoryTool>
  size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
      NO_THREAD_SAFETY_ANALYSIS;

  // Accessor for the underlying allocator instance.
  allocator::RosAlloc* GetRosAlloc() const {
    return rosalloc_;
  }

  // MallocSpace interface: trimming, heap walking, and footprint management.
  size_t Trim() OVERRIDE;
  void Walk(WalkCallback callback, void* arg) OVERRIDE LOCKS_EXCLUDED(lock_);
  size_t GetFootprint() OVERRIDE;
  size_t GetFootprintLimit() OVERRIDE;
  void SetFootprintLimit(size_t limit) OVERRIDE;

  void Clear() OVERRIDE;

  // Create a new RosAllocSpace sharing the same allocator over the given mapping
  // (used by MallocSpace when splitting a space — see malloc_space.h).
  MallocSpace* CreateInstance(MemMap* mem_map, const std::string& name, void* allocator,
                              uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit,
                              bool can_move_objects) OVERRIDE;

  uint64_t GetBytesAllocated() OVERRIDE;
  uint64_t GetObjectsAllocated() OVERRIDE;

  // Return thread-local allocation buffers (runs) to the shared pool; the
  // size_t results presumably report reclaimed bytes — confirm in the .cc file.
  size_t RevokeThreadLocalBuffers(Thread* thread);
  size_t RevokeAllThreadLocalBuffers();
  // Debug checks that revocation actually happened.
  void AssertThreadLocalBuffersAreRevoked(Thread* thread);
  void AssertAllThreadLocalBuffersAreRevoked();

  // Returns the class of a recently freed object.
  mirror::Class* FindRecentFreedObject(const mirror::Object* obj);

  bool IsRosAllocSpace() const OVERRIDE {
    return true;
  }

  RosAllocSpace* AsRosAllocSpace() OVERRIDE {
    return this;
  }

  // Run the allocator's internal consistency checks (delegates to RosAlloc::Verify).
  void Verify() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
    rosalloc_->Verify();
  }

  virtual ~RosAllocSpace();

  // Log diagnostics explaining why an allocation of failed_alloc_bytes failed
  // (delegates to the allocator, which knows its fragmentation state).
  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE {
    rosalloc_->LogFragmentationAllocFailure(os, failed_alloc_bytes);
  }

 protected:
  // Construction goes through Create/CreateFromMemMap; this wires up an
  // already-created allocator with the space's memory bounds.
  RosAllocSpace(MemMap* mem_map, size_t initial_size, const std::string& name,
                allocator::RosAlloc* rosalloc, uint8_t* begin, uint8_t* end, uint8_t* limit,
                size_t growth_limit, bool can_move_objects, size_t starting_size,
                bool low_memory_mode);

 private:
  // Shared allocation implementation; kThreadSafe selects whether the
  // allocator's thread-safe path is used.
  template<bool kThreadSafe = true>
  mirror::Object* AllocCommon(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                              size_t* usable_size, size_t* bytes_tl_bulk_allocated);

  // MallocSpace factory hook: builds the RosAlloc instance, enabling the
  // memory-tool path when running under one (RUNNING_ON_MEMORY_TOOL).
  void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size,
                        size_t maximum_size, bool low_memory_mode) OVERRIDE {
    return CreateRosAlloc(base, morecore_start, initial_size, maximum_size, low_memory_mode,
                          RUNNING_ON_MEMORY_TOOL != 0);
  }
  static allocator::RosAlloc* CreateRosAlloc(void* base, size_t morecore_start, size_t initial_size,
                                             size_t maximum_size, bool low_memory_mode,
                                             bool running_on_memory_tool);

  // Invoke callback over the allocator's memory regions. The WithSuspendAll
  // variant presumably suspends all threads first — confirm in the .cc file.
  void InspectAllRosAlloc(void (*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
                          void* arg, bool do_null_callback_at_end)
      LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_, Locks::thread_list_lock_);
  void InspectAllRosAllocWithSuspendAll(
      void (*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
      void* arg, bool do_null_callback_at_end)
      LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_, Locks::thread_list_lock_);

  // Underlying rosalloc.
  allocator::RosAlloc* rosalloc_;

  // Value of the low_memory_mode argument this space was constructed with.
  const bool low_memory_mode_;

  friend class collector::MarkSweep;

  DISALLOW_COPY_AND_ASSIGN(RosAllocSpace);
};

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_ROSALLOC_SPACE_H_