/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_DLMALLOC_SPACE_H_
#define ART_RUNTIME_GC_SPACE_DLMALLOC_SPACE_H_

#include "malloc_space.h"
#include "space.h"

namespace art {
namespace gc {

namespace collector {
  class MarkSweep;
}  // namespace collector

namespace space {

// An alloc space is a space where objects may be allocated and garbage collected. Not final, as it
// may be subclassed by ValgrindMallocSpace.
class DlMallocSpace : public MallocSpace {
 public:
  // Create a DlMallocSpace from an existing mem_map.
  static DlMallocSpace* CreateFromMemMap(MemMap* mem_map, const std::string& name,
                                         size_t starting_size, size_t initial_size,
                                         size_t growth_limit, size_t capacity,
                                         bool can_move_objects);

  // Create a DlMallocSpace with the requested sizes. The requested base address is not
  // guaranteed to be granted; if it is required, the caller should call Begin on the returned
  // space to confirm that the request was granted.
  static DlMallocSpace* Create(const std::string& name, size_t initial_size, size_t growth_limit,
                               size_t capacity, uint8_t* requested_begin, bool can_move_objects);
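
  // A minimal usage sketch (illustrative only; the name and sizes below are made up and error
  // handling is omitted). If a non-null requested_begin were passed, the caller would compare it
  // against space->Begin() to confirm that the placement was honored.
  //
  //   DlMallocSpace* space = DlMallocSpace::Create(
  //       "example dlmalloc space", 16 * MB, 64 * MB, 64 * MB,
  //       /*requested_begin=*/nullptr, /*can_move_objects=*/false);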

  // Virtual to allow ValgrindMallocSpace to intercept.
  virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                          size_t* usable_size) OVERRIDE LOCKS_EXCLUDED(lock_);
  // Virtual to allow ValgrindMallocSpace to intercept.
  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                size_t* usable_size) OVERRIDE LOCKS_EXCLUDED(lock_) {
    return AllocNonvirtual(self, num_bytes, bytes_allocated, usable_size);
  }
  // Virtual to allow ValgrindMallocSpace to intercept.
  virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE {
    return AllocationSizeNonvirtual(obj, usable_size);
  }
  // Virtual to allow ValgrindMallocSpace to intercept.
  virtual size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE
      LOCKS_EXCLUDED(lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  // Virtual to allow ValgrindMallocSpace to intercept.
  virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
      LOCKS_EXCLUDED(lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
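
  // A sketch of an allocate/measure/free round trip (illustrative only; it assumes a running
  // thread, the locking required by Free, and an arbitrary 64-byte request size):
  //
  //   size_t bytes_allocated, usable_size;
  //   mirror::Object* obj =
  //       space->Alloc(Thread::Current(), 64, &bytes_allocated, &usable_size);
  //   if (obj != nullptr) {
  //     CHECK_GE(space->AllocationSize(obj, &usable_size), 64u);
  //     space->Free(Thread::Current(), obj);
  //   }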

  // DlMallocSpaces don't have thread local state.
  void RevokeThreadLocalBuffers(art::Thread*) OVERRIDE {
  }
  void RevokeAllThreadLocalBuffers() OVERRIDE {
  }

  // Faster non-virtual allocation path.
  mirror::Object* AllocNonvirtual(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                  size_t* usable_size) LOCKS_EXCLUDED(lock_);

  // Faster non-virtual allocation size path.
  size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size);

#ifndef NDEBUG
  // Override only in the debug build.
  void CheckMoreCoreForPrecondition();
#endif

  void* GetMspace() const {
    return mspace_;
  }

  size_t Trim() OVERRIDE;

  // Performs an mspace_inspect_all, which calls back for each allocation chunk. A chunk may not
  // be in use; this is indicated by num_bytes being zero.
  void Walk(WalkCallback callback, void* arg) OVERRIDE LOCKS_EXCLUDED(lock_);
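
  // A sketch of a Walk callback (illustrative only; it assumes WalkCallback has the
  // (start, end, num_bytes, arg) shape declared in malloc_space.h and simply sums used bytes):
  //
  //   void CountUsedBytes(void* start, void* end, size_t num_bytes, void* arg) {
  //     if (num_bytes != 0) {  // Zero means the chunk is not in use.
  //       *reinterpret_cast<size_t*>(arg) += num_bytes;
  //     }
  //   }
  //   size_t used = 0;
  //   space->Walk(CountUsedBytes, &used);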

  // Returns the number of bytes that the space has currently obtained from the system. This is
  // greater than or equal to the amount of live data in the space.
  size_t GetFootprint() OVERRIDE;

  // Returns the number of bytes that the heap is allowed to obtain from the system via MoreCore.
  size_t GetFootprintLimit() OVERRIDE;

  // Set the maximum number of bytes that the heap is allowed to obtain from the system via
  // MoreCore. Note that this is used to stop the mspace from growing beyond Capacity. When
  // allocations fail, we GC before increasing the footprint limit and allowing the mspace to grow.
  void SetFootprintLimit(size_t limit) OVERRIDE;
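
  // A sketch of how a caller might grow the footprint after a failed allocation (illustrative
  // only; it assumes self, num_bytes, and the out-parameters are in scope, and it is not
  // necessarily how the runtime's allocators actually drive these methods):
  //
  //   mirror::Object* obj = space->Alloc(self, num_bytes, &bytes_allocated, &usable_size);
  //   if (obj == nullptr) {
  //     // ... collect garbage first; if the allocation still fails, raise the limit and retry ...
  //     space->SetFootprintLimit(space->Capacity());
  //     obj = space->Alloc(self, num_bytes, &bytes_allocated, &usable_size);
  //   }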

  MallocSpace* CreateInstance(MemMap* mem_map, const std::string& name, void* allocator,
                              uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit,
                              bool can_move_objects);

  uint64_t GetBytesAllocated() OVERRIDE;
  uint64_t GetObjectsAllocated() OVERRIDE;

  virtual void Clear() OVERRIDE;

  bool IsDlMallocSpace() const OVERRIDE {
    return true;
  }

  DlMallocSpace* AsDlMallocSpace() OVERRIDE {
    return this;
  }

  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 protected:
  DlMallocSpace(MemMap* mem_map, size_t initial_size, const std::string& name, void* mspace,
                uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit,
                bool can_move_objects, size_t starting_size);

 private:
  mirror::Object* AllocWithoutGrowthLocked(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                           size_t* usable_size)
      EXCLUSIVE_LOCKS_REQUIRED(lock_);

  void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size,
                        size_t /*maximum_size*/, bool /*low_memory_mode*/) OVERRIDE {
    return CreateMspace(base, morecore_start, initial_size);
  }
  static void* CreateMspace(void* base, size_t morecore_start, size_t initial_size);

  // The boundary tag overhead.
  static const size_t kChunkOverhead = sizeof(intptr_t);

  // The underlying dlmalloc mspace.
  void* mspace_;

  friend class collector::MarkSweep;

  DISALLOW_COPY_AND_ASSIGN(DlMallocSpace);
};

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_DLMALLOC_SPACE_H_