blob: 850a0066c539b0bcb1863898b7158f750afa9569 [file] [log] [blame]
Mathieu Chartier1c23e1e2012-10-12 14:14:11 -07001/*
2 * Copyright (C) 2012 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
Brian Carlstromfc0e3212013-07-17 14:40:12 -070017#ifndef ART_RUNTIME_GC_SPACE_LARGE_OBJECT_SPACE_H_
18#define ART_RUNTIME_GC_SPACE_LARGE_OBJECT_SPACE_H_
Mathieu Chartier1c23e1e2012-10-12 14:14:11 -070019
Mathieu Chartierbad02672014-08-25 13:08:22 -070020#include "base/allocator.h"
Ian Rogers1d54e732013-05-02 21:10:01 -070021#include "dlmalloc_space.h"
Ian Rogers2dd0e2c2013-01-24 12:42:14 -080022#include "safe_map.h"
Ian Rogers1d54e732013-05-02 21:10:01 -070023#include "space.h"
Ian Rogers2dd0e2c2013-01-24 12:42:14 -080024
25#include <set>
26#include <vector>
Mathieu Chartier1c23e1e2012-10-12 14:14:11 -070027
28namespace art {
Ian Rogers1d54e732013-05-02 21:10:01 -070029namespace gc {
30namespace space {
Mathieu Chartier1c23e1e2012-10-12 14:14:11 -070031
Mathieu Chartieraf4edbd2014-09-08 17:42:48 -070032class AllocationInfo;
33
// Selects which large object space implementation the heap uses.
enum LargeObjectSpaceType {
  // No separate large object space is used. (Presumably large objects then go
  // to the main space -- confirm against the heap's allocator selection code.)
  kLargeObjectSpaceTypeDisabled,
  // Use LargeObjectMapSpace: one mmap/munmap per large object (see below).
  kLargeObjectSpaceTypeMap,
  // Use FreeListSpace: one contiguous mapping managed via a free list (see below).
  kLargeObjectSpaceTypeFreeList,
};
39
Ian Rogers22a20862013-03-16 16:34:57 -070040// Abstraction implemented by all large object spaces.
class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
 public:
  SpaceType GetType() const OVERRIDE {
    return kSpaceTypeLargeObjectSpace;
  }
  // Swaps the live and mark bitmaps of this space.
  void SwapBitmaps();
  void CopyLiveToMarked();
  // Visits every allocation in the space; subclasses define the traversal.
  virtual void Walk(DlMallocSpace::WalkCallback, void* arg) = 0;
  virtual ~LargeObjectSpace() {}

  // Bytes currently allocated (approximate -- see the member comment below).
  uint64_t GetBytesAllocated() OVERRIDE {
    return num_bytes_allocated_;
  }
  // Objects currently allocated.
  uint64_t GetObjectsAllocated() OVERRIDE {
    return num_objects_allocated_;
  }
  // Cumulative bytes ever allocated (not decremented on free).
  uint64_t GetTotalBytesAllocated() const {
    return total_bytes_allocated_;
  }
  // Cumulative objects ever allocated (not decremented on free).
  uint64_t GetTotalObjectsAllocated() const {
    return total_objects_allocated_;
  }
  // Frees a batch of objects; returns the number of bytes freed.
  size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE;
  // LargeObjectSpaces don't have thread local state, so these revocations are no-ops.
  void RevokeThreadLocalBuffers(art::Thread*) OVERRIDE {
  }
  void RevokeAllThreadLocalBuffers() OVERRIDE {
  }
  bool IsAllocSpace() const OVERRIDE {
    return true;
  }
  AllocSpace* AsAllocSpace() OVERRIDE {
    return this;
  }
  // Sweeps dead objects; returns the (objects, bytes) freed pair.
  collector::ObjectBytePair Sweep(bool swap_bitmaps);
  // Large objects are never moved by the GC.
  virtual bool CanMoveObjects() const OVERRIDE {
    return false;
  }
  // Current address at which the space begins, which may vary as the space is filled.
  uint8_t* Begin() const {
    return begin_;
  }
  // Current address at which the space ends, which may vary as the space is filled.
  uint8_t* End() const {
    return end_;
  }
  // Current size of space.
  size_t Size() const {
    return End() - Begin();
  }
  // Return true if we contain the specified address.
  bool Contains(const mirror::Object* obj) const {
    const uint8_t* byte_obj = reinterpret_cast<const uint8_t*>(obj);
    return Begin() <= byte_obj && byte_obj < End();
  }
  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 protected:
  explicit LargeObjectSpace(const std::string& name, uint8_t* begin, uint8_t* end);
  static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg);

  // Approximate number of bytes which have been allocated into the space.
  uint64_t num_bytes_allocated_;
  uint64_t num_objects_allocated_;
  uint64_t total_bytes_allocated_;
  uint64_t total_objects_allocated_;
  // Begin and end, may change as more large objects are allocated.
  uint8_t* begin_;
  uint8_t* end_;

  friend class Space;

 private:
  DISALLOW_COPY_AND_ASSIGN(LargeObjectSpace);
};
117
Ian Rogers22a20862013-03-16 16:34:57 -0700118// A discontinuous large object space implemented by individual mmap/munmap calls.
class LargeObjectMapSpace : public LargeObjectSpace {
 public:
  // Creates a large object space. Allocations into the large object space use memory maps instead
  // of malloc.
  static LargeObjectMapSpace* Create(const std::string& name);
  // Return the storage space required by obj.
  size_t AllocationSize(mirror::Object* obj, size_t* usable_size);
  // Allocates num_bytes via a dedicated memory map; outputs the actual and
  // usable sizes through the pointer parameters.
  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                        size_t* usable_size);
  // Frees ptr and its backing map; returns the number of bytes freed.
  size_t Free(Thread* self, mirror::Object* ptr);
  void Walk(DlMallocSpace::WalkCallback, void* arg) OVERRIDE LOCKS_EXCLUDED(lock_);
  // TODO: disabling thread safety analysis as this may be called when we already hold lock_.
  bool Contains(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS;

 protected:
  explicit LargeObjectMapSpace(const std::string& name);
  virtual ~LargeObjectMapSpace() {}

  // Used to ensure mutual exclusion when the allocation spaces data structures are being modified.
  mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  // All currently live large objects, in allocation order.
  std::vector<mirror::Object*, TrackingAllocator<mirror::Object*, kAllocatorTagLOS>> large_objects_
      GUARDED_BY(lock_);
  // Maps each large object to the MemMap that backs it.
  typedef SafeMap<mirror::Object*, MemMap*, std::less<mirror::Object*>,
                  TrackingAllocator<std::pair<mirror::Object*, MemMap*>, kAllocatorTagLOSMaps>> MemMaps;
  MemMaps mem_maps_ GUARDED_BY(lock_);
};
145
Ian Rogers22a20862013-03-16 16:34:57 -0700146// A continuous large object space with a free-list to handle holes.
class FreeListSpace FINAL : public LargeObjectSpace {
 public:
  // Allocation granularity: the space is carved into page-sized slots.
  static constexpr size_t kAlignment = kPageSize;

  virtual ~FreeListSpace();
  // Creates a free-list space over a single contiguous mapping of `capacity`
  // bytes, optionally placed at `requested_begin`.
  static FreeListSpace* Create(const std::string& name, uint8_t* requested_begin, size_t capacity);
  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
      EXCLUSIVE_LOCKS_REQUIRED(lock_);
  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                        size_t* usable_size) OVERRIDE;
  size_t Free(Thread* self, mirror::Object* obj) OVERRIDE;
  void Walk(DlMallocSpace::WalkCallback callback, void* arg) OVERRIDE LOCKS_EXCLUDED(lock_);
  void Dump(std::ostream& os) const;

 protected:
  FreeListSpace(const std::string& name, MemMap* mem_map, uint8_t* begin, uint8_t* end);
  // Maps an address inside the space to the index of its kAlignment-sized slot.
  size_t GetSlotIndexForAddress(uintptr_t address) const {
    DCHECK(Contains(reinterpret_cast<mirror::Object*>(address)));
    return (address - reinterpret_cast<uintptr_t>(Begin())) / kAlignment;
  }
  size_t GetSlotIndexForAllocationInfo(const AllocationInfo* info) const;
  AllocationInfo* GetAllocationInfoForAddress(uintptr_t address);
  const AllocationInfo* GetAllocationInfoForAddress(uintptr_t address) const;
  // Inverse of GetSlotIndexForAddress: slot index -> address within the space.
  uintptr_t GetAllocationAddressForSlot(size_t slot) const {
    return reinterpret_cast<uintptr_t>(Begin()) + slot * kAlignment;
  }
  uintptr_t GetAddressForAllocationInfo(const AllocationInfo* info) const {
    return GetAllocationAddressForSlot(GetSlotIndexForAllocationInfo(info));
  }
  // Removes header from the free blocks set by finding the corresponding iterator and erasing it.
  void RemoveFreePrev(AllocationInfo* info) EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Orders free blocks; the comparison is defined out of line.
  class SortByPrevFree {
   public:
    bool operator()(const AllocationInfo* a, const AllocationInfo* b) const;
  };
  typedef std::set<AllocationInfo*, SortByPrevFree,
                   TrackingAllocator<AllocationInfo*, kAllocatorTagLOSFreeList>> FreeBlocks;

  // There is no footer for any allocations at the end of the space, so we keep track of how much
  // free space there is at the end manually (see free_end_ below).
  // Backing storage for the space itself.
  std::unique_ptr<MemMap> mem_map_;
  // Side table for allocation info, one per page.
  std::unique_ptr<MemMap> allocation_info_map_;
  AllocationInfo* allocation_info_;

  Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  // Free bytes at the end of the space.
  size_t free_end_ GUARDED_BY(lock_);
  FreeBlocks free_blocks_ GUARDED_BY(lock_);
};
198
Ian Rogers1d54e732013-05-02 21:10:01 -0700199} // namespace space
200} // namespace gc
201} // namespace art
Mathieu Chartier1c23e1e2012-10-12 14:14:11 -0700202
Brian Carlstromfc0e3212013-07-17 14:40:12 -0700203#endif // ART_RUNTIME_GC_SPACE_LARGE_OBJECT_SPACE_H_