blob: 0b9f7ad7131adbc6363e7ab2649d742aee02c117 [file] [log] [blame]
Elliott Hughes2faa5f12012-01-30 14:42:07 -08001/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
Carl Shapiro69759ea2011-07-21 18:13:35 -070016
Ian Rogers1d54e732013-05-02 21:10:01 -070017#include "dlmalloc_space.h"
Mathieu Chartiereb5710e2013-07-25 15:19:42 -070018#include "large_object_space.h"
Mathieu Chartiera1602f22014-01-13 17:19:19 -080019#include "zygote_space.h"
Carl Shapiro69759ea2011-07-21 18:13:35 -070020
Brian Carlstrom9b7f2c22011-09-27 14:35:04 -070021#include "common_test.h"
Brian Carlstrom578bbdc2011-07-21 14:07:47 -070022#include "globals.h"
Elliott Hughes90a33692011-08-30 13:27:07 -070023#include "UniquePtr.h"
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -070024#include "mirror/array-inl.h"
25#include "mirror/object-inl.h"
Carl Shapiro69759ea2011-07-21 18:13:35 -070026
Ian Rogers3bb17a62012-01-27 23:56:44 -080027#include <stdint.h>
28
Carl Shapiro69759ea2011-07-21 18:13:35 -070029namespace art {
Ian Rogers1d54e732013-05-02 21:10:01 -070030namespace gc {
31namespace space {
Carl Shapiro69759ea2011-07-21 18:13:35 -070032
Ian Rogers3bb17a62012-01-27 23:56:44 -080033class SpaceTest : public CommonTest {
34 public:
Mathieu Chartier590fee92013-09-13 13:46:47 -070035 void AddSpace(ContinuousSpace* space) {
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -070036 // For RosAlloc, revoke the thread local runs before moving onto a
37 // new alloc space.
38 Runtime::Current()->GetHeap()->RevokeAllThreadLocalBuffers();
Mathieu Chartier590fee92013-09-13 13:46:47 -070039 Runtime::Current()->GetHeap()->AddSpace(space);
Ian Rogers1d54e732013-05-02 21:10:01 -070040 }
Mathieu Chartier4e305412014-02-19 10:54:44 -080041 void InstallClass(SirtRef<mirror::Object>& o, size_t size)
42 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Hiroshi Yamauchi4d2efce2014-02-10 16:19:09 -080043 // Note the minimum size, which is the size of a zero-length byte array.
44 EXPECT_GE(size, SizeOfZeroLengthByteArray());
Mathieu Chartier4e305412014-02-19 10:54:44 -080045 SirtRef<mirror::ClassLoader> null_loader(Thread::Current(), nullptr);
46 mirror::Class* byte_array_class = Runtime::Current()->GetClassLinker()->FindClass("[B",
47 null_loader);
48 EXPECT_TRUE(byte_array_class != nullptr);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -070049 o->SetClass(byte_array_class);
Mathieu Chartier4e305412014-02-19 10:54:44 -080050 mirror::Array* arr = o->AsArray<kVerifyNone>();
Hiroshi Yamauchi4d2efce2014-02-10 16:19:09 -080051 size_t header_size = SizeOfZeroLengthByteArray();
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -070052 int32_t length = size - header_size;
53 arr->SetLength(length);
Mathieu Chartier4e305412014-02-19 10:54:44 -080054 EXPECT_EQ(arr->SizeOf<kVerifyNone>(), size);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -070055 }
Hiroshi Yamauchi3ddbd422013-12-06 17:43:36 -080056
Hiroshi Yamauchi4d2efce2014-02-10 16:19:09 -080057 static size_t SizeOfZeroLengthByteArray() {
58 return mirror::Array::DataOffset(Primitive::ComponentSize(Primitive::kPrimByte)).Uint32Value();
59 }
60
Hiroshi Yamauchi3ddbd422013-12-06 17:43:36 -080061 static MallocSpace* CreateDlMallocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
62 size_t capacity, byte* requested_begin) {
63 return DlMallocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin);
64 }
65 static MallocSpace* CreateRosAllocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
66 size_t capacity, byte* requested_begin) {
Hiroshi Yamauchi573f7d22013-12-17 11:54:23 -080067 return RosAllocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin,
68 Runtime::Current()->GetHeap()->IsLowMemoryMode());
Hiroshi Yamauchi3ddbd422013-12-06 17:43:36 -080069 }
70
71 typedef MallocSpace* (*CreateSpaceFn)(const std::string& name, size_t initial_size, size_t growth_limit,
72 size_t capacity, byte* requested_begin);
73 void InitTestBody(CreateSpaceFn create_space);
74 void ZygoteSpaceTestBody(CreateSpaceFn create_space);
75 void AllocAndFreeTestBody(CreateSpaceFn create_space);
76 void AllocAndFreeListTestBody(CreateSpaceFn create_space);
77
78 void SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t object_size,
79 int round, size_t growth_limit);
80 void SizeFootPrintGrowthLimitAndTrimDriver(size_t object_size, CreateSpaceFn create_space);
Ian Rogers3bb17a62012-01-27 23:56:44 -080081};
Brian Carlstrom9b7f2c22011-09-27 14:35:04 -070082
Mathieu Chartiereb5710e2013-07-25 15:19:42 -070083static size_t test_rand(size_t* seed) {
84 *seed = *seed * 1103515245 + 12345;
85 return *seed;
86}
87
Hiroshi Yamauchi3ddbd422013-12-06 17:43:36 -080088void SpaceTest::InitTestBody(CreateSpaceFn create_space) {
Carl Shapiro69759ea2011-07-21 18:13:35 -070089 {
jeffhaoc1160702011-10-27 15:48:45 -070090 // Init < max == growth
Mathieu Chartier4e305412014-02-19 10:54:44 -080091 UniquePtr<Space> space(create_space("test", 16 * MB, 32 * MB, 32 * MB, nullptr));
92 EXPECT_TRUE(space.get() != nullptr);
Carl Shapiro69759ea2011-07-21 18:13:35 -070093 }
94 {
jeffhaoc1160702011-10-27 15:48:45 -070095 // Init == max == growth
Mathieu Chartier4e305412014-02-19 10:54:44 -080096 UniquePtr<Space> space(create_space("test", 16 * MB, 16 * MB, 16 * MB, nullptr));
97 EXPECT_TRUE(space.get() != nullptr);
Carl Shapiro69759ea2011-07-21 18:13:35 -070098 }
99 {
jeffhaoc1160702011-10-27 15:48:45 -0700100 // Init > max == growth
Mathieu Chartier4e305412014-02-19 10:54:44 -0800101 UniquePtr<Space> space(create_space("test", 32 * MB, 16 * MB, 16 * MB, nullptr));
102 EXPECT_TRUE(space.get() == nullptr);
jeffhaoc1160702011-10-27 15:48:45 -0700103 }
104 {
105 // Growth == init < max
Mathieu Chartier4e305412014-02-19 10:54:44 -0800106 UniquePtr<Space> space(create_space("test", 16 * MB, 16 * MB, 32 * MB, nullptr));
107 EXPECT_TRUE(space.get() != nullptr);
jeffhaoc1160702011-10-27 15:48:45 -0700108 }
109 {
110 // Growth < init < max
Mathieu Chartier4e305412014-02-19 10:54:44 -0800111 UniquePtr<Space> space(create_space("test", 16 * MB, 8 * MB, 32 * MB, nullptr));
112 EXPECT_TRUE(space.get() == nullptr);
jeffhaoc1160702011-10-27 15:48:45 -0700113 }
114 {
115 // Init < growth < max
Mathieu Chartier4e305412014-02-19 10:54:44 -0800116 UniquePtr<Space> space(create_space("test", 8 * MB, 16 * MB, 32 * MB, nullptr));
117 EXPECT_TRUE(space.get() != nullptr);
jeffhaoc1160702011-10-27 15:48:45 -0700118 }
119 {
120 // Init < max < growth
Mathieu Chartier4e305412014-02-19 10:54:44 -0800121 UniquePtr<Space> space(create_space("test", 8 * MB, 32 * MB, 16 * MB, nullptr));
122 EXPECT_TRUE(space.get() == nullptr);
Carl Shapiro69759ea2011-07-21 18:13:35 -0700123 }
124}
125
Hiroshi Yamauchi3ddbd422013-12-06 17:43:36 -0800126TEST_F(SpaceTest, Init_DlMallocSpace) {
127 InitTestBody(SpaceTest::CreateDlMallocSpace);
128}
129TEST_F(SpaceTest, Init_RosAllocSpace) {
130 InitTestBody(SpaceTest::CreateRosAllocSpace);
131}
132
Mathieu Chartierdcf8d722012-08-02 14:55:54 -0700133// TODO: This test is not very good, we should improve it.
134// The test should do more allocations before the creation of the ZygoteSpace, and then do
135// allocations after the ZygoteSpace is created. The test should also do some GCs to ensure that
136// the GC works with the ZygoteSpace.
Hiroshi Yamauchi3ddbd422013-12-06 17:43:36 -0800137void SpaceTest::ZygoteSpaceTestBody(CreateSpaceFn create_space) {
138 size_t dummy = 0;
Mathieu Chartier4e305412014-02-19 10:54:44 -0800139 MallocSpace* space(create_space("test", 4 * MB, 16 * MB, 16 * MB, nullptr));
140 ASSERT_TRUE(space != nullptr);
Mathieu Chartiercc236d72012-07-20 10:29:05 -0700141
Hiroshi Yamauchi3ddbd422013-12-06 17:43:36 -0800142 // Make space findable to the heap, will also delete space when runtime is cleaned up
143 AddSpace(space);
144 Thread* self = Thread::Current();
Mathieu Chartier4e305412014-02-19 10:54:44 -0800145 ScopedObjectAccess soa(self);
Mathieu Chartiercc236d72012-07-20 10:29:05 -0700146
Hiroshi Yamauchi3ddbd422013-12-06 17:43:36 -0800147 // Succeeds, fits without adjusting the footprint limit.
Mathieu Chartier4e305412014-02-19 10:54:44 -0800148 SirtRef<mirror::Object> ptr1(self, space->Alloc(self, 1 * MB, &dummy));
149 EXPECT_TRUE(ptr1.get() != nullptr);
Hiroshi Yamauchi3ddbd422013-12-06 17:43:36 -0800150 InstallClass(ptr1, 1 * MB);
Mathieu Chartiercc236d72012-07-20 10:29:05 -0700151
Hiroshi Yamauchi3ddbd422013-12-06 17:43:36 -0800152 // Fails, requires a higher footprint limit.
153 mirror::Object* ptr2 = space->Alloc(self, 8 * MB, &dummy);
Mathieu Chartier4e305412014-02-19 10:54:44 -0800154 EXPECT_TRUE(ptr2 == nullptr);
Mathieu Chartiercc236d72012-07-20 10:29:05 -0700155
Hiroshi Yamauchi3ddbd422013-12-06 17:43:36 -0800156 // Succeeds, adjusts the footprint.
157 size_t ptr3_bytes_allocated;
Mathieu Chartier4e305412014-02-19 10:54:44 -0800158 SirtRef<mirror::Object> ptr3(self, space->AllocWithGrowth(self, 8 * MB, &ptr3_bytes_allocated));
159 EXPECT_TRUE(ptr3.get() != nullptr);
Hiroshi Yamauchi3ddbd422013-12-06 17:43:36 -0800160 EXPECT_LE(8U * MB, ptr3_bytes_allocated);
161 InstallClass(ptr3, 8 * MB);
Mathieu Chartiercc236d72012-07-20 10:29:05 -0700162
Hiroshi Yamauchi3ddbd422013-12-06 17:43:36 -0800163 // Fails, requires a higher footprint limit.
164 mirror::Object* ptr4 = space->Alloc(self, 8 * MB, &dummy);
Mathieu Chartier4e305412014-02-19 10:54:44 -0800165 EXPECT_TRUE(ptr4 == nullptr);
Mathieu Chartiercc236d72012-07-20 10:29:05 -0700166
Hiroshi Yamauchi3ddbd422013-12-06 17:43:36 -0800167 // Also fails, requires a higher allowed footprint.
168 mirror::Object* ptr5 = space->AllocWithGrowth(self, 8 * MB, &dummy);
Mathieu Chartier4e305412014-02-19 10:54:44 -0800169 EXPECT_TRUE(ptr5 == nullptr);
Mathieu Chartiercc236d72012-07-20 10:29:05 -0700170
Hiroshi Yamauchi3ddbd422013-12-06 17:43:36 -0800171 // Release some memory.
Mathieu Chartier4e305412014-02-19 10:54:44 -0800172 size_t free3 = space->AllocationSize(ptr3.get());
Hiroshi Yamauchi3ddbd422013-12-06 17:43:36 -0800173 EXPECT_EQ(free3, ptr3_bytes_allocated);
Mathieu Chartier4e305412014-02-19 10:54:44 -0800174 EXPECT_EQ(free3, space->Free(self, ptr3.reset(nullptr)));
Hiroshi Yamauchi3ddbd422013-12-06 17:43:36 -0800175 EXPECT_LE(8U * MB, free3);
Mathieu Chartiercc236d72012-07-20 10:29:05 -0700176
Hiroshi Yamauchi3ddbd422013-12-06 17:43:36 -0800177 // Succeeds, now that memory has been freed.
Mathieu Chartier4e305412014-02-19 10:54:44 -0800178 SirtRef<mirror::Object> ptr6(self, space->AllocWithGrowth(self, 9 * MB, &dummy));
179 EXPECT_TRUE(ptr6.get() != nullptr);
Hiroshi Yamauchi3ddbd422013-12-06 17:43:36 -0800180 InstallClass(ptr6, 9 * MB);
Mathieu Chartiercc236d72012-07-20 10:29:05 -0700181
Hiroshi Yamauchi3ddbd422013-12-06 17:43:36 -0800182 // Final clean up.
Mathieu Chartier4e305412014-02-19 10:54:44 -0800183 size_t free1 = space->AllocationSize(ptr1.get());
184 space->Free(self, ptr1.reset(nullptr));
Hiroshi Yamauchi3ddbd422013-12-06 17:43:36 -0800185 EXPECT_LE(1U * MB, free1);
Mathieu Chartiercc236d72012-07-20 10:29:05 -0700186
Hiroshi Yamauchi3ddbd422013-12-06 17:43:36 -0800187 // Make sure that the zygote space isn't directly at the start of the space.
188 space->Alloc(self, 1U * MB, &dummy);
Mathieu Chartiera1602f22014-01-13 17:19:19 -0800189
190 gc::Heap* heap = Runtime::Current()->GetHeap();
191 space::Space* old_space = space;
192 heap->RemoveSpace(old_space);
193 space::ZygoteSpace* zygote_space = space->CreateZygoteSpace("alloc space",
194 heap->IsLowMemoryMode(),
195 &space);
196 delete old_space;
197 // Add the zygote space.
198 AddSpace(zygote_space);
Mathieu Chartiercc236d72012-07-20 10:29:05 -0700199
Hiroshi Yamauchi3ddbd422013-12-06 17:43:36 -0800200 // Make space findable to the heap, will also delete space when runtime is cleaned up
201 AddSpace(space);
Mathieu Chartiercc236d72012-07-20 10:29:05 -0700202
Hiroshi Yamauchi3ddbd422013-12-06 17:43:36 -0800203 // Succeeds, fits without adjusting the footprint limit.
Mathieu Chartier4e305412014-02-19 10:54:44 -0800204 ptr1.reset(space->Alloc(self, 1 * MB, &dummy));
205 EXPECT_TRUE(ptr1.get() != nullptr);
Hiroshi Yamauchi3ddbd422013-12-06 17:43:36 -0800206 InstallClass(ptr1, 1 * MB);
Mathieu Chartiercc236d72012-07-20 10:29:05 -0700207
Hiroshi Yamauchi3ddbd422013-12-06 17:43:36 -0800208 // Fails, requires a higher footprint limit.
209 ptr2 = space->Alloc(self, 8 * MB, &dummy);
Mathieu Chartier4e305412014-02-19 10:54:44 -0800210 EXPECT_TRUE(ptr2 == nullptr);
Mathieu Chartiercc236d72012-07-20 10:29:05 -0700211
Hiroshi Yamauchi3ddbd422013-12-06 17:43:36 -0800212 // Succeeds, adjusts the footprint.
Mathieu Chartier4e305412014-02-19 10:54:44 -0800213 ptr3.reset(space->AllocWithGrowth(self, 2 * MB, &dummy));
214 EXPECT_TRUE(ptr3.get() != nullptr);
Hiroshi Yamauchi3ddbd422013-12-06 17:43:36 -0800215 InstallClass(ptr3, 2 * MB);
Mathieu Chartier4e305412014-02-19 10:54:44 -0800216 space->Free(self, ptr3.reset(nullptr));
Mathieu Chartiercc236d72012-07-20 10:29:05 -0700217
Hiroshi Yamauchi3ddbd422013-12-06 17:43:36 -0800218 // Final clean up.
Mathieu Chartier4e305412014-02-19 10:54:44 -0800219 free1 = space->AllocationSize(ptr1.get());
220 space->Free(self, ptr1.reset(nullptr));
Hiroshi Yamauchi3ddbd422013-12-06 17:43:36 -0800221 EXPECT_LE(1U * MB, free1);
Mathieu Chartiercc236d72012-07-20 10:29:05 -0700222}
223
Hiroshi Yamauchi3ddbd422013-12-06 17:43:36 -0800224TEST_F(SpaceTest, ZygoteSpace_DlMallocSpace) {
225 ZygoteSpaceTestBody(SpaceTest::CreateDlMallocSpace);
226}
227
228TEST_F(SpaceTest, ZygoteSpace_RosAllocSpace) {
229 ZygoteSpaceTestBody(SpaceTest::CreateRosAllocSpace);
230}
231
232void SpaceTest::AllocAndFreeTestBody(CreateSpaceFn create_space) {
Mathieu Chartiereb5710e2013-07-25 15:19:42 -0700233 size_t dummy = 0;
Mathieu Chartier4e305412014-02-19 10:54:44 -0800234 MallocSpace* space(create_space("test", 4 * MB, 16 * MB, 16 * MB, nullptr));
235 ASSERT_TRUE(space != nullptr);
Ian Rogers50b35e22012-10-04 10:09:15 -0700236 Thread* self = Thread::Current();
Mathieu Chartier4e305412014-02-19 10:54:44 -0800237 ScopedObjectAccess soa(self);
Ian Rogers30fab402012-01-23 15:43:46 -0800238
Ian Rogers3bb17a62012-01-27 23:56:44 -0800239 // Make space findable to the heap, will also delete space when runtime is cleaned up
Mathieu Chartier590fee92013-09-13 13:46:47 -0700240 AddSpace(space);
Carl Shapiro69759ea2011-07-21 18:13:35 -0700241
Ian Rogers3bb17a62012-01-27 23:56:44 -0800242 // Succeeds, fits without adjusting the footprint limit.
Mathieu Chartier4e305412014-02-19 10:54:44 -0800243 SirtRef<mirror::Object> ptr1(self, space->Alloc(self, 1 * MB, &dummy));
244 EXPECT_TRUE(ptr1.get() != nullptr);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700245 InstallClass(ptr1, 1 * MB);
Carl Shapiro69759ea2011-07-21 18:13:35 -0700246
Ian Rogers3bb17a62012-01-27 23:56:44 -0800247 // Fails, requires a higher footprint limit.
Mathieu Chartiereb5710e2013-07-25 15:19:42 -0700248 mirror::Object* ptr2 = space->Alloc(self, 8 * MB, &dummy);
Mathieu Chartier4e305412014-02-19 10:54:44 -0800249 EXPECT_TRUE(ptr2 == nullptr);
Carl Shapiro69759ea2011-07-21 18:13:35 -0700250
251 // Succeeds, adjusts the footprint.
Hiroshi Yamauchi50b29282013-07-30 13:58:37 -0700252 size_t ptr3_bytes_allocated;
Mathieu Chartier4e305412014-02-19 10:54:44 -0800253 SirtRef<mirror::Object> ptr3(self, space->AllocWithGrowth(self, 8 * MB, &ptr3_bytes_allocated));
254 EXPECT_TRUE(ptr3.get() != nullptr);
Hiroshi Yamauchi50b29282013-07-30 13:58:37 -0700255 EXPECT_LE(8U * MB, ptr3_bytes_allocated);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700256 InstallClass(ptr3, 8 * MB);
Carl Shapiro69759ea2011-07-21 18:13:35 -0700257
Ian Rogers3bb17a62012-01-27 23:56:44 -0800258 // Fails, requires a higher footprint limit.
Mathieu Chartiereb5710e2013-07-25 15:19:42 -0700259 mirror::Object* ptr4 = space->Alloc(self, 8 * MB, &dummy);
Mathieu Chartier4e305412014-02-19 10:54:44 -0800260 EXPECT_TRUE(ptr4 == nullptr);
Carl Shapiro69759ea2011-07-21 18:13:35 -0700261
262 // Also fails, requires a higher allowed footprint.
Mathieu Chartiereb5710e2013-07-25 15:19:42 -0700263 mirror::Object* ptr5 = space->AllocWithGrowth(self, 8 * MB, &dummy);
Mathieu Chartier4e305412014-02-19 10:54:44 -0800264 EXPECT_TRUE(ptr5 == nullptr);
Carl Shapiro69759ea2011-07-21 18:13:35 -0700265
266 // Release some memory.
Mathieu Chartier4e305412014-02-19 10:54:44 -0800267 size_t free3 = space->AllocationSize(ptr3.get());
Hiroshi Yamauchi50b29282013-07-30 13:58:37 -0700268 EXPECT_EQ(free3, ptr3_bytes_allocated);
Mathieu Chartier4e305412014-02-19 10:54:44 -0800269 space->Free(self, ptr3.reset(nullptr));
Carl Shapiro69759ea2011-07-21 18:13:35 -0700270 EXPECT_LE(8U * MB, free3);
271
272 // Succeeds, now that memory has been freed.
Mathieu Chartier4e305412014-02-19 10:54:44 -0800273 SirtRef<mirror::Object> ptr6(self, space->AllocWithGrowth(self, 9 * MB, &dummy));
274 EXPECT_TRUE(ptr6.get() != nullptr);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700275 InstallClass(ptr6, 9 * MB);
Carl Shapiro69759ea2011-07-21 18:13:35 -0700276
277 // Final clean up.
Mathieu Chartier4e305412014-02-19 10:54:44 -0800278 size_t free1 = space->AllocationSize(ptr1.get());
279 space->Free(self, ptr1.reset(nullptr));
Carl Shapiro69759ea2011-07-21 18:13:35 -0700280 EXPECT_LE(1U * MB, free1);
281}
282
Hiroshi Yamauchi3ddbd422013-12-06 17:43:36 -0800283TEST_F(SpaceTest, AllocAndFree_DlMallocSpace) {
284 AllocAndFreeTestBody(SpaceTest::CreateDlMallocSpace);
285}
286TEST_F(SpaceTest, AllocAndFree_RosAllocSpace) {
287 AllocAndFreeTestBody(SpaceTest::CreateRosAllocSpace);
288}
289
Mathieu Chartiereb5710e2013-07-25 15:19:42 -0700290TEST_F(SpaceTest, LargeObjectTest) {
291 size_t rand_seed = 0;
292 for (size_t i = 0; i < 2; ++i) {
Mathieu Chartier4e305412014-02-19 10:54:44 -0800293 LargeObjectSpace* los = nullptr;
Mathieu Chartiereb5710e2013-07-25 15:19:42 -0700294 if (i == 0) {
295 los = space::LargeObjectMapSpace::Create("large object space");
296 } else {
Mathieu Chartier4e305412014-02-19 10:54:44 -0800297 los = space::FreeListSpace::Create("large object space", nullptr, 128 * MB);
Mathieu Chartiereb5710e2013-07-25 15:19:42 -0700298 }
299
300 static const size_t num_allocations = 64;
301 static const size_t max_allocation_size = 0x100000;
302 std::vector<std::pair<mirror::Object*, size_t> > requests;
303
304 for (size_t phase = 0; phase < 2; ++phase) {
305 while (requests.size() < num_allocations) {
306 size_t request_size = test_rand(&rand_seed) % max_allocation_size;
307 size_t allocation_size = 0;
308 mirror::Object* obj = los->Alloc(Thread::Current(), request_size, &allocation_size);
Mathieu Chartier4e305412014-02-19 10:54:44 -0800309 ASSERT_TRUE(obj != nullptr);
Mathieu Chartiereb5710e2013-07-25 15:19:42 -0700310 ASSERT_EQ(allocation_size, los->AllocationSize(obj));
311 ASSERT_GE(allocation_size, request_size);
312 // Fill in our magic value.
313 byte magic = (request_size & 0xFF) | 1;
314 memset(obj, magic, request_size);
315 requests.push_back(std::make_pair(obj, request_size));
316 }
317
318 // "Randomly" shuffle the requests.
319 for (size_t k = 0; k < 10; ++k) {
320 for (size_t j = 0; j < requests.size(); ++j) {
321 std::swap(requests[j], requests[test_rand(&rand_seed) % requests.size()]);
322 }
323 }
324
325 // Free 1 / 2 the allocations the first phase, and all the second phase.
326 size_t limit = !phase ? requests.size() / 2 : 0;
327 while (requests.size() > limit) {
328 mirror::Object* obj = requests.back().first;
329 size_t request_size = requests.back().second;
330 requests.pop_back();
331 byte magic = (request_size & 0xFF) | 1;
332 for (size_t k = 0; k < request_size; ++k) {
333 ASSERT_EQ(reinterpret_cast<const byte*>(obj)[k], magic);
334 }
335 ASSERT_GE(los->Free(Thread::Current(), obj), request_size);
336 }
337 }
338
339 size_t bytes_allocated = 0;
340 // Checks that the coalescing works.
341 mirror::Object* obj = los->Alloc(Thread::Current(), 100 * MB, &bytes_allocated);
Mathieu Chartier4e305412014-02-19 10:54:44 -0800342 EXPECT_TRUE(obj != nullptr);
Mathieu Chartiereb5710e2013-07-25 15:19:42 -0700343 los->Free(Thread::Current(), obj);
344
345 EXPECT_EQ(0U, los->GetBytesAllocated());
346 EXPECT_EQ(0U, los->GetObjectsAllocated());
347 delete los;
348 }
349}
350
Hiroshi Yamauchi3ddbd422013-12-06 17:43:36 -0800351void SpaceTest::AllocAndFreeListTestBody(CreateSpaceFn create_space) {
Mathieu Chartier4e305412014-02-19 10:54:44 -0800352 MallocSpace* space(create_space("test", 4 * MB, 16 * MB, 16 * MB, nullptr));
353 ASSERT_TRUE(space != nullptr);
Ian Rogers3bb17a62012-01-27 23:56:44 -0800354
355 // Make space findable to the heap, will also delete space when runtime is cleaned up
Mathieu Chartier590fee92013-09-13 13:46:47 -0700356 AddSpace(space);
Ian Rogers50b35e22012-10-04 10:09:15 -0700357 Thread* self = Thread::Current();
Mathieu Chartier4e305412014-02-19 10:54:44 -0800358 ScopedObjectAccess soa(self);
Ian Rogers3bb17a62012-01-27 23:56:44 -0800359
360 // Succeeds, fits without adjusting the max allowed footprint.
Ian Rogers2dd0e2c2013-01-24 12:42:14 -0800361 mirror::Object* lots_of_objects[1024];
Elliott Hughesb25c3f62012-03-26 16:35:06 -0700362 for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
Mathieu Chartiereb5710e2013-07-25 15:19:42 -0700363 size_t allocation_size = 0;
Hiroshi Yamauchi4d2efce2014-02-10 16:19:09 -0800364 size_t size_of_zero_length_byte_array = SizeOfZeroLengthByteArray();
365 lots_of_objects[i] = space->Alloc(self, size_of_zero_length_byte_array, &allocation_size);
Ian Rogersef7d42f2014-01-06 12:55:46 -0800366 EXPECT_TRUE(lots_of_objects[i] != nullptr);
Mathieu Chartier4e305412014-02-19 10:54:44 -0800367 SirtRef<mirror::Object> obj(self, lots_of_objects[i]);
368 InstallClass(obj, size_of_zero_length_byte_array);
369 lots_of_objects[i] = obj.get();
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700370 EXPECT_EQ(allocation_size, space->AllocationSize(lots_of_objects[i]));
Ian Rogers3bb17a62012-01-27 23:56:44 -0800371 }
372
Mathieu Chartier4e305412014-02-19 10:54:44 -0800373 // Release memory and check pointers are nullptr.
374 space->FreeList(self, arraysize(lots_of_objects), lots_of_objects);
375 for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
376 EXPECT_TRUE(lots_of_objects[i] == nullptr);
Ian Rogers3bb17a62012-01-27 23:56:44 -0800377 }
378
379 // Succeeds, fits by adjusting the max allowed footprint.
Elliott Hughesb25c3f62012-03-26 16:35:06 -0700380 for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
Mathieu Chartiereb5710e2013-07-25 15:19:42 -0700381 size_t allocation_size = 0;
382 lots_of_objects[i] = space->AllocWithGrowth(self, 1024, &allocation_size);
Ian Rogersef7d42f2014-01-06 12:55:46 -0800383 EXPECT_TRUE(lots_of_objects[i] != nullptr);
Mathieu Chartier4e305412014-02-19 10:54:44 -0800384 SirtRef<mirror::Object> obj(self, lots_of_objects[i]);
385 InstallClass(obj, 1024);
386 lots_of_objects[i] = obj.get();
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700387 EXPECT_EQ(allocation_size, space->AllocationSize(lots_of_objects[i]));
Ian Rogers3bb17a62012-01-27 23:56:44 -0800388 }
389
Mathieu Chartier4e305412014-02-19 10:54:44 -0800390 // Release memory and check pointers are nullptr
391 // TODO: This isn't compaction safe, fix.
392 space->FreeList(self, arraysize(lots_of_objects), lots_of_objects);
393 for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
394 EXPECT_TRUE(lots_of_objects[i] == nullptr);
Ian Rogers3bb17a62012-01-27 23:56:44 -0800395 }
396}
397
Hiroshi Yamauchi3ddbd422013-12-06 17:43:36 -0800398TEST_F(SpaceTest, AllocAndFreeList_DlMallocSpace) {
399 AllocAndFreeListTestBody(SpaceTest::CreateDlMallocSpace);
400}
401TEST_F(SpaceTest, AllocAndFreeList_RosAllocSpace) {
402 AllocAndFreeListTestBody(SpaceTest::CreateRosAllocSpace);
403}
404
405void SpaceTest::SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t object_size,
Ian Rogers3bb17a62012-01-27 23:56:44 -0800406 int round, size_t growth_limit) {
407 if (((object_size > 0 && object_size >= static_cast<intptr_t>(growth_limit))) ||
408 ((object_size < 0 && -object_size >= static_cast<intptr_t>(growth_limit)))) {
409 // No allocation can succeed
410 return;
411 }
Ian Rogers3bb17a62012-01-27 23:56:44 -0800412
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700413 // The space's footprint equals amount of resources requested from system
414 size_t footprint = space->GetFootprint();
Ian Rogers3bb17a62012-01-27 23:56:44 -0800415
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700416 // The space must at least have its book keeping allocated
Ian Rogers3bb17a62012-01-27 23:56:44 -0800417 EXPECT_GT(footprint, 0u);
418
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700419 // But it shouldn't exceed the initial size
Ian Rogers3bb17a62012-01-27 23:56:44 -0800420 EXPECT_LE(footprint, growth_limit);
421
422 // space's size shouldn't exceed the initial size
423 EXPECT_LE(space->Size(), growth_limit);
424
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700425 // this invariant should always hold or else the space has grown to be larger than what the
Ian Rogers3bb17a62012-01-27 23:56:44 -0800426 // space believes its size is (which will break invariants)
427 EXPECT_GE(space->Size(), footprint);
428
429 // Fill the space with lots of small objects up to the growth limit
430 size_t max_objects = (growth_limit / (object_size > 0 ? object_size : 8)) + 1;
Ian Rogers2dd0e2c2013-01-24 12:42:14 -0800431 UniquePtr<mirror::Object*[]> lots_of_objects(new mirror::Object*[max_objects]);
Ian Rogers3bb17a62012-01-27 23:56:44 -0800432 size_t last_object = 0; // last object for which allocation succeeded
433 size_t amount_allocated = 0; // amount of space allocated
Ian Rogers50b35e22012-10-04 10:09:15 -0700434 Thread* self = Thread::Current();
Mathieu Chartier4e305412014-02-19 10:54:44 -0800435 ScopedObjectAccess soa(self);
Mathieu Chartiereb5710e2013-07-25 15:19:42 -0700436 size_t rand_seed = 123456789;
Elliott Hughesb25c3f62012-03-26 16:35:06 -0700437 for (size_t i = 0; i < max_objects; i++) {
Ian Rogers3bb17a62012-01-27 23:56:44 -0800438 size_t alloc_fails = 0; // number of failed allocations
439 size_t max_fails = 30; // number of times we fail allocation before giving up
440 for (; alloc_fails < max_fails; alloc_fails++) {
441 size_t alloc_size;
442 if (object_size > 0) {
443 alloc_size = object_size;
444 } else {
Mathieu Chartiereb5710e2013-07-25 15:19:42 -0700445 alloc_size = test_rand(&rand_seed) % static_cast<size_t>(-object_size);
Hiroshi Yamauchi4d2efce2014-02-10 16:19:09 -0800446 // Note the minimum size, which is the size of a zero-length byte array.
447 size_t size_of_zero_length_byte_array = SizeOfZeroLengthByteArray();
448 if (alloc_size < size_of_zero_length_byte_array) {
449 alloc_size = size_of_zero_length_byte_array;
Ian Rogers3bb17a62012-01-27 23:56:44 -0800450 }
451 }
Mathieu Chartier4e305412014-02-19 10:54:44 -0800452 SirtRef<mirror::Object> object(self, nullptr);
Mathieu Chartiereb5710e2013-07-25 15:19:42 -0700453 size_t bytes_allocated = 0;
Ian Rogers3bb17a62012-01-27 23:56:44 -0800454 if (round <= 1) {
Mathieu Chartier4e305412014-02-19 10:54:44 -0800455 object.reset(space->Alloc(self, alloc_size, &bytes_allocated));
Ian Rogers3bb17a62012-01-27 23:56:44 -0800456 } else {
Mathieu Chartier4e305412014-02-19 10:54:44 -0800457 object.reset(space->AllocWithGrowth(self, alloc_size, &bytes_allocated));
Ian Rogers3bb17a62012-01-27 23:56:44 -0800458 }
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700459 footprint = space->GetFootprint();
Ian Rogers3bb17a62012-01-27 23:56:44 -0800460 EXPECT_GE(space->Size(), footprint); // invariant
Mathieu Chartier4e305412014-02-19 10:54:44 -0800461 if (object.get() != nullptr) { // allocation succeeded
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700462 InstallClass(object, alloc_size);
Mathieu Chartier4e305412014-02-19 10:54:44 -0800463 lots_of_objects[i] = object.get();
464 size_t allocation_size = space->AllocationSize(object.get());
Mathieu Chartiereb5710e2013-07-25 15:19:42 -0700465 EXPECT_EQ(bytes_allocated, allocation_size);
Ian Rogers3bb17a62012-01-27 23:56:44 -0800466 if (object_size > 0) {
467 EXPECT_GE(allocation_size, static_cast<size_t>(object_size));
468 } else {
469 EXPECT_GE(allocation_size, 8u);
470 }
471 amount_allocated += allocation_size;
472 break;
473 }
474 }
475 if (alloc_fails == max_fails) {
476 last_object = i;
477 break;
478 }
479 }
480 CHECK_NE(last_object, 0u); // we should have filled the space
481 EXPECT_GT(amount_allocated, 0u);
482
483 // We shouldn't have gone past the growth_limit
484 EXPECT_LE(amount_allocated, growth_limit);
485 EXPECT_LE(footprint, growth_limit);
486 EXPECT_LE(space->Size(), growth_limit);
487
488 // footprint and size should agree with amount allocated
489 EXPECT_GE(footprint, amount_allocated);
490 EXPECT_GE(space->Size(), amount_allocated);
491
492 // Release storage in a semi-adhoc manner
493 size_t free_increment = 96;
Elliott Hughesb25c3f62012-03-26 16:35:06 -0700494 while (true) {
Mathieu Chartier4e305412014-02-19 10:54:44 -0800495 {
496 ScopedThreadStateChange tsc(self, kNative);
497 // Give the space a haircut.
498 space->Trim();
499 }
Ian Rogers3bb17a62012-01-27 23:56:44 -0800500
501 // Bounds sanity
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700502 footprint = space->GetFootprint();
Ian Rogers3bb17a62012-01-27 23:56:44 -0800503 EXPECT_LE(amount_allocated, growth_limit);
504 EXPECT_GE(footprint, amount_allocated);
505 EXPECT_LE(footprint, growth_limit);
506 EXPECT_GE(space->Size(), amount_allocated);
507 EXPECT_LE(space->Size(), growth_limit);
508
509 if (free_increment == 0) {
510 break;
511 }
512
Mathieu Chartier4e305412014-02-19 10:54:44 -0800513 // Free some objects
514 for (size_t i = 0; i < last_object; i += free_increment) {
515 mirror::Object* object = lots_of_objects.get()[i];
516 if (object == nullptr) {
517 continue;
Ian Rogers3bb17a62012-01-27 23:56:44 -0800518 }
Mathieu Chartier4e305412014-02-19 10:54:44 -0800519 size_t allocation_size = space->AllocationSize(object);
520 if (object_size > 0) {
521 EXPECT_GE(allocation_size, static_cast<size_t>(object_size));
522 } else {
523 EXPECT_GE(allocation_size, 8u);
524 }
525 space->Free(self, object);
526 lots_of_objects.get()[i] = nullptr;
527 amount_allocated -= allocation_size;
528 footprint = space->GetFootprint();
529 EXPECT_GE(space->Size(), footprint); // invariant
Ian Rogers3bb17a62012-01-27 23:56:44 -0800530 }
Mathieu Chartier4e305412014-02-19 10:54:44 -0800531
532 free_increment >>= 1;
Ian Rogers3bb17a62012-01-27 23:56:44 -0800533 }
Mathieu Chartier4e305412014-02-19 10:54:44 -0800534
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700535 // The space has become empty here before allocating a large object
536 // below. For RosAlloc, revoke thread-local runs, which are kept
537 // even when empty for a performance reason, so that they won't
538 // cause the following large object allocation to fail due to
539 // potential fragmentation. Note they are normally revoked at each
540 // GC (but no GC here.)
541 space->RevokeAllThreadLocalBuffers();
542
Ian Rogers3bb17a62012-01-27 23:56:44 -0800543 // All memory was released, try a large allocation to check freed memory is being coalesced
Mathieu Chartier4e305412014-02-19 10:54:44 -0800544 SirtRef<mirror::Object> large_object(self, nullptr);
Ian Rogers3bb17a62012-01-27 23:56:44 -0800545 size_t three_quarters_space = (growth_limit / 2) + (growth_limit / 4);
Mathieu Chartiereb5710e2013-07-25 15:19:42 -0700546 size_t bytes_allocated = 0;
Ian Rogers3bb17a62012-01-27 23:56:44 -0800547 if (round <= 1) {
Mathieu Chartier4e305412014-02-19 10:54:44 -0800548 large_object.reset(space->Alloc(self, three_quarters_space, &bytes_allocated));
Ian Rogers3bb17a62012-01-27 23:56:44 -0800549 } else {
Mathieu Chartier4e305412014-02-19 10:54:44 -0800550 large_object.reset(space->AllocWithGrowth(self, three_quarters_space, &bytes_allocated));
Ian Rogers3bb17a62012-01-27 23:56:44 -0800551 }
Mathieu Chartier4e305412014-02-19 10:54:44 -0800552 EXPECT_TRUE(large_object.get() != nullptr);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700553 InstallClass(large_object, three_quarters_space);
Ian Rogers3bb17a62012-01-27 23:56:44 -0800554
555 // Sanity check footprint
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700556 footprint = space->GetFootprint();
Ian Rogers3bb17a62012-01-27 23:56:44 -0800557 EXPECT_LE(footprint, growth_limit);
558 EXPECT_GE(space->Size(), footprint);
559 EXPECT_LE(space->Size(), growth_limit);
560
561 // Clean up
Mathieu Chartier4e305412014-02-19 10:54:44 -0800562 space->Free(self, large_object.reset(nullptr));
563
Ian Rogers3bb17a62012-01-27 23:56:44 -0800564 // Sanity check footprint
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700565 footprint = space->GetFootprint();
Ian Rogers3bb17a62012-01-27 23:56:44 -0800566 EXPECT_LE(footprint, growth_limit);
567 EXPECT_GE(space->Size(), footprint);
568 EXPECT_LE(space->Size(), growth_limit);
569}
570
Hiroshi Yamauchi3ddbd422013-12-06 17:43:36 -0800571void SpaceTest::SizeFootPrintGrowthLimitAndTrimDriver(size_t object_size, CreateSpaceFn create_space) {
Hiroshi Yamauchi4d2efce2014-02-10 16:19:09 -0800572 if (object_size < SizeOfZeroLengthByteArray()) {
573 // Too small for the object layout/model.
574 return;
575 }
Ian Rogers3bb17a62012-01-27 23:56:44 -0800576 size_t initial_size = 4 * MB;
577 size_t growth_limit = 8 * MB;
578 size_t capacity = 16 * MB;
Mathieu Chartier4e305412014-02-19 10:54:44 -0800579 MallocSpace* space(create_space("test", initial_size, growth_limit, capacity, nullptr));
580 ASSERT_TRUE(space != nullptr);
Ian Rogers3bb17a62012-01-27 23:56:44 -0800581
582 // Basic sanity
583 EXPECT_EQ(space->Capacity(), growth_limit);
584 EXPECT_EQ(space->NonGrowthLimitCapacity(), capacity);
585
586 // Make space findable to the heap, will also delete space when runtime is cleaned up
Mathieu Chartier590fee92013-09-13 13:46:47 -0700587 AddSpace(space);
Ian Rogers3bb17a62012-01-27 23:56:44 -0800588
589 // In this round we don't allocate with growth and therefore can't grow past the initial size.
590 // This effectively makes the growth_limit the initial_size, so assert this.
591 SizeFootPrintGrowthLimitAndTrimBody(space, object_size, 1, initial_size);
592 SizeFootPrintGrowthLimitAndTrimBody(space, object_size, 2, growth_limit);
593 // Remove growth limit
594 space->ClearGrowthLimit();
595 EXPECT_EQ(space->Capacity(), capacity);
596 SizeFootPrintGrowthLimitAndTrimBody(space, object_size, 3, capacity);
597}
598
// Instantiates four tests for one allocation size:
//  - AllocationsOf_<name>: fixed-size allocations of `size` bytes;
//  - RandomAllocationsWithMax_<name>: passes the negated size to the driver,
//    which (judging by the test name) requests randomized allocation sizes
//    capped at `size` — confirm against SizeFootPrintGrowthLimitAndTrimBody;
// each pair runs against both the DlMalloc and the RosAlloc space backends.
// NOTE: comments cannot appear inside the macro body — a `//` before the
// line-continuation backslash would swallow it.
#define TEST_SizeFootPrintGrowthLimitAndTrim(name, size) \
  TEST_F(SpaceTest, SizeFootPrintGrowthLimitAndTrim_AllocationsOf_##name##_DlMallocSpace) { \
    SizeFootPrintGrowthLimitAndTrimDriver(size, SpaceTest::CreateDlMallocSpace); \
  } \
  TEST_F(SpaceTest, SizeFootPrintGrowthLimitAndTrim_RandomAllocationsWithMax_##name##_DlMallocSpace) { \
    SizeFootPrintGrowthLimitAndTrimDriver(-size, SpaceTest::CreateDlMallocSpace); \
  } \
  TEST_F(SpaceTest, SizeFootPrintGrowthLimitAndTrim_AllocationsOf_##name##_RosAllocSpace) { \
    SizeFootPrintGrowthLimitAndTrimDriver(size, SpaceTest::CreateRosAllocSpace); \
  } \
  TEST_F(SpaceTest, SizeFootPrintGrowthLimitAndTrim_RandomAllocationsWithMax_##name##_RosAllocSpace) { \
    SizeFootPrintGrowthLimitAndTrimDriver(-size, SpaceTest::CreateRosAllocSpace); \
  }
612
// Each size test is its own test so that we get a fresh heap each time
// 12B gets only the fixed-size variants, written out by hand instead of via
// the macro above; the random-size variants are apparently omitted for this
// size — confirm whether 12 is below the randomizer's minimum.
TEST_F(SpaceTest, SizeFootPrintGrowthLimitAndTrim_AllocationsOf_12B_DlMallocSpace) {
  SizeFootPrintGrowthLimitAndTrimDriver(12, SpaceTest::CreateDlMallocSpace);
}
TEST_F(SpaceTest, SizeFootPrintGrowthLimitAndTrim_AllocationsOf_12B_RosAllocSpace) {
  SizeFootPrintGrowthLimitAndTrimDriver(12, SpaceTest::CreateRosAllocSpace);
}
// Instantiate the four test variants (fixed and random size, DlMalloc and
// RosAlloc) for each allocation size from 16 bytes up to 8MB.
TEST_SizeFootPrintGrowthLimitAndTrim(16B, 16)
TEST_SizeFootPrintGrowthLimitAndTrim(24B, 24)
TEST_SizeFootPrintGrowthLimitAndTrim(32B, 32)
TEST_SizeFootPrintGrowthLimitAndTrim(64B, 64)
TEST_SizeFootPrintGrowthLimitAndTrim(128B, 128)
TEST_SizeFootPrintGrowthLimitAndTrim(1KB, 1 * KB)
TEST_SizeFootPrintGrowthLimitAndTrim(4KB, 4 * KB)
TEST_SizeFootPrintGrowthLimitAndTrim(1MB, 1 * MB)
TEST_SizeFootPrintGrowthLimitAndTrim(4MB, 4 * MB)
TEST_SizeFootPrintGrowthLimitAndTrim(8MB, 8 * MB)
630
Ian Rogers1d54e732013-05-02 21:10:01 -0700631} // namespace space
632} // namespace gc
Carl Shapiro69759ea2011-07-21 18:13:35 -0700633} // namespace art