blob: 4d2db11ac2206bffb6d211b91a35b096c70e392b [file] [log] [blame]
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
Carl Shapiro69759ea2011-07-21 18:13:35 -070016
Andreas Gampea7433512014-02-21 13:19:23 -080017#ifndef ART_RUNTIME_GC_SPACE_SPACE_TEST_H_
18#define ART_RUNTIME_GC_SPACE_SPACE_TEST_H_
19
Brian Carlstroma1ce1fe2014-02-24 23:23:58 -080020#include <stdint.h>
Ian Rogers700a4022014-05-19 16:49:03 -070021#include <memory>
Brian Carlstroma1ce1fe2014-02-24 23:23:58 -080022
23#include "common_runtime_test.h"
Brian Carlstrom578bbdc2011-07-21 14:07:47 -070024#include "globals.h"
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -070025#include "mirror/array-inl.h"
Mathieu Chartiere401d142015-04-22 13:56:20 -070026#include "mirror/class-inl.h"
27#include "mirror/class_loader.h"
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -070028#include "mirror/object-inl.h"
Ian Rogerse63db272014-07-15 15:36:11 -070029#include "scoped_thread_state_change.h"
30#include "zygote_space.h"
Carl Shapiro69759ea2011-07-21 18:13:35 -070031
32namespace art {
Ian Rogers1d54e732013-05-02 21:10:01 -070033namespace gc {
34namespace space {
Carl Shapiro69759ea2011-07-21 18:13:35 -070035
// Test fixture shared by the MallocSpace implementations (dlmalloc, rosalloc).
// Provides helpers to allocate byte arrays in a space and the bodies of the
// parameterized space tests declared at the bottom of the class.
class SpaceTest : public CommonRuntimeTest {
 public:
  // Cached JNI reference to the byte-array class ("[B"), lazily initialized by
  // GetByteArrayClass(). NOTE(review): this stores the result of NewLocalRef
  // in a member that outlives the caller's JNI frame — presumably safe in
  // these tests' single-frame usage; confirm it is not expected to survive a
  // local-reference frame pop.
  jobject byte_array_class_;

  SpaceTest() : byte_array_class_(nullptr) {
  }

  // Registers `space` with the running heap and makes it the default
  // allocation space. Thread-local buffers are revoked first (unless `revoke`
  // is false, e.g. right after a revoke has already been done).
  void AddSpace(ContinuousSpace* space, bool revoke = true) {
    Heap* heap = Runtime::Current()->GetHeap();
    if (revoke) {
      heap->RevokeAllThreadLocalBuffers();
    }
    heap->AddSpace(space);
    heap->SetSpaceAsDefault(space);
  }

  // Returns the mirror::Class for "[B", resolving and caching it in
  // byte_array_class_ on first use.
  mirror::Class* GetByteArrayClass(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) {
    StackHandleScope<1> hs(self);
    auto null_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
    if (byte_array_class_ == nullptr) {
      mirror::Class* byte_array_class =
          Runtime::Current()->GetClassLinker()->FindClass(self, "[B", null_loader);
      EXPECT_TRUE(byte_array_class != nullptr);
      byte_array_class_ = self->GetJniEnv()->NewLocalRef(byte_array_class);
      EXPECT_TRUE(byte_array_class_ != nullptr);
    }
    return reinterpret_cast<mirror::Class*>(self->DecodeJObject(byte_array_class_));
  }

  // Allocates `bytes` from `alloc_space` without growing the footprint limit
  // and, on success, turns the raw memory into a valid byte array so the GC
  // can walk it. Returns nullptr on failure.
  mirror::Object* Alloc(space::MallocSpace* alloc_space, Thread* self, size_t bytes,
                        size_t* bytes_allocated, size_t* usable_size,
                        size_t* bytes_tl_bulk_allocated)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    StackHandleScope<1> hs(self);
    // Hold the class in a handle across the allocation, which may suspend.
    Handle<mirror::Class> byte_array_class(hs.NewHandle(GetByteArrayClass(self)));
    mirror::Object* obj = alloc_space->Alloc(self, bytes, bytes_allocated, usable_size,
                                             bytes_tl_bulk_allocated);
    if (obj != nullptr) {
      InstallClass(obj, byte_array_class.Get(), bytes);
    }
    return obj;
  }

  // Same as Alloc() but permits the space to grow its footprint to satisfy
  // the request.
  mirror::Object* AllocWithGrowth(space::MallocSpace* alloc_space, Thread* self, size_t bytes,
                                  size_t* bytes_allocated, size_t* usable_size,
                                  size_t* bytes_tl_bulk_allocated)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    StackHandleScope<1> hs(self);
    Handle<mirror::Class> byte_array_class(hs.NewHandle(GetByteArrayClass(self)));
    mirror::Object* obj = alloc_space->AllocWithGrowth(self, bytes, bytes_allocated, usable_size,
                                                       bytes_tl_bulk_allocated);
    if (obj != nullptr) {
      InstallClass(obj, byte_array_class.Get(), bytes);
    }
    return obj;
  }

  // Stamps a freshly allocated chunk of `size` bytes as a byte array: sets the
  // class, the read-barrier state, and a length consistent with `size`.
  void InstallClass(mirror::Object* o, mirror::Class* byte_array_class, size_t size)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    // Note the minimum size, which is the size of a zero-length byte array.
    EXPECT_GE(size, SizeOfZeroLengthByteArray());
    EXPECT_TRUE(byte_array_class != nullptr);
    o->SetClass(byte_array_class);
    if (kUseBakerOrBrooksReadBarrier) {
      // Like the proper heap object allocation, install and verify
      // the correct read barrier pointer.
      if (kUseBrooksReadBarrier) {
        o->SetReadBarrierPointer(o);
      }
      o->AssertReadBarrierPointer();
    }
    // kVerifyNone: the object is mid-construction, so skip verification.
    mirror::Array* arr = o->AsArray<kVerifyNone>();
    size_t header_size = SizeOfZeroLengthByteArray();
    int32_t length = size - header_size;
    arr->SetLength(length);
    EXPECT_EQ(arr->SizeOf<kVerifyNone>(), size);
  }

  // Header size of a byte array, i.e. the smallest allocation InstallClass()
  // can label.
  static size_t SizeOfZeroLengthByteArray() {
    return mirror::Array::DataOffset(Primitive::ComponentSize(Primitive::kPrimByte)).Uint32Value();
  }

  // Factory signature supplied by each concrete space test
  // (e.g. dlmalloc/rosalloc create functions).
  typedef MallocSpace* (*CreateSpaceFn)(const std::string& name, size_t initial_size, size_t growth_limit,
                                        size_t capacity, uint8_t* requested_begin);
  void InitTestBody(CreateSpaceFn create_space);
  void ZygoteSpaceTestBody(CreateSpaceFn create_space);
  void AllocAndFreeTestBody(CreateSpaceFn create_space);
  void AllocAndFreeListTestBody(CreateSpaceFn create_space);

  void SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t object_size,
                                           int round, size_t growth_limit);
  void SizeFootPrintGrowthLimitAndTrimDriver(size_t object_size, CreateSpaceFn create_space);
};
Brian Carlstrom9b7f2c22011-09-27 14:35:04 -0700129
// Deterministic pseudo-random number generator (linear congruential, glibc
// constants) so that randomized allocation sizes are reproducible. Advances
// *seed in place and returns the new value.
static inline size_t test_rand(size_t* seed) {
  const size_t next = *seed * 1103515245u + 12345u;
  *seed = next;
  return next;
}
134
Hiroshi Yamauchi3ddbd422013-12-06 17:43:36 -0800135void SpaceTest::InitTestBody(CreateSpaceFn create_space) {
Andreas Gampe369810a2015-01-14 19:53:31 -0800136 // This will lead to error messages in the log.
137 ScopedLogSeverity sls(LogSeverity::FATAL);
138
Carl Shapiro69759ea2011-07-21 18:13:35 -0700139 {
jeffhaoc1160702011-10-27 15:48:45 -0700140 // Init < max == growth
Ian Rogers700a4022014-05-19 16:49:03 -0700141 std::unique_ptr<Space> space(create_space("test", 16 * MB, 32 * MB, 32 * MB, nullptr));
Mathieu Chartier4e305412014-02-19 10:54:44 -0800142 EXPECT_TRUE(space.get() != nullptr);
Carl Shapiro69759ea2011-07-21 18:13:35 -0700143 }
144 {
jeffhaoc1160702011-10-27 15:48:45 -0700145 // Init == max == growth
Ian Rogers700a4022014-05-19 16:49:03 -0700146 std::unique_ptr<Space> space(create_space("test", 16 * MB, 16 * MB, 16 * MB, nullptr));
Mathieu Chartier4e305412014-02-19 10:54:44 -0800147 EXPECT_TRUE(space.get() != nullptr);
Carl Shapiro69759ea2011-07-21 18:13:35 -0700148 }
149 {
jeffhaoc1160702011-10-27 15:48:45 -0700150 // Init > max == growth
Ian Rogers700a4022014-05-19 16:49:03 -0700151 std::unique_ptr<Space> space(create_space("test", 32 * MB, 16 * MB, 16 * MB, nullptr));
Mathieu Chartier4e305412014-02-19 10:54:44 -0800152 EXPECT_TRUE(space.get() == nullptr);
jeffhaoc1160702011-10-27 15:48:45 -0700153 }
154 {
155 // Growth == init < max
Ian Rogers700a4022014-05-19 16:49:03 -0700156 std::unique_ptr<Space> space(create_space("test", 16 * MB, 16 * MB, 32 * MB, nullptr));
Mathieu Chartier4e305412014-02-19 10:54:44 -0800157 EXPECT_TRUE(space.get() != nullptr);
jeffhaoc1160702011-10-27 15:48:45 -0700158 }
159 {
160 // Growth < init < max
Ian Rogers700a4022014-05-19 16:49:03 -0700161 std::unique_ptr<Space> space(create_space("test", 16 * MB, 8 * MB, 32 * MB, nullptr));
Mathieu Chartier4e305412014-02-19 10:54:44 -0800162 EXPECT_TRUE(space.get() == nullptr);
jeffhaoc1160702011-10-27 15:48:45 -0700163 }
164 {
165 // Init < growth < max
Ian Rogers700a4022014-05-19 16:49:03 -0700166 std::unique_ptr<Space> space(create_space("test", 8 * MB, 16 * MB, 32 * MB, nullptr));
Mathieu Chartier4e305412014-02-19 10:54:44 -0800167 EXPECT_TRUE(space.get() != nullptr);
jeffhaoc1160702011-10-27 15:48:45 -0700168 }
169 {
170 // Init < max < growth
Ian Rogers700a4022014-05-19 16:49:03 -0700171 std::unique_ptr<Space> space(create_space("test", 8 * MB, 32 * MB, 16 * MB, nullptr));
Mathieu Chartier4e305412014-02-19 10:54:44 -0800172 EXPECT_TRUE(space.get() == nullptr);
Carl Shapiro69759ea2011-07-21 18:13:35 -0700173 }
174}
175
// TODO: This test is not very good; we should improve it.
// The test should do more allocations before the creation of the ZygoteSpace, and then do
// allocations after the ZygoteSpace is created. The test should also do some GCs to ensure that
// the GC works with the ZygoteSpace.
// Allocates and frees in a fresh 4MB/16MB space, then forks it into a
// ZygoteSpace plus a new allocation space and repeats the alloc/free checks
// on the post-fork space. Statement order matters throughout: each allocation
// and free changes the footprint that later expectations depend on.
void SpaceTest::ZygoteSpaceTestBody(CreateSpaceFn create_space) {
  size_t dummy;
  MallocSpace* space(create_space("test", 4 * MB, 16 * MB, 16 * MB, nullptr));
  ASSERT_TRUE(space != nullptr);

  // Make space findable to the heap, will also delete space when runtime is cleaned up
  AddSpace(space);
  Thread* self = Thread::Current();
  ScopedObjectAccess soa(self);

  // Succeeds, fits without adjusting the footprint limit.
  size_t ptr1_bytes_allocated, ptr1_usable_size, ptr1_bytes_tl_bulk_allocated;
  StackHandleScope<3> hs(soa.Self());
  MutableHandle<mirror::Object> ptr1(
      hs.NewHandle(Alloc(space, self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size,
                         &ptr1_bytes_tl_bulk_allocated)));
  EXPECT_TRUE(ptr1.Get() != nullptr);
  EXPECT_LE(1U * MB, ptr1_bytes_allocated);
  EXPECT_LE(1U * MB, ptr1_usable_size);
  EXPECT_LE(ptr1_usable_size, ptr1_bytes_allocated);
  EXPECT_EQ(ptr1_bytes_tl_bulk_allocated, ptr1_bytes_allocated);

  // Fails, requires a higher footprint limit (8 MB > the 4 MB initial footprint).
  mirror::Object* ptr2 = Alloc(space, self, 8 * MB, &dummy, nullptr, &dummy);
  EXPECT_TRUE(ptr2 == nullptr);

  // Succeeds, adjusts the footprint.
  size_t ptr3_bytes_allocated, ptr3_usable_size, ptr3_bytes_tl_bulk_allocated;
  MutableHandle<mirror::Object> ptr3(
      hs.NewHandle(AllocWithGrowth(space, self, 8 * MB, &ptr3_bytes_allocated, &ptr3_usable_size,
                                   &ptr3_bytes_tl_bulk_allocated)));
  EXPECT_TRUE(ptr3.Get() != nullptr);
  EXPECT_LE(8U * MB, ptr3_bytes_allocated);
  EXPECT_LE(8U * MB, ptr3_usable_size);
  EXPECT_LE(ptr3_usable_size, ptr3_bytes_allocated);
  EXPECT_EQ(ptr3_bytes_tl_bulk_allocated, ptr3_bytes_allocated);

  // Fails, requires a higher footprint limit.
  mirror::Object* ptr4 = space->Alloc(self, 8 * MB, &dummy, nullptr, &dummy);
  EXPECT_TRUE(ptr4 == nullptr);

  // Also fails, requires a higher allowed footprint (growth limit is 16 MB
  // and ~9 MB is already in use).
  mirror::Object* ptr5 = space->AllocWithGrowth(self, 8 * MB, &dummy, nullptr, &dummy);
  EXPECT_TRUE(ptr5 == nullptr);

  // Release some memory.
  size_t free3 = space->AllocationSize(ptr3.Get(), nullptr);
  EXPECT_EQ(free3, ptr3_bytes_allocated);
  EXPECT_EQ(free3, space->Free(self, ptr3.Assign(nullptr)));
  EXPECT_LE(8U * MB, free3);

  // Succeeds, now that memory has been freed.
  size_t ptr6_bytes_allocated, ptr6_usable_size, ptr6_bytes_tl_bulk_allocated;
  Handle<mirror::Object> ptr6(
      hs.NewHandle(AllocWithGrowth(space, self, 9 * MB, &ptr6_bytes_allocated, &ptr6_usable_size,
                                   &ptr6_bytes_tl_bulk_allocated)));
  EXPECT_TRUE(ptr6.Get() != nullptr);
  EXPECT_LE(9U * MB, ptr6_bytes_allocated);
  EXPECT_LE(9U * MB, ptr6_usable_size);
  EXPECT_LE(ptr6_usable_size, ptr6_bytes_allocated);
  EXPECT_EQ(ptr6_bytes_tl_bulk_allocated, ptr6_bytes_allocated);

  // Final clean up.
  size_t free1 = space->AllocationSize(ptr1.Get(), nullptr);
  space->Free(self, ptr1.Assign(nullptr));
  EXPECT_LE(1U * MB, free1);

  // Make sure that the zygote space isn't directly at the start of the space.
  EXPECT_TRUE(space->Alloc(self, 1U * MB, &dummy, nullptr, &dummy) != nullptr);

  // Fork the space: everything allocated so far moves into a ZygoteSpace and
  // `space` is replaced by a fresh allocation space for post-fork allocations.
  gc::Heap* heap = Runtime::Current()->GetHeap();
  space::Space* old_space = space;
  heap->RemoveSpace(old_space);
  heap->RevokeAllThreadLocalBuffers();
  space::ZygoteSpace* zygote_space = space->CreateZygoteSpace("alloc space",
                                                              heap->IsLowMemoryMode(),
                                                              &space);
  delete old_space;
  // Add the zygote space. (revoke=false: buffers were revoked just above.)
  AddSpace(zygote_space, false);

  // Make space findable to the heap, will also delete space when runtime is cleaned up
  AddSpace(space, false);

  // Succeeds, fits without adjusting the footprint limit.
  ptr1.Assign(Alloc(space, self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size,
                    &ptr1_bytes_tl_bulk_allocated));
  EXPECT_TRUE(ptr1.Get() != nullptr);
  EXPECT_LE(1U * MB, ptr1_bytes_allocated);
  EXPECT_LE(1U * MB, ptr1_usable_size);
  EXPECT_LE(ptr1_usable_size, ptr1_bytes_allocated);
  EXPECT_EQ(ptr1_bytes_tl_bulk_allocated, ptr1_bytes_allocated);

  // Fails, requires a higher footprint limit.
  ptr2 = Alloc(space, self, 8 * MB, &dummy, nullptr, &dummy);
  EXPECT_TRUE(ptr2 == nullptr);

  // Succeeds, adjusts the footprint.
  ptr3.Assign(AllocWithGrowth(space, self, 2 * MB, &ptr3_bytes_allocated, &ptr3_usable_size,
                              &ptr3_bytes_tl_bulk_allocated));
  EXPECT_TRUE(ptr3.Get() != nullptr);
  EXPECT_LE(2U * MB, ptr3_bytes_allocated);
  EXPECT_LE(2U * MB, ptr3_usable_size);
  EXPECT_LE(ptr3_usable_size, ptr3_bytes_allocated);
  EXPECT_EQ(ptr3_bytes_tl_bulk_allocated, ptr3_bytes_allocated);
  space->Free(self, ptr3.Assign(nullptr));

  // Final clean up.
  free1 = space->AllocationSize(ptr1.Get(), nullptr);
  space->Free(self, ptr1.Assign(nullptr));
  EXPECT_LE(1U * MB, free1);
}
292
// Basic alloc/free coverage for a 4MB-initial / 16MB-limit space: checks that
// Alloc() respects the current footprint, that AllocWithGrowth() can raise it,
// and that freeing returns at least the requested bytes. Statement order
// matters: each allocation changes the footprint later expectations rely on.
void SpaceTest::AllocAndFreeTestBody(CreateSpaceFn create_space) {
  size_t dummy = 0;
  MallocSpace* space(create_space("test", 4 * MB, 16 * MB, 16 * MB, nullptr));
  ASSERT_TRUE(space != nullptr);
  Thread* self = Thread::Current();
  ScopedObjectAccess soa(self);

  // Make space findable to the heap, will also delete space when runtime is cleaned up
  AddSpace(space);

  // Succeeds, fits without adjusting the footprint limit.
  size_t ptr1_bytes_allocated, ptr1_usable_size, ptr1_bytes_tl_bulk_allocated;
  StackHandleScope<3> hs(soa.Self());
  MutableHandle<mirror::Object> ptr1(
      hs.NewHandle(Alloc(space, self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size,
                         &ptr1_bytes_tl_bulk_allocated)));
  EXPECT_TRUE(ptr1.Get() != nullptr);
  EXPECT_LE(1U * MB, ptr1_bytes_allocated);
  EXPECT_LE(1U * MB, ptr1_usable_size);
  EXPECT_LE(ptr1_usable_size, ptr1_bytes_allocated);
  EXPECT_EQ(ptr1_bytes_tl_bulk_allocated, ptr1_bytes_allocated);

  // Fails, requires a higher footprint limit (8 MB > the 4 MB initial footprint).
  mirror::Object* ptr2 = Alloc(space, self, 8 * MB, &dummy, nullptr, &dummy);
  EXPECT_TRUE(ptr2 == nullptr);

  // Succeeds, adjusts the footprint.
  size_t ptr3_bytes_allocated, ptr3_usable_size, ptr3_bytes_tl_bulk_allocated;
  MutableHandle<mirror::Object> ptr3(
      hs.NewHandle(AllocWithGrowth(space, self, 8 * MB, &ptr3_bytes_allocated, &ptr3_usable_size,
                                   &ptr3_bytes_tl_bulk_allocated)));
  EXPECT_TRUE(ptr3.Get() != nullptr);
  EXPECT_LE(8U * MB, ptr3_bytes_allocated);
  EXPECT_LE(8U * MB, ptr3_usable_size);
  EXPECT_LE(ptr3_usable_size, ptr3_bytes_allocated);
  EXPECT_EQ(ptr3_bytes_tl_bulk_allocated, ptr3_bytes_allocated);

  // Fails, requires a higher footprint limit.
  mirror::Object* ptr4 = Alloc(space, self, 8 * MB, &dummy, nullptr, &dummy);
  EXPECT_TRUE(ptr4 == nullptr);

  // Also fails, requires a higher allowed footprint (growth limit is 16 MB
  // and ~9 MB is already in use).
  mirror::Object* ptr5 = AllocWithGrowth(space, self, 8 * MB, &dummy, nullptr, &dummy);
  EXPECT_TRUE(ptr5 == nullptr);

  // Release some memory.
  size_t free3 = space->AllocationSize(ptr3.Get(), nullptr);
  EXPECT_EQ(free3, ptr3_bytes_allocated);
  space->Free(self, ptr3.Assign(nullptr));
  EXPECT_LE(8U * MB, free3);

  // Succeeds, now that memory has been freed.
  size_t ptr6_bytes_allocated, ptr6_usable_size, ptr6_bytes_tl_bulk_allocated;
  Handle<mirror::Object> ptr6(
      hs.NewHandle(AllocWithGrowth(space, self, 9 * MB, &ptr6_bytes_allocated, &ptr6_usable_size,
                                   &ptr6_bytes_tl_bulk_allocated)));
  EXPECT_TRUE(ptr6.Get() != nullptr);
  EXPECT_LE(9U * MB, ptr6_bytes_allocated);
  EXPECT_LE(9U * MB, ptr6_usable_size);
  EXPECT_LE(ptr6_usable_size, ptr6_bytes_allocated);
  EXPECT_EQ(ptr6_bytes_tl_bulk_allocated, ptr6_bytes_allocated);

  // Final clean up.
  size_t free1 = space->AllocationSize(ptr1.Get(), nullptr);
  space->Free(self, ptr1.Assign(nullptr));
  EXPECT_LE(1U * MB, free1);
}
360
// Bulk alloc/free coverage: fills the space with 1024 minimum-size arrays,
// frees them all with FreeList(), then repeats with 1KB objects via
// AllocWithGrowth(). Verifies AllocationSize() agrees with the sizes reported
// at allocation time.
void SpaceTest::AllocAndFreeListTestBody(CreateSpaceFn create_space) {
  MallocSpace* space(create_space("test", 4 * MB, 16 * MB, 16 * MB, nullptr));
  ASSERT_TRUE(space != nullptr);

  // Make space findable to the heap, will also delete space when runtime is cleaned up
  AddSpace(space);
  Thread* self = Thread::Current();
  ScopedObjectAccess soa(self);

  // Succeeds, fits without adjusting the max allowed footprint.
  mirror::Object* lots_of_objects[1024];
  for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
    size_t allocation_size, usable_size, bytes_tl_bulk_allocated;
    // Smallest object InstallClass() can label: a zero-length byte array.
    size_t size_of_zero_length_byte_array = SizeOfZeroLengthByteArray();
    lots_of_objects[i] = Alloc(space, self, size_of_zero_length_byte_array, &allocation_size,
                               &usable_size, &bytes_tl_bulk_allocated);
    EXPECT_TRUE(lots_of_objects[i] != nullptr);
    size_t computed_usable_size;
    EXPECT_EQ(allocation_size, space->AllocationSize(lots_of_objects[i], &computed_usable_size));
    EXPECT_EQ(usable_size, computed_usable_size);
    // Zero means the allocation was served from an existing thread-local run.
    EXPECT_TRUE(bytes_tl_bulk_allocated == 0 ||
                bytes_tl_bulk_allocated >= allocation_size);
  }

  // Release memory.
  space->FreeList(self, arraysize(lots_of_objects), lots_of_objects);

  // Succeeds, fits by adjusting the max allowed footprint.
  for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
    size_t allocation_size, usable_size, bytes_tl_bulk_allocated;
    lots_of_objects[i] = AllocWithGrowth(space, self, 1024, &allocation_size, &usable_size,
                                         &bytes_tl_bulk_allocated);
    EXPECT_TRUE(lots_of_objects[i] != nullptr);
    size_t computed_usable_size;
    EXPECT_EQ(allocation_size, space->AllocationSize(lots_of_objects[i], &computed_usable_size));
    EXPECT_EQ(usable_size, computed_usable_size);
    EXPECT_TRUE(bytes_tl_bulk_allocated == 0 ||
                bytes_tl_bulk_allocated >= allocation_size);
  }

  // Release memory.
  space->FreeList(self, arraysize(lots_of_objects), lots_of_objects);
}
404
Hiroshi Yamauchi3ddbd422013-12-06 17:43:36 -0800405void SpaceTest::SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t object_size,
Ian Rogers3bb17a62012-01-27 23:56:44 -0800406 int round, size_t growth_limit) {
407 if (((object_size > 0 && object_size >= static_cast<intptr_t>(growth_limit))) ||
408 ((object_size < 0 && -object_size >= static_cast<intptr_t>(growth_limit)))) {
409 // No allocation can succeed
410 return;
411 }
Ian Rogers3bb17a62012-01-27 23:56:44 -0800412
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700413 // The space's footprint equals amount of resources requested from system
414 size_t footprint = space->GetFootprint();
Ian Rogers3bb17a62012-01-27 23:56:44 -0800415
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700416 // The space must at least have its book keeping allocated
Ian Rogers3bb17a62012-01-27 23:56:44 -0800417 EXPECT_GT(footprint, 0u);
418
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700419 // But it shouldn't exceed the initial size
Ian Rogers3bb17a62012-01-27 23:56:44 -0800420 EXPECT_LE(footprint, growth_limit);
421
422 // space's size shouldn't exceed the initial size
423 EXPECT_LE(space->Size(), growth_limit);
424
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700425 // this invariant should always hold or else the space has grown to be larger than what the
Ian Rogers3bb17a62012-01-27 23:56:44 -0800426 // space believes its size is (which will break invariants)
427 EXPECT_GE(space->Size(), footprint);
428
429 // Fill the space with lots of small objects up to the growth limit
430 size_t max_objects = (growth_limit / (object_size > 0 ? object_size : 8)) + 1;
Ian Rogers700a4022014-05-19 16:49:03 -0700431 std::unique_ptr<mirror::Object*[]> lots_of_objects(new mirror::Object*[max_objects]);
Ian Rogers3bb17a62012-01-27 23:56:44 -0800432 size_t last_object = 0; // last object for which allocation succeeded
433 size_t amount_allocated = 0; // amount of space allocated
Ian Rogers50b35e22012-10-04 10:09:15 -0700434 Thread* self = Thread::Current();
Mathieu Chartier4e305412014-02-19 10:54:44 -0800435 ScopedObjectAccess soa(self);
Mathieu Chartiereb5710e2013-07-25 15:19:42 -0700436 size_t rand_seed = 123456789;
Elliott Hughesb25c3f62012-03-26 16:35:06 -0700437 for (size_t i = 0; i < max_objects; i++) {
Ian Rogers3bb17a62012-01-27 23:56:44 -0800438 size_t alloc_fails = 0; // number of failed allocations
439 size_t max_fails = 30; // number of times we fail allocation before giving up
440 for (; alloc_fails < max_fails; alloc_fails++) {
441 size_t alloc_size;
442 if (object_size > 0) {
443 alloc_size = object_size;
444 } else {
Mathieu Chartiereb5710e2013-07-25 15:19:42 -0700445 alloc_size = test_rand(&rand_seed) % static_cast<size_t>(-object_size);
Hiroshi Yamauchi4d2efce2014-02-10 16:19:09 -0800446 // Note the minimum size, which is the size of a zero-length byte array.
447 size_t size_of_zero_length_byte_array = SizeOfZeroLengthByteArray();
448 if (alloc_size < size_of_zero_length_byte_array) {
449 alloc_size = size_of_zero_length_byte_array;
Ian Rogers3bb17a62012-01-27 23:56:44 -0800450 }
451 }
Mathieu Chartiereb8167a2014-05-07 15:43:14 -0700452 StackHandleScope<1> hs(soa.Self());
453 auto object(hs.NewHandle<mirror::Object>(nullptr));
Mathieu Chartiereb5710e2013-07-25 15:19:42 -0700454 size_t bytes_allocated = 0;
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -0700455 size_t bytes_tl_bulk_allocated;
Ian Rogers3bb17a62012-01-27 23:56:44 -0800456 if (round <= 1) {
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -0700457 object.Assign(Alloc(space, self, alloc_size, &bytes_allocated, nullptr,
458 &bytes_tl_bulk_allocated));
Ian Rogers3bb17a62012-01-27 23:56:44 -0800459 } else {
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -0700460 object.Assign(AllocWithGrowth(space, self, alloc_size, &bytes_allocated, nullptr,
461 &bytes_tl_bulk_allocated));
Ian Rogers3bb17a62012-01-27 23:56:44 -0800462 }
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700463 footprint = space->GetFootprint();
Ian Rogers3bb17a62012-01-27 23:56:44 -0800464 EXPECT_GE(space->Size(), footprint); // invariant
Mathieu Chartiereb8167a2014-05-07 15:43:14 -0700465 if (object.Get() != nullptr) { // allocation succeeded
466 lots_of_objects[i] = object.Get();
467 size_t allocation_size = space->AllocationSize(object.Get(), nullptr);
Mathieu Chartiereb5710e2013-07-25 15:19:42 -0700468 EXPECT_EQ(bytes_allocated, allocation_size);
Ian Rogers3bb17a62012-01-27 23:56:44 -0800469 if (object_size > 0) {
470 EXPECT_GE(allocation_size, static_cast<size_t>(object_size));
471 } else {
472 EXPECT_GE(allocation_size, 8u);
473 }
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -0700474 EXPECT_TRUE(bytes_tl_bulk_allocated == 0 ||
475 bytes_tl_bulk_allocated >= allocation_size);
Ian Rogers3bb17a62012-01-27 23:56:44 -0800476 amount_allocated += allocation_size;
477 break;
478 }
479 }
480 if (alloc_fails == max_fails) {
481 last_object = i;
482 break;
483 }
484 }
485 CHECK_NE(last_object, 0u); // we should have filled the space
486 EXPECT_GT(amount_allocated, 0u);
487
488 // We shouldn't have gone past the growth_limit
489 EXPECT_LE(amount_allocated, growth_limit);
490 EXPECT_LE(footprint, growth_limit);
491 EXPECT_LE(space->Size(), growth_limit);
492
493 // footprint and size should agree with amount allocated
494 EXPECT_GE(footprint, amount_allocated);
495 EXPECT_GE(space->Size(), amount_allocated);
496
497 // Release storage in a semi-adhoc manner
498 size_t free_increment = 96;
Elliott Hughesb25c3f62012-03-26 16:35:06 -0700499 while (true) {
Mathieu Chartier4e305412014-02-19 10:54:44 -0800500 {
501 ScopedThreadStateChange tsc(self, kNative);
502 // Give the space a haircut.
503 space->Trim();
504 }
Ian Rogers3bb17a62012-01-27 23:56:44 -0800505
506 // Bounds sanity
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700507 footprint = space->GetFootprint();
Ian Rogers3bb17a62012-01-27 23:56:44 -0800508 EXPECT_LE(amount_allocated, growth_limit);
509 EXPECT_GE(footprint, amount_allocated);
510 EXPECT_LE(footprint, growth_limit);
511 EXPECT_GE(space->Size(), amount_allocated);
512 EXPECT_LE(space->Size(), growth_limit);
513
514 if (free_increment == 0) {
515 break;
516 }
517
Mathieu Chartier4e305412014-02-19 10:54:44 -0800518 // Free some objects
519 for (size_t i = 0; i < last_object; i += free_increment) {
520 mirror::Object* object = lots_of_objects.get()[i];
521 if (object == nullptr) {
522 continue;
Ian Rogers3bb17a62012-01-27 23:56:44 -0800523 }
Ian Rogers6fac4472014-02-25 17:01:10 -0800524 size_t allocation_size = space->AllocationSize(object, nullptr);
Mathieu Chartier4e305412014-02-19 10:54:44 -0800525 if (object_size > 0) {
526 EXPECT_GE(allocation_size, static_cast<size_t>(object_size));
527 } else {
528 EXPECT_GE(allocation_size, 8u);
529 }
530 space->Free(self, object);
531 lots_of_objects.get()[i] = nullptr;
532 amount_allocated -= allocation_size;
533 footprint = space->GetFootprint();
534 EXPECT_GE(space->Size(), footprint); // invariant
Ian Rogers3bb17a62012-01-27 23:56:44 -0800535 }
Mathieu Chartier4e305412014-02-19 10:54:44 -0800536
537 free_increment >>= 1;
Ian Rogers3bb17a62012-01-27 23:56:44 -0800538 }
Mathieu Chartier4e305412014-02-19 10:54:44 -0800539
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700540 // The space has become empty here before allocating a large object
541 // below. For RosAlloc, revoke thread-local runs, which are kept
542 // even when empty for a performance reason, so that they won't
543 // cause the following large object allocation to fail due to
544 // potential fragmentation. Note they are normally revoked at each
545 // GC (but no GC here.)
546 space->RevokeAllThreadLocalBuffers();
547
Ian Rogers3bb17a62012-01-27 23:56:44 -0800548 // All memory was released, try a large allocation to check freed memory is being coalesced
Mathieu Chartiereb8167a2014-05-07 15:43:14 -0700549 StackHandleScope<1> hs(soa.Self());
550 auto large_object(hs.NewHandle<mirror::Object>(nullptr));
Ian Rogers3bb17a62012-01-27 23:56:44 -0800551 size_t three_quarters_space = (growth_limit / 2) + (growth_limit / 4);
Mathieu Chartiereb5710e2013-07-25 15:19:42 -0700552 size_t bytes_allocated = 0;
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -0700553 size_t bytes_tl_bulk_allocated;
Ian Rogers3bb17a62012-01-27 23:56:44 -0800554 if (round <= 1) {
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -0700555 large_object.Assign(Alloc(space, self, three_quarters_space, &bytes_allocated, nullptr,
556 &bytes_tl_bulk_allocated));
Ian Rogers3bb17a62012-01-27 23:56:44 -0800557 } else {
Mathieu Chartiereb8167a2014-05-07 15:43:14 -0700558 large_object.Assign(AllocWithGrowth(space, self, three_quarters_space, &bytes_allocated,
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -0700559 nullptr, &bytes_tl_bulk_allocated));
Ian Rogers3bb17a62012-01-27 23:56:44 -0800560 }
Mathieu Chartiereb8167a2014-05-07 15:43:14 -0700561 EXPECT_TRUE(large_object.Get() != nullptr);
Ian Rogers3bb17a62012-01-27 23:56:44 -0800562
563 // Sanity check footprint
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700564 footprint = space->GetFootprint();
Ian Rogers3bb17a62012-01-27 23:56:44 -0800565 EXPECT_LE(footprint, growth_limit);
566 EXPECT_GE(space->Size(), footprint);
567 EXPECT_LE(space->Size(), growth_limit);
568
569 // Clean up
Mathieu Chartiereb8167a2014-05-07 15:43:14 -0700570 space->Free(self, large_object.Assign(nullptr));
Mathieu Chartier4e305412014-02-19 10:54:44 -0800571
Ian Rogers3bb17a62012-01-27 23:56:44 -0800572 // Sanity check footprint
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700573 footprint = space->GetFootprint();
Ian Rogers3bb17a62012-01-27 23:56:44 -0800574 EXPECT_LE(footprint, growth_limit);
575 EXPECT_GE(space->Size(), footprint);
576 EXPECT_LE(space->Size(), growth_limit);
577}
578
Hiroshi Yamauchi3ddbd422013-12-06 17:43:36 -0800579void SpaceTest::SizeFootPrintGrowthLimitAndTrimDriver(size_t object_size, CreateSpaceFn create_space) {
Hiroshi Yamauchi4d2efce2014-02-10 16:19:09 -0800580 if (object_size < SizeOfZeroLengthByteArray()) {
581 // Too small for the object layout/model.
582 return;
583 }
Ian Rogers3bb17a62012-01-27 23:56:44 -0800584 size_t initial_size = 4 * MB;
585 size_t growth_limit = 8 * MB;
586 size_t capacity = 16 * MB;
Mathieu Chartier4e305412014-02-19 10:54:44 -0800587 MallocSpace* space(create_space("test", initial_size, growth_limit, capacity, nullptr));
588 ASSERT_TRUE(space != nullptr);
Ian Rogers3bb17a62012-01-27 23:56:44 -0800589
590 // Basic sanity
591 EXPECT_EQ(space->Capacity(), growth_limit);
592 EXPECT_EQ(space->NonGrowthLimitCapacity(), capacity);
593
594 // Make space findable to the heap, will also delete space when runtime is cleaned up
Mathieu Chartier590fee92013-09-13 13:46:47 -0700595 AddSpace(space);
Ian Rogers3bb17a62012-01-27 23:56:44 -0800596
597 // In this round we don't allocate with growth and therefore can't grow past the initial size.
598 // This effectively makes the growth_limit the initial_size, so assert this.
599 SizeFootPrintGrowthLimitAndTrimBody(space, object_size, 1, initial_size);
600 SizeFootPrintGrowthLimitAndTrimBody(space, object_size, 2, growth_limit);
601 // Remove growth limit
602 space->ClearGrowthLimit();
603 EXPECT_EQ(space->Capacity(), capacity);
604 SizeFootPrintGrowthLimitAndTrimBody(space, object_size, 3, capacity);
605}
606
// Defines one gtest in the spaceName##StaticTest fixture that runs the
// size/footprint/growth-limit/trim driver with a fixed allocation size.
// (Comments cannot appear inside the macro: '//' would swallow the '\'
// line continuations.)
#define TEST_SizeFootPrintGrowthLimitAndTrimStatic(name, spaceName, spaceFn, size) \
  TEST_F(spaceName##StaticTest, SizeFootPrintGrowthLimitAndTrim_AllocationsOf_##name) { \
    SizeFootPrintGrowthLimitAndTrimDriver(size, spaceFn); \
  }
611
// Defines one gtest in the spaceName##RandomTest fixture that runs the driver
// with randomized allocation sizes: the size is negated, which the driver/body
// treat as "random sizes up to |size|".
#define TEST_SizeFootPrintGrowthLimitAndTrimRandom(name, spaceName, spaceFn, size) \
  TEST_F(spaceName##RandomTest, SizeFootPrintGrowthLimitAndTrim_RandomAllocationsWithMax_##name) { \
    SizeFootPrintGrowthLimitAndTrimDriver(-size, spaceFn); \
  }
616
// Declares the spaceName##BaseTest fixture (a plain SpaceTest subclass) and
// instantiates the four baseline tests — Init, ZygoteSpace, AllocAndFree and
// AllocAndFreeList — each delegating to the corresponding *TestBody with the
// space-creation function spaceFn.
#define TEST_SPACE_CREATE_FN_BASE(spaceName, spaceFn) \
  class spaceName##BaseTest : public SpaceTest { \
  }; \
  \
  TEST_F(spaceName##BaseTest, Init) { \
    InitTestBody(spaceFn); \
  } \
  TEST_F(spaceName##BaseTest, ZygoteSpace) { \
    ZygoteSpaceTestBody(spaceFn); \
  } \
  TEST_F(spaceName##BaseTest, AllocAndFree) { \
    AllocAndFreeTestBody(spaceFn); \
  } \
  TEST_F(spaceName##BaseTest, AllocAndFreeList) { \
    AllocAndFreeListTestBody(spaceFn); \
  }
633
// Declares the spaceName##StaticTest fixture and instantiates the
// fixed-allocation-size test matrix, covering sizes from 12 bytes up to 8 MB.
// Note the static variant starts at 12B while the random variant below starts
// at 16B; the driver itself skips sizes below a zero-length byte array.
#define TEST_SPACE_CREATE_FN_STATIC(spaceName, spaceFn) \
  class spaceName##StaticTest : public SpaceTest { \
  }; \
  \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(12B, spaceName, spaceFn, 12) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(16B, spaceName, spaceFn, 16) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(24B, spaceName, spaceFn, 24) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(32B, spaceName, spaceFn, 32) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(64B, spaceName, spaceFn, 64) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(128B, spaceName, spaceFn, 128) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(1KB, spaceName, spaceFn, 1 * KB) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(4KB, spaceName, spaceFn, 4 * KB) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(1MB, spaceName, spaceFn, 1 * MB) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(4MB, spaceName, spaceFn, 4 * MB) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(8MB, spaceName, spaceFn, 8 * MB)
649
// Declares the spaceName##RandomTest fixture and instantiates the
// randomized-allocation-size test matrix; each size is an upper bound for the
// random allocations, from 16 bytes up to 8 MB.
#define TEST_SPACE_CREATE_FN_RANDOM(spaceName, spaceFn) \
  class spaceName##RandomTest : public SpaceTest { \
  }; \
  \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(16B, spaceName, spaceFn, 16) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(24B, spaceName, spaceFn, 24) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(32B, spaceName, spaceFn, 32) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(64B, spaceName, spaceFn, 64) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(128B, spaceName, spaceFn, 128) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(1KB, spaceName, spaceFn, 1 * KB) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(4KB, spaceName, spaceFn, 4 * KB) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(1MB, spaceName, spaceFn, 1 * MB) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(4MB, spaceName, spaceFn, 4 * MB) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(8MB, spaceName, spaceFn, 8 * MB)
Ian Rogers3bb17a62012-01-27 23:56:44 -0800664
Ian Rogers1d54e732013-05-02 21:10:01 -0700665} // namespace space
666} // namespace gc
Carl Shapiro69759ea2011-07-21 18:13:35 -0700667} // namespace art
Andreas Gampea7433512014-02-21 13:19:23 -0800668
669#endif // ART_RUNTIME_GC_SPACE_SPACE_TEST_H_