/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_SPACE_TEST_H_
#define ART_RUNTIME_GC_SPACE_SPACE_TEST_H_

#include <stdint.h>
#include <memory>

#include "common_runtime_test.h"
#include "globals.h"
#include "mirror/array-inl.h"
#include "mirror/object-inl.h"
#include "scoped_thread_state_change.h"
#include "zygote_space.h"

namespace art {
namespace gc {
namespace space {

class SpaceTest : public CommonRuntimeTest {
 public:
  jobject byte_array_class_;

  SpaceTest() : byte_array_class_(nullptr) {
  }

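  // Registers the space with the heap so the tests (and runtime teardown) can find it.
  // Thread-local allocation buffers are revoked first so the heap's accounting stays
  // consistent; callers that have already revoked pass revoke = false.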
  void AddSpace(ContinuousSpace* space, bool revoke = true) {
    Heap* heap = Runtime::Current()->GetHeap();
    if (revoke) {
      heap->RevokeAllThreadLocalBuffers();
    }
    heap->AddSpace(space);
    heap->SetSpaceAsDefault(space);
  }

  mirror::Class* GetByteArrayClass(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    StackHandleScope<1> hs(self);
    auto null_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
    if (byte_array_class_ == nullptr) {
      mirror::Class* byte_array_class =
          Runtime::Current()->GetClassLinker()->FindClass(self, "[B", null_loader);
      EXPECT_TRUE(byte_array_class != nullptr);
      byte_array_class_ = self->GetJniEnv()->NewLocalRef(byte_array_class);
      EXPECT_TRUE(byte_array_class_ != nullptr);
    }
    return reinterpret_cast<mirror::Class*>(self->DecodeJObject(byte_array_class_));
  }

  mirror::Object* Alloc(space::MallocSpace* alloc_space, Thread* self, size_t bytes,
                        size_t* bytes_allocated, size_t* usable_size)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    StackHandleScope<1> hs(self);
    Handle<mirror::Class> byte_array_class(hs.NewHandle(GetByteArrayClass(self)));
    mirror::Object* obj = alloc_space->Alloc(self, bytes, bytes_allocated, usable_size);
    if (obj != nullptr) {
      InstallClass(obj, byte_array_class.Get(), bytes);
    }
    return obj;
  }

  mirror::Object* AllocWithGrowth(space::MallocSpace* alloc_space, Thread* self, size_t bytes,
                                  size_t* bytes_allocated, size_t* usable_size)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    StackHandleScope<1> hs(self);
    Handle<mirror::Class> byte_array_class(hs.NewHandle(GetByteArrayClass(self)));
    mirror::Object* obj = alloc_space->AllocWithGrowth(self, bytes, bytes_allocated, usable_size);
    if (obj != nullptr) {
      InstallClass(obj, byte_array_class.Get(), bytes);
    }
    return obj;
  }

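  // Gives a raw allocation a valid byte-array class and length so it looks like a real heap
  // object; the allocation itself is done by Alloc/AllocWithGrowth above.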
  void InstallClass(mirror::Object* o, mirror::Class* byte_array_class, size_t size)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // Note the minimum size, which is the size of a zero-length byte array.
    EXPECT_GE(size, SizeOfZeroLengthByteArray());
    EXPECT_TRUE(byte_array_class != nullptr);
    o->SetClass(byte_array_class);
    if (kUseBakerOrBrooksReadBarrier) {
      // Like the proper heap object allocation, install and verify
      // the correct read barrier pointer.
      if (kUseBrooksReadBarrier) {
        o->SetReadBarrierPointer(o);
      }
      o->AssertReadBarrierPointer();
    }
    mirror::Array* arr = o->AsArray<kVerifyNone>();
    size_t header_size = SizeOfZeroLengthByteArray();
    int32_t length = size - header_size;
    arr->SetLength(length);
    EXPECT_EQ(arr->SizeOf<kVerifyNone>(), size);
  }

  static size_t SizeOfZeroLengthByteArray() {
    return mirror::Array::DataOffset(Primitive::ComponentSize(Primitive::kPrimByte)).Uint32Value();
  }

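  // Factory used by the concrete space tests to create the space under test; each test body
  // below is parameterized on it.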
  typedef MallocSpace* (*CreateSpaceFn)(const std::string& name, size_t initial_size, size_t growth_limit,
                                        size_t capacity, uint8_t* requested_begin);
  void InitTestBody(CreateSpaceFn create_space);
  void ZygoteSpaceTestBody(CreateSpaceFn create_space);
  void AllocAndFreeTestBody(CreateSpaceFn create_space);
  void AllocAndFreeListTestBody(CreateSpaceFn create_space);

  void SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t object_size,
                                           int round, size_t growth_limit);
  void SizeFootPrintGrowthLimitAndTrimDriver(size_t object_size, CreateSpaceFn create_space);
};

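// Simple linear congruential generator; gives the size tests deterministic pseudo-random
// allocation sizes without pulling in <random>.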
static inline size_t test_rand(size_t* seed) {
  *seed = *seed * 1103515245 + 12345;
  return *seed;
}

void SpaceTest::InitTestBody(CreateSpaceFn create_space) {
  // This will lead to error messages in the log.
  ScopedLogSeverity sls(LogSeverity::FATAL);

  {
    // Init < max == growth
    std::unique_ptr<Space> space(create_space("test", 16 * MB, 32 * MB, 32 * MB, nullptr));
    EXPECT_TRUE(space.get() != nullptr);
  }
  {
    // Init == max == growth
    std::unique_ptr<Space> space(create_space("test", 16 * MB, 16 * MB, 16 * MB, nullptr));
    EXPECT_TRUE(space.get() != nullptr);
  }
  {
    // Init > max == growth
    std::unique_ptr<Space> space(create_space("test", 32 * MB, 16 * MB, 16 * MB, nullptr));
    EXPECT_TRUE(space.get() == nullptr);
  }
  {
    // Growth == init < max
    std::unique_ptr<Space> space(create_space("test", 16 * MB, 16 * MB, 32 * MB, nullptr));
    EXPECT_TRUE(space.get() != nullptr);
  }
  {
    // Growth < init < max
    std::unique_ptr<Space> space(create_space("test", 16 * MB, 8 * MB, 32 * MB, nullptr));
    EXPECT_TRUE(space.get() == nullptr);
  }
  {
    // Init < growth < max
    std::unique_ptr<Space> space(create_space("test", 8 * MB, 16 * MB, 32 * MB, nullptr));
    EXPECT_TRUE(space.get() != nullptr);
  }
  {
    // Init < max < growth
    std::unique_ptr<Space> space(create_space("test", 8 * MB, 32 * MB, 16 * MB, nullptr));
    EXPECT_TRUE(space.get() == nullptr);
  }
}

// TODO: This test is not very good, we should improve it.
// The test should do more allocations before the creation of the ZygoteSpace, and then do
// allocations after the ZygoteSpace is created. The test should also do some GCs to ensure that
// the GC works with the ZygoteSpace.
void SpaceTest::ZygoteSpaceTestBody(CreateSpaceFn create_space) {
  size_t dummy;
  MallocSpace* space(create_space("test", 4 * MB, 16 * MB, 16 * MB, nullptr));
  ASSERT_TRUE(space != nullptr);

  // Make space findable to the heap, will also delete space when runtime is cleaned up
  AddSpace(space);
  Thread* self = Thread::Current();
  ScopedObjectAccess soa(self);

  // Succeeds, fits without adjusting the footprint limit.
  size_t ptr1_bytes_allocated, ptr1_usable_size;
  StackHandleScope<3> hs(soa.Self());
  MutableHandle<mirror::Object> ptr1(
      hs.NewHandle(Alloc(space, self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size)));
  EXPECT_TRUE(ptr1.Get() != nullptr);
  EXPECT_LE(1U * MB, ptr1_bytes_allocated);
  EXPECT_LE(1U * MB, ptr1_usable_size);
  EXPECT_LE(ptr1_usable_size, ptr1_bytes_allocated);

  // Fails, requires a higher footprint limit.
  mirror::Object* ptr2 = Alloc(space, self, 8 * MB, &dummy, nullptr);
  EXPECT_TRUE(ptr2 == nullptr);

  // Succeeds, adjusts the footprint.
  size_t ptr3_bytes_allocated, ptr3_usable_size;
  MutableHandle<mirror::Object> ptr3(
      hs.NewHandle(AllocWithGrowth(space, self, 8 * MB, &ptr3_bytes_allocated, &ptr3_usable_size)));
  EXPECT_TRUE(ptr3.Get() != nullptr);
  EXPECT_LE(8U * MB, ptr3_bytes_allocated);
  EXPECT_LE(8U * MB, ptr3_usable_size);
  EXPECT_LE(ptr3_usable_size, ptr3_bytes_allocated);

  // Fails, requires a higher footprint limit.
  mirror::Object* ptr4 = space->Alloc(self, 8 * MB, &dummy, nullptr);
  EXPECT_TRUE(ptr4 == nullptr);

  // Also fails, requires a higher allowed footprint.
  mirror::Object* ptr5 = space->AllocWithGrowth(self, 8 * MB, &dummy, nullptr);
  EXPECT_TRUE(ptr5 == nullptr);

  // Release some memory.
  size_t free3 = space->AllocationSize(ptr3.Get(), nullptr);
  EXPECT_EQ(free3, ptr3_bytes_allocated);
  EXPECT_EQ(free3, space->Free(self, ptr3.Assign(nullptr)));
  EXPECT_LE(8U * MB, free3);

  // Succeeds, now that memory has been freed.
  size_t ptr6_bytes_allocated, ptr6_usable_size;
  Handle<mirror::Object> ptr6(
      hs.NewHandle(AllocWithGrowth(space, self, 9 * MB, &ptr6_bytes_allocated, &ptr6_usable_size)));
  EXPECT_TRUE(ptr6.Get() != nullptr);
  EXPECT_LE(9U * MB, ptr6_bytes_allocated);
  EXPECT_LE(9U * MB, ptr6_usable_size);
  EXPECT_LE(ptr6_usable_size, ptr6_bytes_allocated);

  // Final clean up.
  size_t free1 = space->AllocationSize(ptr1.Get(), nullptr);
  space->Free(self, ptr1.Assign(nullptr));
  EXPECT_LE(1U * MB, free1);

  // Make sure that the zygote space isn't directly at the start of the space.
  EXPECT_TRUE(space->Alloc(self, 1U * MB, &dummy, nullptr) != nullptr);

  gc::Heap* heap = Runtime::Current()->GetHeap();
  space::Space* old_space = space;
  heap->RemoveSpace(old_space);
  heap->RevokeAllThreadLocalBuffers();
  space::ZygoteSpace* zygote_space = space->CreateZygoteSpace("alloc space",
                                                              heap->IsLowMemoryMode(),
                                                              &space);
  delete old_space;
  // Add the zygote space.
  AddSpace(zygote_space, false);

  // Make space findable to the heap, will also delete space when runtime is cleaned up
  AddSpace(space, false);

  // Succeeds, fits without adjusting the footprint limit.
  ptr1.Assign(Alloc(space, self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size));
  EXPECT_TRUE(ptr1.Get() != nullptr);
  EXPECT_LE(1U * MB, ptr1_bytes_allocated);
  EXPECT_LE(1U * MB, ptr1_usable_size);
  EXPECT_LE(ptr1_usable_size, ptr1_bytes_allocated);

  // Fails, requires a higher footprint limit.
  ptr2 = Alloc(space, self, 8 * MB, &dummy, nullptr);
  EXPECT_TRUE(ptr2 == nullptr);

  // Succeeds, adjusts the footprint.
  ptr3.Assign(AllocWithGrowth(space, self, 2 * MB, &ptr3_bytes_allocated, &ptr3_usable_size));
  EXPECT_TRUE(ptr3.Get() != nullptr);
  EXPECT_LE(2U * MB, ptr3_bytes_allocated);
  EXPECT_LE(2U * MB, ptr3_usable_size);
  EXPECT_LE(ptr3_usable_size, ptr3_bytes_allocated);
  space->Free(self, ptr3.Assign(nullptr));

  // Final clean up.
  free1 = space->AllocationSize(ptr1.Get(), nullptr);
  space->Free(self, ptr1.Assign(nullptr));
  EXPECT_LE(1U * MB, free1);
}

void SpaceTest::AllocAndFreeTestBody(CreateSpaceFn create_space) {
  size_t dummy = 0;
  MallocSpace* space(create_space("test", 4 * MB, 16 * MB, 16 * MB, nullptr));
  ASSERT_TRUE(space != nullptr);
  Thread* self = Thread::Current();
  ScopedObjectAccess soa(self);

  // Make space findable to the heap, will also delete space when runtime is cleaned up
  AddSpace(space);

  // Succeeds, fits without adjusting the footprint limit.
  size_t ptr1_bytes_allocated, ptr1_usable_size;
  StackHandleScope<3> hs(soa.Self());
  MutableHandle<mirror::Object> ptr1(
      hs.NewHandle(Alloc(space, self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size)));
  EXPECT_TRUE(ptr1.Get() != nullptr);
  EXPECT_LE(1U * MB, ptr1_bytes_allocated);
  EXPECT_LE(1U * MB, ptr1_usable_size);
  EXPECT_LE(ptr1_usable_size, ptr1_bytes_allocated);

  // Fails, requires a higher footprint limit.
  mirror::Object* ptr2 = Alloc(space, self, 8 * MB, &dummy, nullptr);
  EXPECT_TRUE(ptr2 == nullptr);

  // Succeeds, adjusts the footprint.
  size_t ptr3_bytes_allocated, ptr3_usable_size;
  MutableHandle<mirror::Object> ptr3(
      hs.NewHandle(AllocWithGrowth(space, self, 8 * MB, &ptr3_bytes_allocated, &ptr3_usable_size)));
  EXPECT_TRUE(ptr3.Get() != nullptr);
  EXPECT_LE(8U * MB, ptr3_bytes_allocated);
  EXPECT_LE(8U * MB, ptr3_usable_size);
  EXPECT_LE(ptr3_usable_size, ptr3_bytes_allocated);

  // Fails, requires a higher footprint limit.
  mirror::Object* ptr4 = Alloc(space, self, 8 * MB, &dummy, nullptr);
  EXPECT_TRUE(ptr4 == nullptr);

  // Also fails, requires a higher allowed footprint.
  mirror::Object* ptr5 = AllocWithGrowth(space, self, 8 * MB, &dummy, nullptr);
  EXPECT_TRUE(ptr5 == nullptr);

  // Release some memory.
  size_t free3 = space->AllocationSize(ptr3.Get(), nullptr);
  EXPECT_EQ(free3, ptr3_bytes_allocated);
  space->Free(self, ptr3.Assign(nullptr));
  EXPECT_LE(8U * MB, free3);

  // Succeeds, now that memory has been freed.
  size_t ptr6_bytes_allocated, ptr6_usable_size;
  Handle<mirror::Object> ptr6(
      hs.NewHandle(AllocWithGrowth(space, self, 9 * MB, &ptr6_bytes_allocated, &ptr6_usable_size)));
  EXPECT_TRUE(ptr6.Get() != nullptr);
  EXPECT_LE(9U * MB, ptr6_bytes_allocated);
  EXPECT_LE(9U * MB, ptr6_usable_size);
  EXPECT_LE(ptr6_usable_size, ptr6_bytes_allocated);

  // Final clean up.
  size_t free1 = space->AllocationSize(ptr1.Get(), nullptr);
  space->Free(self, ptr1.Assign(nullptr));
  EXPECT_LE(1U * MB, free1);
}

void SpaceTest::AllocAndFreeListTestBody(CreateSpaceFn create_space) {
  MallocSpace* space(create_space("test", 4 * MB, 16 * MB, 16 * MB, nullptr));
  ASSERT_TRUE(space != nullptr);

  // Make space findable to the heap, will also delete space when runtime is cleaned up
  AddSpace(space);
  Thread* self = Thread::Current();
  ScopedObjectAccess soa(self);

  // Succeeds, fits without adjusting the max allowed footprint.
  mirror::Object* lots_of_objects[1024];
  for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
    size_t allocation_size, usable_size;
    size_t size_of_zero_length_byte_array = SizeOfZeroLengthByteArray();
    lots_of_objects[i] = Alloc(space, self, size_of_zero_length_byte_array, &allocation_size,
                               &usable_size);
    EXPECT_TRUE(lots_of_objects[i] != nullptr);
    size_t computed_usable_size;
    EXPECT_EQ(allocation_size, space->AllocationSize(lots_of_objects[i], &computed_usable_size));
    EXPECT_EQ(usable_size, computed_usable_size);
  }

  // Release memory.
  space->FreeList(self, arraysize(lots_of_objects), lots_of_objects);

  // Succeeds, fits by adjusting the max allowed footprint.
  for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
    size_t allocation_size, usable_size;
    lots_of_objects[i] = AllocWithGrowth(space, self, 1024, &allocation_size, &usable_size);
    EXPECT_TRUE(lots_of_objects[i] != nullptr);
    size_t computed_usable_size;
    EXPECT_EQ(allocation_size, space->AllocationSize(lots_of_objects[i], &computed_usable_size));
    EXPECT_EQ(usable_size, computed_usable_size);
  }

  // Release memory.
  space->FreeList(self, arraysize(lots_of_objects), lots_of_objects);
}

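// Fills the space up to growth_limit, then frees and trims it. A positive object_size means
// fixed-size allocations; a negative value means pseudo-random sizes up to -object_size.
// Rounds above 1 use AllocWithGrowth instead of Alloc.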
void SpaceTest::SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t object_size,
                                                    int round, size_t growth_limit) {
  if (((object_size > 0 && object_size >= static_cast<intptr_t>(growth_limit))) ||
      ((object_size < 0 && -object_size >= static_cast<intptr_t>(growth_limit)))) {
    // No allocation can succeed
    return;
  }

  // The space's footprint equals amount of resources requested from system
  size_t footprint = space->GetFootprint();

  // The space must at least have its book keeping allocated
  EXPECT_GT(footprint, 0u);

  // But it shouldn't exceed the initial size
  EXPECT_LE(footprint, growth_limit);

  // space's size shouldn't exceed the initial size
  EXPECT_LE(space->Size(), growth_limit);

  // this invariant should always hold or else the space has grown to be larger than what the
  // space believes its size is (which will break invariants)
  EXPECT_GE(space->Size(), footprint);

  // Fill the space with lots of small objects up to the growth limit
  size_t max_objects = (growth_limit / (object_size > 0 ? object_size : 8)) + 1;
  std::unique_ptr<mirror::Object*[]> lots_of_objects(new mirror::Object*[max_objects]);
  size_t last_object = 0;  // last object for which allocation succeeded
  size_t amount_allocated = 0;  // amount of space allocated
  Thread* self = Thread::Current();
  ScopedObjectAccess soa(self);
  size_t rand_seed = 123456789;
  for (size_t i = 0; i < max_objects; i++) {
    size_t alloc_fails = 0;  // number of failed allocations
    size_t max_fails = 30;  // number of times we fail allocation before giving up
    for (; alloc_fails < max_fails; alloc_fails++) {
      size_t alloc_size;
      if (object_size > 0) {
        alloc_size = object_size;
      } else {
        alloc_size = test_rand(&rand_seed) % static_cast<size_t>(-object_size);
        // Note the minimum size, which is the size of a zero-length byte array.
        size_t size_of_zero_length_byte_array = SizeOfZeroLengthByteArray();
        if (alloc_size < size_of_zero_length_byte_array) {
          alloc_size = size_of_zero_length_byte_array;
        }
      }
      StackHandleScope<1> hs(soa.Self());
      auto object(hs.NewHandle<mirror::Object>(nullptr));
      size_t bytes_allocated = 0;
      if (round <= 1) {
        object.Assign(Alloc(space, self, alloc_size, &bytes_allocated, nullptr));
      } else {
        object.Assign(AllocWithGrowth(space, self, alloc_size, &bytes_allocated, nullptr));
      }
      footprint = space->GetFootprint();
      EXPECT_GE(space->Size(), footprint);  // invariant
      if (object.Get() != nullptr) {  // allocation succeeded
        lots_of_objects[i] = object.Get();
        size_t allocation_size = space->AllocationSize(object.Get(), nullptr);
        EXPECT_EQ(bytes_allocated, allocation_size);
        if (object_size > 0) {
          EXPECT_GE(allocation_size, static_cast<size_t>(object_size));
        } else {
          EXPECT_GE(allocation_size, 8u);
        }
        amount_allocated += allocation_size;
        break;
      }
    }
    if (alloc_fails == max_fails) {
      last_object = i;
      break;
    }
  }
  CHECK_NE(last_object, 0u);  // we should have filled the space
  EXPECT_GT(amount_allocated, 0u);

  // We shouldn't have gone past the growth_limit
  EXPECT_LE(amount_allocated, growth_limit);
  EXPECT_LE(footprint, growth_limit);
  EXPECT_LE(space->Size(), growth_limit);

  // footprint and size should agree with amount allocated
  EXPECT_GE(footprint, amount_allocated);
  EXPECT_GE(space->Size(), amount_allocated);

  // Release storage in a semi-adhoc manner
  size_t free_increment = 96;
  while (true) {
    {
      ScopedThreadStateChange tsc(self, kNative);
      // Give the space a haircut.
      space->Trim();
    }

    // Bounds sanity
    footprint = space->GetFootprint();
    EXPECT_LE(amount_allocated, growth_limit);
    EXPECT_GE(footprint, amount_allocated);
    EXPECT_LE(footprint, growth_limit);
    EXPECT_GE(space->Size(), amount_allocated);
    EXPECT_LE(space->Size(), growth_limit);

    if (free_increment == 0) {
      break;
    }

    // Free some objects
    for (size_t i = 0; i < last_object; i += free_increment) {
      mirror::Object* object = lots_of_objects.get()[i];
      if (object == nullptr) {
        continue;
      }
      size_t allocation_size = space->AllocationSize(object, nullptr);
      if (object_size > 0) {
        EXPECT_GE(allocation_size, static_cast<size_t>(object_size));
      } else {
        EXPECT_GE(allocation_size, 8u);
      }
      space->Free(self, object);
      lots_of_objects.get()[i] = nullptr;
      amount_allocated -= allocation_size;
      footprint = space->GetFootprint();
      EXPECT_GE(space->Size(), footprint);  // invariant
    }

    free_increment >>= 1;
  }

  // The space has become empty here before allocating a large object
  // below. For RosAlloc, revoke thread-local runs, which are kept
  // even when empty for a performance reason, so that they won't
  // cause the following large object allocation to fail due to
  // potential fragmentation. Note they are normally revoked at each
  // GC (but no GC here.)
  space->RevokeAllThreadLocalBuffers();

  // All memory was released, try a large allocation to check freed memory is being coalesced
  StackHandleScope<1> hs(soa.Self());
  auto large_object(hs.NewHandle<mirror::Object>(nullptr));
  size_t three_quarters_space = (growth_limit / 2) + (growth_limit / 4);
  size_t bytes_allocated = 0;
  if (round <= 1) {
    large_object.Assign(Alloc(space, self, three_quarters_space, &bytes_allocated, nullptr));
  } else {
    large_object.Assign(AllocWithGrowth(space, self, three_quarters_space, &bytes_allocated,
                                        nullptr));
  }
  EXPECT_TRUE(large_object.Get() != nullptr);

  // Sanity check footprint
  footprint = space->GetFootprint();
  EXPECT_LE(footprint, growth_limit);
  EXPECT_GE(space->Size(), footprint);
  EXPECT_LE(space->Size(), growth_limit);

  // Clean up
  space->Free(self, large_object.Assign(nullptr));

  // Sanity check footprint
  footprint = space->GetFootprint();
  EXPECT_LE(footprint, growth_limit);
  EXPECT_GE(space->Size(), footprint);
  EXPECT_LE(space->Size(), growth_limit);
}

void SpaceTest::SizeFootPrintGrowthLimitAndTrimDriver(size_t object_size, CreateSpaceFn create_space) {
  if (object_size < SizeOfZeroLengthByteArray()) {
    // Too small for the object layout/model.
    return;
  }
  size_t initial_size = 4 * MB;
  size_t growth_limit = 8 * MB;
  size_t capacity = 16 * MB;
  MallocSpace* space(create_space("test", initial_size, growth_limit, capacity, nullptr));
  ASSERT_TRUE(space != nullptr);

  // Basic sanity
  EXPECT_EQ(space->Capacity(), growth_limit);
  EXPECT_EQ(space->NonGrowthLimitCapacity(), capacity);

  // Make space findable to the heap, will also delete space when runtime is cleaned up
  AddSpace(space);

  // In this round we don't allocate with growth and therefore can't grow past the initial size.
  // This effectively makes the growth_limit the initial_size, so assert this.
  SizeFootPrintGrowthLimitAndTrimBody(space, object_size, 1, initial_size);
  SizeFootPrintGrowthLimitAndTrimBody(space, object_size, 2, growth_limit);
  // Remove growth limit
  space->ClearGrowthLimit();
  EXPECT_EQ(space->Capacity(), capacity);
  SizeFootPrintGrowthLimitAndTrimBody(space, object_size, 3, capacity);
}

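// The macros below stamp out the actual gtest cases for a concrete space implementation,
// one fixture per flavor: base tests, fixed-size allocation tests, and random-size
// allocation tests.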
#define TEST_SizeFootPrintGrowthLimitAndTrimStatic(name, spaceName, spaceFn, size) \
  TEST_F(spaceName##StaticTest, SizeFootPrintGrowthLimitAndTrim_AllocationsOf_##name) { \
    SizeFootPrintGrowthLimitAndTrimDriver(size, spaceFn); \
  }

#define TEST_SizeFootPrintGrowthLimitAndTrimRandom(name, spaceName, spaceFn, size) \
  TEST_F(spaceName##RandomTest, SizeFootPrintGrowthLimitAndTrim_RandomAllocationsWithMax_##name) { \
    SizeFootPrintGrowthLimitAndTrimDriver(-size, spaceFn); \
  }

#define TEST_SPACE_CREATE_FN_BASE(spaceName, spaceFn) \
  class spaceName##BaseTest : public SpaceTest { \
  }; \
  \
  TEST_F(spaceName##BaseTest, Init) { \
    InitTestBody(spaceFn); \
  } \
  TEST_F(spaceName##BaseTest, ZygoteSpace) { \
    ZygoteSpaceTestBody(spaceFn); \
  } \
  TEST_F(spaceName##BaseTest, AllocAndFree) { \
    AllocAndFreeTestBody(spaceFn); \
  } \
  TEST_F(spaceName##BaseTest, AllocAndFreeList) { \
    AllocAndFreeListTestBody(spaceFn); \
  }

#define TEST_SPACE_CREATE_FN_STATIC(spaceName, spaceFn) \
  class spaceName##StaticTest : public SpaceTest { \
  }; \
  \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(12B, spaceName, spaceFn, 12) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(16B, spaceName, spaceFn, 16) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(24B, spaceName, spaceFn, 24) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(32B, spaceName, spaceFn, 32) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(64B, spaceName, spaceFn, 64) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(128B, spaceName, spaceFn, 128) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(1KB, spaceName, spaceFn, 1 * KB) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(4KB, spaceName, spaceFn, 4 * KB) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(1MB, spaceName, spaceFn, 1 * MB) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(4MB, spaceName, spaceFn, 4 * MB) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(8MB, spaceName, spaceFn, 8 * MB)

#define TEST_SPACE_CREATE_FN_RANDOM(spaceName, spaceFn) \
  class spaceName##RandomTest : public SpaceTest { \
  }; \
  \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(16B, spaceName, spaceFn, 16) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(24B, spaceName, spaceFn, 24) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(32B, spaceName, spaceFn, 32) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(64B, spaceName, spaceFn, 64) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(128B, spaceName, spaceFn, 128) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(1KB, spaceName, spaceFn, 1 * KB) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(4KB, spaceName, spaceFn, 4 * KB) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(1MB, spaceName, spaceFn, 1 * MB) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(4MB, spaceName, spaceFn, 4 * MB) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(8MB, spaceName, spaceFn, 8 * MB)
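
// A concrete space test would instantiate these with its own factory; for example
// (hypothetical names, not defined in this header):
//   TEST_SPACE_CREATE_FN_BASE(MySpace, CreateMySpace)
//   TEST_SPACE_CREATE_FN_STATIC(MySpace, CreateMySpace)
//   TEST_SPACE_CREATE_FN_RANDOM(MySpace, CreateMySpace)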

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_SPACE_TEST_H_