/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_SPACE_TEST_H_
#define ART_RUNTIME_GC_SPACE_SPACE_TEST_H_

#include <stdint.h>
#include <memory>

#include "common_runtime_test.h"
#include "globals.h"
#include "mirror/array-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
#include "scoped_thread_state_change.h"
#include "zygote_space.h"

namespace art {
namespace gc {
namespace space {

template <class Super>
class SpaceTest : public Super {
 public:
  jobject byte_array_class_ = nullptr;

  void AddSpace(ContinuousSpace* space, bool revoke = true) {
    Heap* heap = Runtime::Current()->GetHeap();
    if (revoke) {
      heap->RevokeAllThreadLocalBuffers();
    }
    heap->AddSpace(space);
    heap->SetSpaceAsDefault(space);
  }

  mirror::Class* GetByteArrayClass(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) {
    StackHandleScope<1> hs(self);
    auto null_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
    if (byte_array_class_ == nullptr) {
      mirror::Class* byte_array_class =
          Runtime::Current()->GetClassLinker()->FindClass(self, "[B", null_loader);
      EXPECT_TRUE(byte_array_class != nullptr);
      byte_array_class_ = self->GetJniEnv()->NewLocalRef(byte_array_class);
      EXPECT_TRUE(byte_array_class_ != nullptr);
    }
    return reinterpret_cast<mirror::Class*>(self->DecodeJObject(byte_array_class_));
  }

  mirror::Object* Alloc(space::MallocSpace* alloc_space,
                        Thread* self,
                        size_t bytes,
                        size_t* bytes_allocated,
                        size_t* usable_size,
                        size_t* bytes_tl_bulk_allocated)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    StackHandleScope<1> hs(self);
    Handle<mirror::Class> byte_array_class(hs.NewHandle(GetByteArrayClass(self)));
    mirror::Object* obj = alloc_space->Alloc(self,
                                             bytes,
                                             bytes_allocated,
                                             usable_size,
                                             bytes_tl_bulk_allocated);
    if (obj != nullptr) {
      InstallClass(obj, byte_array_class.Get(), bytes);
    }
    return obj;
  }

  mirror::Object* AllocWithGrowth(space::MallocSpace* alloc_space,
                                  Thread* self,
                                  size_t bytes,
                                  size_t* bytes_allocated,
                                  size_t* usable_size,
                                  size_t* bytes_tl_bulk_allocated)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    StackHandleScope<1> hs(self);
    Handle<mirror::Class> byte_array_class(hs.NewHandle(GetByteArrayClass(self)));
    mirror::Object* obj = alloc_space->AllocWithGrowth(self, bytes, bytes_allocated, usable_size,
                                                       bytes_tl_bulk_allocated);
    if (obj != nullptr) {
      InstallClass(obj, byte_array_class.Get(), bytes);
    }
    return obj;
  }

  void InstallClass(mirror::Object* o, mirror::Class* byte_array_class, size_t size)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    // Note the minimum size, which is the size of a zero-length byte array.
    EXPECT_GE(size, SizeOfZeroLengthByteArray());
    EXPECT_TRUE(byte_array_class != nullptr);
    o->SetClass(byte_array_class);
    if (kUseBakerOrBrooksReadBarrier) {
      // Like the proper heap object allocation, install and verify
      // the correct read barrier pointer.
      if (kUseBrooksReadBarrier) {
        o->SetReadBarrierPointer(o);
      }
      o->AssertReadBarrierPointer();
    }
    mirror::Array* arr = o->AsArray<kVerifyNone>();
    size_t header_size = SizeOfZeroLengthByteArray();
    int32_t length = size - header_size;
    arr->SetLength(length);
    EXPECT_EQ(arr->SizeOf<kVerifyNone>(), size);
  }

  static size_t SizeOfZeroLengthByteArray() {
    return mirror::Array::DataOffset(Primitive::ComponentSize(Primitive::kPrimByte)).Uint32Value();
  }

  typedef MallocSpace* (*CreateSpaceFn)(const std::string& name, size_t initial_size, size_t growth_limit,
                                        size_t capacity, uint8_t* requested_begin);

  void SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t object_size,
                                           int round, size_t growth_limit);
  void SizeFootPrintGrowthLimitAndTrimDriver(size_t object_size, CreateSpaceFn create_space);
};
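
// A minimal sketch (not part of the original header) of how a derived test could use the helpers
// above; CreateTestSpace is a hypothetical factory matching CreateSpaceFn, and the fixture name is
// illustrative only:
//
//   TEST_F(ExampleSpaceStaticTest, AllocSingleByteArray) {
//     MallocSpace* space = CreateTestSpace("example", 4 * MB, 8 * MB, 16 * MB, nullptr);
//     AddSpace(space);
//     Thread* self = Thread::Current();
//     ScopedObjectAccess soa(self);
//     size_t bytes_allocated = 0;
//     size_t usable_size = 0;
//     size_t bytes_tl_bulk_allocated = 0;
//     // Allocate one 64-byte object; Alloc() installs the byte array class for us.
//     mirror::Object* obj =
//         Alloc(space, self, 64, &bytes_allocated, &usable_size, &bytes_tl_bulk_allocated);
//     EXPECT_TRUE(obj != nullptr);
//     EXPECT_EQ(bytes_allocated, space->AllocationSize(obj, nullptr));
//     space->Free(self, obj);
//   }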

// Simple deterministic linear congruential generator used to vary allocation sizes in the
// random-size tests; callers seed it with a fixed value so runs are reproducible.
static inline size_t test_rand(size_t* seed) {
  *seed = *seed * 1103515245 + 12345;
  return *seed;
}

template <class Super>
void SpaceTest<Super>::SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space,
                                                           intptr_t object_size,
                                                           int round,
                                                           size_t growth_limit) {
  if (((object_size > 0 && object_size >= static_cast<intptr_t>(growth_limit))) ||
      ((object_size < 0 && -object_size >= static_cast<intptr_t>(growth_limit)))) {
    // No allocation can succeed
    return;
  }

  // The space's footprint equals amount of resources requested from system
  size_t footprint = space->GetFootprint();

  // The space must at least have its book keeping allocated
  EXPECT_GT(footprint, 0u);

  // But it shouldn't exceed the initial size
  EXPECT_LE(footprint, growth_limit);

  // space's size shouldn't exceed the initial size
  EXPECT_LE(space->Size(), growth_limit);

  // this invariant should always hold or else the space has grown to be larger than what the
  // space believes its size is (which will break invariants)
  EXPECT_GE(space->Size(), footprint);

  // Fill the space with lots of small objects up to the growth limit
  size_t max_objects = (growth_limit / (object_size > 0 ? object_size : 8)) + 1;
  std::unique_ptr<mirror::Object*[]> lots_of_objects(new mirror::Object*[max_objects]);
  size_t last_object = 0;  // last object for which allocation succeeded
  size_t amount_allocated = 0;  // amount of space allocated
  Thread* self = Thread::Current();
  ScopedObjectAccess soa(self);
  size_t rand_seed = 123456789;
  for (size_t i = 0; i < max_objects; i++) {
    size_t alloc_fails = 0;  // number of failed allocations
    size_t max_fails = 30;  // number of times we fail allocation before giving up
    for (; alloc_fails < max_fails; alloc_fails++) {
      size_t alloc_size;
      if (object_size > 0) {
        alloc_size = object_size;
      } else {
        alloc_size = test_rand(&rand_seed) % static_cast<size_t>(-object_size);
        // Note the minimum size, which is the size of a zero-length byte array.
        size_t size_of_zero_length_byte_array = SizeOfZeroLengthByteArray();
        if (alloc_size < size_of_zero_length_byte_array) {
          alloc_size = size_of_zero_length_byte_array;
        }
      }
      StackHandleScope<1> hs(soa.Self());
      auto object(hs.NewHandle<mirror::Object>(nullptr));
      size_t bytes_allocated = 0;
      size_t bytes_tl_bulk_allocated;
      if (round <= 1) {
        object.Assign(Alloc(space, self, alloc_size, &bytes_allocated, nullptr,
                            &bytes_tl_bulk_allocated));
      } else {
        object.Assign(AllocWithGrowth(space, self, alloc_size, &bytes_allocated, nullptr,
                                      &bytes_tl_bulk_allocated));
      }
      footprint = space->GetFootprint();
      EXPECT_GE(space->Size(), footprint);  // invariant
      if (object.Get() != nullptr) {  // allocation succeeded
        lots_of_objects[i] = object.Get();
        size_t allocation_size = space->AllocationSize(object.Get(), nullptr);
        EXPECT_EQ(bytes_allocated, allocation_size);
        if (object_size > 0) {
          EXPECT_GE(allocation_size, static_cast<size_t>(object_size));
        } else {
          EXPECT_GE(allocation_size, 8u);
        }
        EXPECT_TRUE(bytes_tl_bulk_allocated == 0 ||
                    bytes_tl_bulk_allocated >= allocation_size);
        amount_allocated += allocation_size;
        break;
      }
    }
    if (alloc_fails == max_fails) {
      last_object = i;
      break;
    }
  }
  CHECK_NE(last_object, 0u);  // we should have filled the space
  EXPECT_GT(amount_allocated, 0u);

  // We shouldn't have gone past the growth_limit
  EXPECT_LE(amount_allocated, growth_limit);
  EXPECT_LE(footprint, growth_limit);
  EXPECT_LE(space->Size(), growth_limit);

  // footprint and size should agree with amount allocated
  EXPECT_GE(footprint, amount_allocated);
  EXPECT_GE(space->Size(), amount_allocated);

  // Release storage in a semi-adhoc manner
  size_t free_increment = 96;
  while (true) {
    {
      ScopedThreadStateChange tsc(self, kNative);
      // Give the space a haircut.
      space->Trim();
    }

    // Bounds sanity
    footprint = space->GetFootprint();
    EXPECT_LE(amount_allocated, growth_limit);
    EXPECT_GE(footprint, amount_allocated);
    EXPECT_LE(footprint, growth_limit);
    EXPECT_GE(space->Size(), amount_allocated);
    EXPECT_LE(space->Size(), growth_limit);

    if (free_increment == 0) {
      break;
    }

    // Free some objects
    for (size_t i = 0; i < last_object; i += free_increment) {
      mirror::Object* object = lots_of_objects.get()[i];
      if (object == nullptr) {
        continue;
      }
      size_t allocation_size = space->AllocationSize(object, nullptr);
      if (object_size > 0) {
        EXPECT_GE(allocation_size, static_cast<size_t>(object_size));
      } else {
        EXPECT_GE(allocation_size, 8u);
      }
      space->Free(self, object);
      lots_of_objects.get()[i] = nullptr;
      amount_allocated -= allocation_size;
      footprint = space->GetFootprint();
      EXPECT_GE(space->Size(), footprint);  // invariant
    }

    free_increment >>= 1;
  }

  // The space has become empty here before allocating a large object
  // below. For RosAlloc, revoke thread-local runs, which are kept
  // even when empty for a performance reason, so that they won't
  // cause the following large object allocation to fail due to
  // potential fragmentation. Note they are normally revoked at each
  // GC (but no GC here.)
  space->RevokeAllThreadLocalBuffers();

  // All memory was released, try a large allocation to check freed memory is being coalesced
  StackHandleScope<1> hs(soa.Self());
  auto large_object(hs.NewHandle<mirror::Object>(nullptr));
  size_t three_quarters_space = (growth_limit / 2) + (growth_limit / 4);
  size_t bytes_allocated = 0;
  size_t bytes_tl_bulk_allocated;
  if (round <= 1) {
    large_object.Assign(Alloc(space, self, three_quarters_space, &bytes_allocated, nullptr,
                              &bytes_tl_bulk_allocated));
  } else {
    large_object.Assign(AllocWithGrowth(space, self, three_quarters_space, &bytes_allocated,
                                        nullptr, &bytes_tl_bulk_allocated));
  }
  EXPECT_TRUE(large_object.Get() != nullptr);

  // Sanity check footprint
  footprint = space->GetFootprint();
  EXPECT_LE(footprint, growth_limit);
  EXPECT_GE(space->Size(), footprint);
  EXPECT_LE(space->Size(), growth_limit);

  // Clean up
  space->Free(self, large_object.Assign(nullptr));

  // Sanity check footprint
  footprint = space->GetFootprint();
  EXPECT_LE(footprint, growth_limit);
  EXPECT_GE(space->Size(), footprint);
  EXPECT_LE(space->Size(), growth_limit);
}

template <class Super>
void SpaceTest<Super>::SizeFootPrintGrowthLimitAndTrimDriver(size_t object_size,
                                                             CreateSpaceFn create_space) {
  if (object_size < SizeOfZeroLengthByteArray()) {
    // Too small for the object layout/model.
    return;
  }
  size_t initial_size = 4 * MB;
  size_t growth_limit = 8 * MB;
  size_t capacity = 16 * MB;
  MallocSpace* space(create_space("test", initial_size, growth_limit, capacity, nullptr));
  ASSERT_TRUE(space != nullptr);

  // Basic sanity
  EXPECT_EQ(space->Capacity(), growth_limit);
  EXPECT_EQ(space->NonGrowthLimitCapacity(), capacity);

  // Make space findable to the heap, will also delete space when runtime is cleaned up
  AddSpace(space);

  // In this round we don't allocate with growth and therefore can't grow past the initial size.
  // This effectively makes the growth_limit the initial_size, so assert this.
  SizeFootPrintGrowthLimitAndTrimBody(space, object_size, 1, initial_size);
  SizeFootPrintGrowthLimitAndTrimBody(space, object_size, 2, growth_limit);
  // Remove growth limit
  space->ClearGrowthLimit();
  EXPECT_EQ(space->Capacity(), capacity);
  SizeFootPrintGrowthLimitAndTrimBody(space, object_size, 3, capacity);
}

#define TEST_SizeFootPrintGrowthLimitAndTrimStatic(name, spaceName, spaceFn, size) \
  TEST_F(spaceName##StaticTest, SizeFootPrintGrowthLimitAndTrim_AllocationsOf_##name) { \
    SizeFootPrintGrowthLimitAndTrimDriver(size, spaceFn); \
  }

#define TEST_SizeFootPrintGrowthLimitAndTrimRandom(name, spaceName, spaceFn, size) \
  TEST_F(spaceName##RandomTest, SizeFootPrintGrowthLimitAndTrim_RandomAllocationsWithMax_##name) { \
    SizeFootPrintGrowthLimitAndTrimDriver(-size, spaceFn); \
  }

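// For illustration (hypothetical names, not part of this header): invoking one of the macros
// above, e.g. TEST_SizeFootPrintGrowthLimitAndTrimStatic(4KB, ExampleSpace, CreateExampleSpace,
// 4 * KB), expands to a gtest case on the ExampleSpaceStaticTest fixture:
//
//   TEST_F(ExampleSpaceStaticTest, SizeFootPrintGrowthLimitAndTrim_AllocationsOf_4KB) {
//     SizeFootPrintGrowthLimitAndTrimDriver(4 * KB, CreateExampleSpace);
//   }
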
#define TEST_SPACE_CREATE_FN_STATIC(spaceName, spaceFn) \
  class spaceName##StaticTest : public SpaceTest<CommonRuntimeTest> { \
  }; \
  \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(12B, spaceName, spaceFn, 12) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(16B, spaceName, spaceFn, 16) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(24B, spaceName, spaceFn, 24) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(32B, spaceName, spaceFn, 32) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(64B, spaceName, spaceFn, 64) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(128B, spaceName, spaceFn, 128) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(1KB, spaceName, spaceFn, 1 * KB) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(4KB, spaceName, spaceFn, 4 * KB) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(1MB, spaceName, spaceFn, 1 * MB) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(4MB, spaceName, spaceFn, 4 * MB) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(8MB, spaceName, spaceFn, 8 * MB)

#define TEST_SPACE_CREATE_FN_RANDOM(spaceName, spaceFn) \
  class spaceName##RandomTest : public SpaceTest<CommonRuntimeTest> { \
  }; \
  \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(16B, spaceName, spaceFn, 16) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(24B, spaceName, spaceFn, 24) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(32B, spaceName, spaceFn, 32) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(64B, spaceName, spaceFn, 64) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(128B, spaceName, spaceFn, 128) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(1KB, spaceName, spaceFn, 1 * KB) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(4KB, spaceName, spaceFn, 4 * KB) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(1MB, spaceName, spaceFn, 1 * MB) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(4MB, spaceName, spaceFn, 4 * MB) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(8MB, spaceName, spaceFn, 8 * MB)

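// A sketch of a typical instantiation (hypothetical names, not part of this header): a concrete
// space test supplies a factory matching the CreateSpaceFn signature and invokes the macros once
// per fixture flavor:
//
//   MallocSpace* CreateExampleSpace(const std::string& name, size_t initial_size,
//                                   size_t growth_limit, size_t capacity,
//                                   uint8_t* requested_begin);  // assumed factory
//
//   TEST_SPACE_CREATE_FN_STATIC(ExampleSpace, CreateExampleSpace)
//   TEST_SPACE_CREATE_FN_RANDOM(ExampleSpace, CreateExampleSpace)
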
}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_SPACE_TEST_H_