blob: 5ec33350e9d5d5cbd6382385bccdd0a8bc131c5e [file] [log] [blame]
Brian Carlstrom9004cb62013-07-26 15:48:31 -07001/*
2 * Copyright (C) 2013 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "mem_map.h"
18
Ian Rogers700a4022014-05-19 16:49:03 -070019#include <memory>
20
Brian Carlstrom9004cb62013-07-26 15:48:31 -070021#include "gtest/gtest.h"
22
23namespace art {
24
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -070025class MemMapTest : public testing::Test {
26 public:
Ian Rogersef7d42f2014-01-06 12:55:46 -080027 static byte* BaseBegin(MemMap* mem_map) {
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -070028 return reinterpret_cast<byte*>(mem_map->base_begin_);
29 }
Ian Rogersef7d42f2014-01-06 12:55:46 -080030 static size_t BaseSize(MemMap* mem_map) {
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -070031 return mem_map->base_size_;
32 }
Ian Rogersef7d42f2014-01-06 12:55:46 -080033
34 static void RemapAtEndTest(bool low_4gb) {
35 std::string error_msg;
36 // Cast the page size to size_t.
37 const size_t page_size = static_cast<size_t>(kPageSize);
38 // Map a two-page memory region.
39 MemMap* m0 = MemMap::MapAnonymous("MemMapTest_RemapAtEndTest_map0",
40 nullptr,
41 2 * page_size,
42 PROT_READ | PROT_WRITE,
43 low_4gb,
44 &error_msg);
45 // Check its state and write to it.
46 byte* base0 = m0->Begin();
47 ASSERT_TRUE(base0 != nullptr) << error_msg;
48 size_t size0 = m0->Size();
49 EXPECT_EQ(m0->Size(), 2 * page_size);
50 EXPECT_EQ(BaseBegin(m0), base0);
51 EXPECT_EQ(BaseSize(m0), size0);
52 memset(base0, 42, 2 * page_size);
53 // Remap the latter half into a second MemMap.
54 MemMap* m1 = m0->RemapAtEnd(base0 + page_size,
55 "MemMapTest_RemapAtEndTest_map1",
56 PROT_READ | PROT_WRITE,
57 &error_msg);
58 // Check the states of the two maps.
59 EXPECT_EQ(m0->Begin(), base0) << error_msg;
60 EXPECT_EQ(m0->Size(), page_size);
61 EXPECT_EQ(BaseBegin(m0), base0);
62 EXPECT_EQ(BaseSize(m0), page_size);
63 byte* base1 = m1->Begin();
64 size_t size1 = m1->Size();
65 EXPECT_EQ(base1, base0 + page_size);
66 EXPECT_EQ(size1, page_size);
67 EXPECT_EQ(BaseBegin(m1), base1);
68 EXPECT_EQ(BaseSize(m1), size1);
69 // Write to the second region.
70 memset(base1, 43, page_size);
71 // Check the contents of the two regions.
72 for (size_t i = 0; i < page_size; ++i) {
73 EXPECT_EQ(base0[i], 42);
74 }
75 for (size_t i = 0; i < page_size; ++i) {
76 EXPECT_EQ(base1[i], 43);
77 }
78 // Unmap the first region.
79 delete m0;
80 // Make sure the second region is still accessible after the first
81 // region is unmapped.
82 for (size_t i = 0; i < page_size; ++i) {
83 EXPECT_EQ(base1[i], 43);
84 }
85 delete m1;
86 }
Andreas Gamped8f26db2014-05-19 17:01:13 -070087
Mathieu Chartierc54e12a2014-10-14 16:22:41 -070088 void CommonInit() {
89 MemMap::Init();
90 }
91
Andreas Gamped8f26db2014-05-19 17:01:13 -070092#if defined(__LP64__) && !defined(__x86_64__)
93 static uintptr_t GetLinearScanPos() {
94 return MemMap::next_mem_pos_;
95 }
96#endif
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -070097};
Brian Carlstrom9004cb62013-07-26 15:48:31 -070098
#if defined(__LP64__) && !defined(__x86_64__)

#ifdef __BIONIC__
extern uintptr_t CreateStartPos(uint64_t input);
#endif

// Verifies the initial linear-scan position and, on Bionic, the randomized
// start-position generator.
TEST_F(MemMapTest, Start) {
  CommonInit();
  const uintptr_t scan_pos = GetLinearScanPos();
  // The scan cursor must begin somewhere in [64KB, ART_BASE_ADDRESS).
  EXPECT_LE(64 * KB, scan_pos);
  EXPECT_LT(scan_pos, static_cast<uintptr_t>(ART_BASE_ADDRESS));
#ifdef __BIONIC__
  // Sample a series of start positions; consecutive samples should differ.
  uintptr_t previous = 0;
  for (size_t i = 0; i < 100; ++i) {
    const uintptr_t candidate = CreateStartPos(i * kPageSize);
    EXPECT_NE(previous, candidate);
    previous = candidate;
  }

  // Even the maximal input must map below ART_BASE_ADDRESS.
  EXPECT_LT(CreateStartPos(~0), static_cast<uintptr_t>(ART_BASE_ADDRESS));
#endif
}
#endif
125
Brian Carlstrom9004cb62013-07-26 15:48:31 -0700126TEST_F(MemMapTest, MapAnonymousEmpty) {
Mathieu Chartierc54e12a2014-10-14 16:22:41 -0700127 CommonInit();
Ian Rogers8d31bbd2013-10-13 10:44:14 -0700128 std::string error_msg;
Ian Rogers700a4022014-05-19 16:49:03 -0700129 std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousEmpty",
Ian Rogersef7d42f2014-01-06 12:55:46 -0800130 nullptr,
Brian Carlstrom9004cb62013-07-26 15:48:31 -0700131 0,
Ian Rogers8d31bbd2013-10-13 10:44:14 -0700132 PROT_READ,
Ian Rogersef7d42f2014-01-06 12:55:46 -0800133 false,
Ian Rogers8d31bbd2013-10-13 10:44:14 -0700134 &error_msg));
Ian Rogersef7d42f2014-01-06 12:55:46 -0800135 ASSERT_TRUE(map.get() != nullptr) << error_msg;
136 ASSERT_TRUE(error_msg.empty());
137 map.reset(MemMap::MapAnonymous("MapAnonymousEmpty",
138 nullptr,
139 kPageSize,
140 PROT_READ | PROT_WRITE,
141 false,
142 &error_msg));
143 ASSERT_TRUE(map.get() != nullptr) << error_msg;
Ian Rogers8d31bbd2013-10-13 10:44:14 -0700144 ASSERT_TRUE(error_msg.empty());
Brian Carlstrom9004cb62013-07-26 15:48:31 -0700145}
146
Ian Rogersef7d42f2014-01-06 12:55:46 -0800147#ifdef __LP64__
148TEST_F(MemMapTest, MapAnonymousEmpty32bit) {
Mathieu Chartierc54e12a2014-10-14 16:22:41 -0700149 CommonInit();
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -0700150 std::string error_msg;
Ian Rogers700a4022014-05-19 16:49:03 -0700151 std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousEmpty",
Ian Rogersef7d42f2014-01-06 12:55:46 -0800152 nullptr,
153 kPageSize,
154 PROT_READ | PROT_WRITE,
155 true,
156 &error_msg));
157 ASSERT_TRUE(map.get() != nullptr) << error_msg;
158 ASSERT_TRUE(error_msg.empty());
159 ASSERT_LT(reinterpret_cast<uintptr_t>(BaseBegin(map.get())), 1ULL << 32);
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -0700160}
Ian Rogersef7d42f2014-01-06 12:55:46 -0800161#endif
162
Hiroshi Yamauchi4fb5df82014-03-13 15:10:27 -0700163TEST_F(MemMapTest, MapAnonymousExactAddr) {
Mathieu Chartierc54e12a2014-10-14 16:22:41 -0700164 CommonInit();
Hiroshi Yamauchi4fb5df82014-03-13 15:10:27 -0700165 std::string error_msg;
166 // Map at an address that should work, which should succeed.
Ian Rogers700a4022014-05-19 16:49:03 -0700167 std::unique_ptr<MemMap> map0(MemMap::MapAnonymous("MapAnonymous0",
Hiroshi Yamauchi4fb5df82014-03-13 15:10:27 -0700168 reinterpret_cast<byte*>(ART_BASE_ADDRESS),
169 kPageSize,
170 PROT_READ | PROT_WRITE,
171 false,
172 &error_msg));
173 ASSERT_TRUE(map0.get() != nullptr) << error_msg;
174 ASSERT_TRUE(error_msg.empty());
175 ASSERT_TRUE(map0->BaseBegin() == reinterpret_cast<void*>(ART_BASE_ADDRESS));
176 // Map at an unspecified address, which should succeed.
Ian Rogers700a4022014-05-19 16:49:03 -0700177 std::unique_ptr<MemMap> map1(MemMap::MapAnonymous("MapAnonymous1",
Hiroshi Yamauchi4fb5df82014-03-13 15:10:27 -0700178 nullptr,
179 kPageSize,
180 PROT_READ | PROT_WRITE,
181 false,
182 &error_msg));
183 ASSERT_TRUE(map1.get() != nullptr) << error_msg;
184 ASSERT_TRUE(error_msg.empty());
185 ASSERT_TRUE(map1->BaseBegin() != nullptr);
186 // Attempt to map at the same address, which should fail.
Ian Rogers700a4022014-05-19 16:49:03 -0700187 std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymous2",
Hiroshi Yamauchi4fb5df82014-03-13 15:10:27 -0700188 reinterpret_cast<byte*>(map1->BaseBegin()),
189 kPageSize,
190 PROT_READ | PROT_WRITE,
191 false,
192 &error_msg));
193 ASSERT_TRUE(map2.get() == nullptr) << error_msg;
194 ASSERT_TRUE(!error_msg.empty());
195}
196
// Runs the shared RemapAtEnd scenario with no address-space restriction.
TEST_F(MemMapTest, RemapAtEnd) {
  RemapAtEndTest(false);
}

#ifdef __LP64__
// Runs the shared RemapAtEnd scenario with the mapping constrained to the
// low 4GB (only meaningful on 64-bit hosts).
TEST_F(MemMapTest, RemapAtEnd32bit) {
  RemapAtEndTest(true);
}
#endif
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -0700206
Qiming Shi84d49cc2014-04-24 15:38:41 +0800207TEST_F(MemMapTest, MapAnonymousExactAddr32bitHighAddr) {
Douglas Leung859c2552014-06-11 11:47:09 -0700208 uintptr_t start_addr = ART_BASE_ADDRESS + 0x1000000;
Qiming Shi84d49cc2014-04-24 15:38:41 +0800209 std::string error_msg;
Mathieu Chartierc54e12a2014-10-14 16:22:41 -0700210 CommonInit();
Ian Rogers700a4022014-05-19 16:49:03 -0700211 std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousExactAddr32bitHighAddr",
Douglas Leung859c2552014-06-11 11:47:09 -0700212 reinterpret_cast<byte*>(start_addr),
Qiming Shi84d49cc2014-04-24 15:38:41 +0800213 0x21000000,
214 PROT_READ | PROT_WRITE,
215 true,
216 &error_msg));
217 ASSERT_TRUE(map.get() != nullptr) << error_msg;
218 ASSERT_TRUE(error_msg.empty());
Douglas Leung859c2552014-06-11 11:47:09 -0700219 ASSERT_EQ(reinterpret_cast<uintptr_t>(BaseBegin(map.get())), start_addr);
Qiming Shi84d49cc2014-04-24 15:38:41 +0800220}
221
222TEST_F(MemMapTest, MapAnonymousOverflow) {
Mathieu Chartierc54e12a2014-10-14 16:22:41 -0700223 CommonInit();
Qiming Shi84d49cc2014-04-24 15:38:41 +0800224 std::string error_msg;
225 uintptr_t ptr = 0;
226 ptr -= kPageSize; // Now it's close to the top.
Ian Rogers700a4022014-05-19 16:49:03 -0700227 std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousOverflow",
Qiming Shi84d49cc2014-04-24 15:38:41 +0800228 reinterpret_cast<byte*>(ptr),
229 2 * kPageSize, // brings it over the top.
230 PROT_READ | PROT_WRITE,
231 false,
232 &error_msg));
233 ASSERT_EQ(nullptr, map.get());
234 ASSERT_FALSE(error_msg.empty());
235}
236
237#ifdef __LP64__
238TEST_F(MemMapTest, MapAnonymousLow4GBExpectedTooHigh) {
Mathieu Chartierc54e12a2014-10-14 16:22:41 -0700239 CommonInit();
Qiming Shi84d49cc2014-04-24 15:38:41 +0800240 std::string error_msg;
Ian Rogers700a4022014-05-19 16:49:03 -0700241 std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousLow4GBExpectedTooHigh",
Qiming Shi84d49cc2014-04-24 15:38:41 +0800242 reinterpret_cast<byte*>(UINT64_C(0x100000000)),
243 kPageSize,
244 PROT_READ | PROT_WRITE,
245 true,
246 &error_msg));
247 ASSERT_EQ(nullptr, map.get());
248 ASSERT_FALSE(error_msg.empty());
249}
250
251TEST_F(MemMapTest, MapAnonymousLow4GBRangeTooHigh) {
Mathieu Chartierc54e12a2014-10-14 16:22:41 -0700252 CommonInit();
Qiming Shi84d49cc2014-04-24 15:38:41 +0800253 std::string error_msg;
Ian Rogers700a4022014-05-19 16:49:03 -0700254 std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousLow4GBRangeTooHigh",
Qiming Shi84d49cc2014-04-24 15:38:41 +0800255 reinterpret_cast<byte*>(0xF0000000),
256 0x20000000,
257 PROT_READ | PROT_WRITE,
258 true,
259 &error_msg));
260 ASSERT_EQ(nullptr, map.get());
261 ASSERT_FALSE(error_msg.empty());
262}
263#endif
264
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -0700265TEST_F(MemMapTest, CheckNoGaps) {
Mathieu Chartierc54e12a2014-10-14 16:22:41 -0700266 CommonInit();
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -0700267 std::string error_msg;
268 constexpr size_t kNumPages = 3;
269 // Map a 3-page mem map.
270 std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymous0",
271 nullptr,
272 kPageSize * kNumPages,
273 PROT_READ | PROT_WRITE,
274 false,
275 &error_msg));
276 ASSERT_TRUE(map.get() != nullptr) << error_msg;
277 ASSERT_TRUE(error_msg.empty());
278 // Record the base address.
279 byte* map_base = reinterpret_cast<byte*>(map->BaseBegin());
280 // Unmap it.
281 map.reset();
282
283 // Map at the same address, but in page-sized separate mem maps,
284 // assuming the space at the address is still available.
285 std::unique_ptr<MemMap> map0(MemMap::MapAnonymous("MapAnonymous0",
286 map_base,
287 kPageSize,
288 PROT_READ | PROT_WRITE,
289 false,
290 &error_msg));
291 ASSERT_TRUE(map0.get() != nullptr) << error_msg;
292 ASSERT_TRUE(error_msg.empty());
293 std::unique_ptr<MemMap> map1(MemMap::MapAnonymous("MapAnonymous1",
294 map_base + kPageSize,
295 kPageSize,
296 PROT_READ | PROT_WRITE,
297 false,
298 &error_msg));
299 ASSERT_TRUE(map1.get() != nullptr) << error_msg;
300 ASSERT_TRUE(error_msg.empty());
301 std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymous2",
302 map_base + kPageSize * 2,
303 kPageSize,
304 PROT_READ | PROT_WRITE,
305 false,
306 &error_msg));
307 ASSERT_TRUE(map2.get() != nullptr) << error_msg;
308 ASSERT_TRUE(error_msg.empty());
309
310 // One-map cases.
311 ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map0.get()));
312 ASSERT_TRUE(MemMap::CheckNoGaps(map1.get(), map1.get()));
313 ASSERT_TRUE(MemMap::CheckNoGaps(map2.get(), map2.get()));
314
315 // Two or three-map cases.
316 ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map1.get()));
317 ASSERT_TRUE(MemMap::CheckNoGaps(map1.get(), map2.get()));
318 ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map2.get()));
319
320 // Unmap the middle one.
321 map1.reset();
322
323 // Should return false now that there's a gap in the middle.
324 ASSERT_FALSE(MemMap::CheckNoGaps(map0.get(), map2.get()));
325}
326
Brian Carlstrom9004cb62013-07-26 15:48:31 -0700327} // namespace art