/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mem_map.h"

#include "base/memory_tool.h"
#include <backtrace/BacktraceMap.h>
#include <inttypes.h>
#include <stdlib.h>

#include <memory>
#include <sstream>

#include "base/stringprintf.h"

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
#include "ScopedFd.h"
#pragma GCC diagnostic pop

#include "thread-inl.h"
#include "utils.h"

#include <cutils/ashmem.h>

#ifndef ANDROID_OS
#include <sys/resource.h>
#endif

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

namespace art {

static std::ostream& operator<<(
    std::ostream& os,
    std::pair<BacktraceMap::const_iterator, BacktraceMap::const_iterator> iters) {
  for (BacktraceMap::const_iterator it = iters.first; it != iters.second; ++it) {
    os << StringPrintf("0x%08x-0x%08x %c%c%c %s\n",
                       static_cast<uint32_t>(it->start),
                       static_cast<uint32_t>(it->end),
                       (it->flags & PROT_READ) ? 'r' : '-',
                       (it->flags & PROT_WRITE) ? 'w' : '-',
                       (it->flags & PROT_EXEC) ? 'x' : '-', it->name.c_str());
  }
  return os;
}

std::ostream& operator<<(std::ostream& os, const MemMap::Maps& mem_maps) {
  os << "MemMap:" << std::endl;
  for (auto it = mem_maps.begin(); it != mem_maps.end(); ++it) {
    void* base = it->first;
    MemMap* map = it->second;
    CHECK_EQ(base, map->BaseBegin());
    os << *map << std::endl;
  }
  return os;
}

MemMap::Maps* MemMap::maps_ = nullptr;

#if USE_ART_LOW_4G_ALLOCATOR
// Handling mem_map in 32b address range for 64b architectures that do not support MAP_32BIT.

// The regular start of memory allocations. The first 64KB is protected by SELinux.
static constexpr uintptr_t LOW_MEM_START = 64 * KB;

// Generate random starting position.
// To not interfere with image position, take the image's address and only place it below. Current
// formula (sketch):
//
// ART_BASE_ADDR      = 0001XXXXXXXXXXXXXXX
// ----------------------------------------
//                    = 0000111111111111111
// & ~(kPageSize - 1) =~0000000000000001111
// ----------------------------------------
// mask               = 0000111111111110000
// & random data      = YYYYYYYYYYYYYYYYYYY
// -----------------------------------
// tmp                = 0000YYYYYYYYYYY0000
// + LOW_MEM_START    = 0000000000001000000
// --------------------------------------
// start
//
// arc4random as an entropy source is exposed in Bionic, but not in glibc. When we
// do not have Bionic, simply start with LOW_MEM_START.

// Function is standalone so it can be tested somewhat in mem_map_test.cc.
#ifdef __BIONIC__
uintptr_t CreateStartPos(uint64_t input) {
  CHECK_NE(0, ART_BASE_ADDRESS);

  // Start with all bits below highest bit in ART_BASE_ADDRESS.
  constexpr size_t leading_zeros = CLZ(static_cast<uint32_t>(ART_BASE_ADDRESS));
  constexpr uintptr_t mask_ones = (1 << (31 - leading_zeros)) - 1;

  // Lowest (usually 12) bits are not used, as aligned by page size.
  constexpr uintptr_t mask = mask_ones & ~(kPageSize - 1);

  // Mask input data.
  return (input & mask) + LOW_MEM_START;
}
#endif
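
// For illustration only, a worked example of CreateStartPos with assumed values
// (not taken from this file): with ART_BASE_ADDRESS == 0x70000000 and a 4 KiB
// kPageSize, leading_zeros is 1, mask_ones is (1 << 30) - 1 == 0x3fffffff, and
// mask is 0x3ffff000. An input of 0x123456789abc then yields
// (0x123456789abc & 0x3ffff000) + 0x10000 == 0x16789000 + 0x10000 == 0x16799000:
// a page-aligned start below the image base and above the SELinux-protected
// first 64KB.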

static uintptr_t GenerateNextMemPos() {
#ifdef __BIONIC__
  uint64_t random_data;
  arc4random_buf(&random_data, sizeof(random_data));
  return CreateStartPos(random_data);
#else
  // No arc4random on host, see above.
  return LOW_MEM_START;
#endif
}

// Initialize linear scan to random position.
uintptr_t MemMap::next_mem_pos_ = GenerateNextMemPos();
#endif

// Return true if the address range is contained in a single memory map by either reading
// the maps_ variable or the /proc/self/maps entry.
bool MemMap::ContainedWithinExistingMap(uint8_t* ptr, size_t size, std::string* error_msg) {
  uintptr_t begin = reinterpret_cast<uintptr_t>(ptr);
  uintptr_t end = begin + size;

  // There is a suspicion that BacktraceMap::Create is occasionally missing maps. TODO: Investigate
  // further.
  {
    MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
    for (auto& pair : *maps_) {
      MemMap* const map = pair.second;
      if (begin >= reinterpret_cast<uintptr_t>(map->Begin()) &&
          end <= reinterpret_cast<uintptr_t>(map->End())) {
        return true;
      }
    }
  }

  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
  if (map == nullptr) {
    if (error_msg != nullptr) {
      *error_msg = StringPrintf("Failed to build process map");
    }
    return false;
  }

  ScopedBacktraceMapIteratorLock lock(map.get());
  for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
    if ((begin >= it->start && begin < it->end)  // start of new within old
        && (end > it->start && end <= it->end)) {  // end of new within old
      return true;
    }
  }
  if (error_msg != nullptr) {
    PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
    *error_msg = StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " does not overlap "
                              "any existing map. See process maps in the log.", begin, end);
  }
  return false;
}

// Return true if the address range does not conflict with any /proc/self/maps entry.
static bool CheckNonOverlapping(uintptr_t begin,
                                uintptr_t end,
                                std::string* error_msg) {
  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
  if (map.get() == nullptr) {
    *error_msg = StringPrintf("Failed to build process map");
    return false;
  }
  ScopedBacktraceMapIteratorLock lock(map.get());
  for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
    if ((begin >= it->start && begin < it->end)      // start of new within old
        || (end > it->start && end < it->end)        // end of new within old
        || (begin <= it->start && end > it->end)) {  // start/end of new includes all of old
      std::ostringstream map_info;
      map_info << std::make_pair(it, map->end());
      *error_msg = StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " overlaps with "
                                "existing map 0x%08" PRIxPTR "-0x%08" PRIxPTR " (%s)\n%s",
                                begin, end,
                                static_cast<uintptr_t>(it->start), static_cast<uintptr_t>(it->end),
                                it->name.c_str(),
                                map_info.str().c_str());
      return false;
    }
  }
  return true;
}

// CheckMapRequest to validate a non-MAP_FAILED mmap result based on
// the expected value, calling munmap if validation fails, giving the
// reason in error_msg.
//
// If the expected_ptr is null, nothing is checked beyond the fact
// that the actual_ptr is not MAP_FAILED. However, if expected_ptr is
// non-null, we check that actual_ptr == expected_ptr, and if not,
// report in error_msg what the conflicting mapping was if found, or a
// generic error in other cases.
static bool CheckMapRequest(uint8_t* expected_ptr, void* actual_ptr, size_t byte_count,
                            std::string* error_msg) {
  // Handled first by caller for more specific error messages.
  CHECK(actual_ptr != MAP_FAILED);

  if (expected_ptr == nullptr) {
    return true;
  }

  uintptr_t actual = reinterpret_cast<uintptr_t>(actual_ptr);
  uintptr_t expected = reinterpret_cast<uintptr_t>(expected_ptr);
  uintptr_t limit = expected + byte_count;

  if (expected_ptr == actual_ptr) {
    return true;
  }

  // We asked for an address but didn't get what we wanted, all paths below here should fail.
  int result = munmap(actual_ptr, byte_count);
  if (result == -1) {
    PLOG(WARNING) << StringPrintf("munmap(%p, %zd) failed", actual_ptr, byte_count);
  }

  if (error_msg != nullptr) {
    // We call this here so that we can try and generate a full error
    // message with the overlapping mapping. There is no guarantee that
    // there will be an overlap though, since
    // - The kernel is not *required* to honor expected_ptr unless MAP_FIXED is
    //   true, even if there is no overlap
    // - There might have been an overlap at the point of mmap, but the
    //   overlapping region has since been unmapped.
    std::string error_detail;
    CheckNonOverlapping(expected, limit, &error_detail);
    std::ostringstream os;
    os << StringPrintf("Failed to mmap at expected address, mapped at "
                       "0x%08" PRIxPTR " instead of 0x%08" PRIxPTR,
                       actual, expected);
    if (!error_detail.empty()) {
      os << " : " << error_detail;
    }
    *error_msg = os.str();
  }
  return false;
}

#if USE_ART_LOW_4G_ALLOCATOR
static inline void* TryMemMapLow4GB(void* ptr,
                                    size_t page_aligned_byte_count,
                                    int prot,
                                    int flags,
                                    int fd,
                                    off_t offset) {
  void* actual = mmap(ptr, page_aligned_byte_count, prot, flags, fd, offset);
  if (actual != MAP_FAILED) {
    // Since we didn't use MAP_FIXED the kernel may have mapped it somewhere not in the low
    // 4GB. If this is the case, unmap and retry.
    if (reinterpret_cast<uintptr_t>(actual) + page_aligned_byte_count >= 4 * GB) {
      munmap(actual, page_aligned_byte_count);
      actual = MAP_FAILED;
    }
  }
  return actual;
}
#endif

MemMap* MemMap::MapAnonymous(const char* name,
                             uint8_t* expected_ptr,
                             size_t byte_count,
                             int prot,
                             bool low_4gb,
                             bool reuse,
                             std::string* error_msg,
                             bool use_ashmem) {
#ifndef __LP64__
  UNUSED(low_4gb);
#endif
  if (byte_count == 0) {
    return new MemMap(name, nullptr, 0, nullptr, 0, prot, false);
  }
  size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);

  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
  if (reuse) {
    // reuse means it is okay that it overlaps an existing page mapping.
    // Only use this if you actually made the page reservation yourself.
    CHECK(expected_ptr != nullptr);

    DCHECK(ContainedWithinExistingMap(expected_ptr, byte_count, error_msg)) << *error_msg;
    flags |= MAP_FIXED;
  }

  ScopedFd fd(-1);

  if (use_ashmem) {
    if (!kIsTargetBuild) {
      // When not on Android (either host or assuming a linux target), ashmem is faked using
      // files in /tmp. Ensure that such files won't fail due to ulimit restrictions. If they
      // would, fall back to a regular mmap.
      struct rlimit rlimit_fsize;
      CHECK_EQ(getrlimit(RLIMIT_FSIZE, &rlimit_fsize), 0);
      use_ashmem = (rlimit_fsize.rlim_cur == RLIM_INFINITY) ||
          (page_aligned_byte_count < rlimit_fsize.rlim_cur);
    }
  }

  if (use_ashmem) {
    // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
    // prefixed "dalvik-".
    std::string debug_friendly_name("dalvik-");
    debug_friendly_name += name;
    fd.reset(ashmem_create_region(debug_friendly_name.c_str(), page_aligned_byte_count));
    if (fd.get() == -1) {
      *error_msg = StringPrintf("ashmem_create_region failed for '%s': %s", name, strerror(errno));
      return nullptr;
    }
    flags &= ~MAP_ANONYMOUS;
  }

  // We need to store and potentially set an error number for pretty printing of errors.
  int saved_errno = 0;

  void* actual = MapInternal(expected_ptr,
                             page_aligned_byte_count,
                             prot,
                             flags,
                             fd.get(),
                             0,
                             low_4gb);
  saved_errno = errno;

  if (actual == MAP_FAILED) {
    if (error_msg != nullptr) {
      PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);

      *error_msg = StringPrintf("Failed anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0): %s. "
                                "See process maps in the log.",
                                expected_ptr,
                                page_aligned_byte_count,
                                prot,
                                flags,
                                fd.get(),
                                strerror(saved_errno));
    }
    return nullptr;
  }
  std::ostringstream check_map_request_error_msg;
  if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
    return nullptr;
  }
  return new MemMap(name, reinterpret_cast<uint8_t*>(actual), byte_count, actual,
                    page_aligned_byte_count, prot, reuse);
}
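
// A minimal usage sketch for MapAnonymous (illustrative only; the region name,
// size and protection below are made up, not taken from real callers):
//
//   std::string error_msg;
//   std::unique_ptr<MemMap> map(MemMap::MapAnonymous("example-region",
//                                                    /* expected_ptr */ nullptr,
//                                                    16 * kPageSize,
//                                                    PROT_READ | PROT_WRITE,
//                                                    /* low_4gb */ false,
//                                                    /* reuse */ false,
//                                                    &error_msg,
//                                                    /* use_ashmem */ true));
//   CHECK(map != nullptr) << error_msg;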

MemMap* MemMap::MapDummy(const char* name, uint8_t* addr, size_t byte_count) {
  if (byte_count == 0) {
    return new MemMap(name, nullptr, 0, nullptr, 0, 0, false);
  }
  const size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);
  return new MemMap(name, addr, byte_count, addr, page_aligned_byte_count, 0, true /* reuse */);
}

MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr,
                                 size_t byte_count,
                                 int prot,
                                 int flags,
                                 int fd,
                                 off_t start,
                                 bool low_4gb,
                                 bool reuse,
                                 const char* filename,
                                 std::string* error_msg) {
  CHECK_NE(0, prot);
  CHECK_NE(0, flags & (MAP_SHARED | MAP_PRIVATE));

  // Note that we do not allow MAP_FIXED unless reuse == true, i.e. we
  // expect this mapping to be contained within an existing map.
  if (reuse) {
    // reuse means it is okay that it overlaps an existing page mapping.
    // Only use this if you actually made the page reservation yourself.
    CHECK(expected_ptr != nullptr);

    DCHECK(ContainedWithinExistingMap(expected_ptr, byte_count, error_msg))
        << ((error_msg != nullptr) ? *error_msg : std::string());
    flags |= MAP_FIXED;
  } else {
    CHECK_EQ(0, flags & MAP_FIXED);
    // Don't bother checking for an overlapping region here. We'll
    // check this if required after the fact inside CheckMapRequest.
  }

  if (byte_count == 0) {
    return new MemMap(filename, nullptr, 0, nullptr, 0, prot, false);
  }
  // Adjust 'offset' to be page-aligned as required by mmap.
  int page_offset = start % kPageSize;
  off_t page_aligned_offset = start - page_offset;
  // Adjust 'byte_count' to be page-aligned as we will map this anyway.
  size_t page_aligned_byte_count = RoundUp(byte_count + page_offset, kPageSize);
  // The 'expected_ptr' is modified (if specified, i.e. non-null) to be page aligned to the file
  // but not necessarily to virtual memory. mmap will page align 'expected' for us.
  uint8_t* page_aligned_expected =
      (expected_ptr == nullptr) ? nullptr : (expected_ptr - page_offset);

  size_t redzone_size = 0;
  if (RUNNING_ON_MEMORY_TOOL && kMemoryToolAddsRedzones && expected_ptr == nullptr) {
    redzone_size = kPageSize;
    page_aligned_byte_count += redzone_size;
  }

  uint8_t* actual = reinterpret_cast<uint8_t*>(MapInternal(page_aligned_expected,
                                                           page_aligned_byte_count,
                                                           prot,
                                                           flags,
                                                           fd,
                                                           page_aligned_offset,
                                                           low_4gb));
  if (actual == MAP_FAILED) {
    if (error_msg != nullptr) {
      auto saved_errno = errno;

      if (kIsDebugBuild || VLOG_IS_ON(oat)) {
        PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
      }

      *error_msg = StringPrintf("mmap(%p, %zd, 0x%x, 0x%x, %d, %" PRId64
                                ") of file '%s' failed: %s. See process maps in the log.",
                                page_aligned_expected, page_aligned_byte_count, prot, flags, fd,
                                static_cast<int64_t>(page_aligned_offset), filename,
                                strerror(saved_errno));
    }
    return nullptr;
  }
  std::ostringstream check_map_request_error_msg;
  if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
    return nullptr;
  }
  if (redzone_size != 0) {
    const uint8_t* real_start = actual + page_offset;
    const uint8_t* real_end = actual + page_offset + byte_count;
    const uint8_t* mapping_end = actual + page_aligned_byte_count;

    MEMORY_TOOL_MAKE_NOACCESS(actual, real_start - actual);
    MEMORY_TOOL_MAKE_NOACCESS(real_end, mapping_end - real_end);
    page_aligned_byte_count -= redzone_size;
  }

  return new MemMap(filename, actual + page_offset, byte_count, actual, page_aligned_byte_count,
                    prot, reuse, redzone_size);
}
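
// A minimal usage sketch for MapFileAtAddress (illustrative only; 'fd' and
// 'file_size' are assumed to describe a file the caller has already opened):
//
//   std::string error_msg;
//   std::unique_ptr<MemMap> map(MemMap::MapFileAtAddress(/* expected_ptr */ nullptr,
//                                                        file_size,
//                                                        PROT_READ,
//                                                        MAP_PRIVATE,
//                                                        fd,
//                                                        /* start */ 0,
//                                                        /* low_4gb */ false,
//                                                        /* reuse */ false,
//                                                        "/path/to/file",
//                                                        &error_msg));
//   CHECK(map != nullptr) << error_msg;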

MemMap::~MemMap() {
  if (base_begin_ == nullptr && base_size_ == 0) {
    return;
  }

  // Unlike Valgrind, AddressSanitizer requires that all manually poisoned memory is unpoisoned
  // before it is returned to the system.
  if (redzone_size_ != 0) {
    MEMORY_TOOL_MAKE_UNDEFINED(
        reinterpret_cast<char*>(base_begin_) + base_size_ - redzone_size_,
        redzone_size_);
  }

  if (!reuse_) {
    MEMORY_TOOL_MAKE_UNDEFINED(base_begin_, base_size_);
    int result = munmap(base_begin_, base_size_);
    if (result == -1) {
      PLOG(FATAL) << "munmap failed";
    }
  }

  // Remove it from maps_.
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  bool found = false;
  DCHECK(maps_ != nullptr);
  for (auto it = maps_->lower_bound(base_begin_), end = maps_->end();
       it != end && it->first == base_begin_; ++it) {
    if (it->second == this) {
      found = true;
      maps_->erase(it);
      break;
    }
  }
  CHECK(found) << "MemMap not found";
}

MemMap::MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_begin,
               size_t base_size, int prot, bool reuse, size_t redzone_size)
    : name_(name), begin_(begin), size_(size), base_begin_(base_begin), base_size_(base_size),
      prot_(prot), reuse_(reuse), redzone_size_(redzone_size) {
  if (size_ == 0) {
    CHECK(begin_ == nullptr);
    CHECK(base_begin_ == nullptr);
    CHECK_EQ(base_size_, 0U);
  } else {
    CHECK(begin_ != nullptr);
    CHECK(base_begin_ != nullptr);
    CHECK_NE(base_size_, 0U);

    // Add it to maps_.
    MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
    DCHECK(maps_ != nullptr);
    maps_->insert(std::make_pair(base_begin_, this));
  }
}

MemMap* MemMap::RemapAtEnd(uint8_t* new_end, const char* tail_name, int tail_prot,
                           std::string* error_msg, bool use_ashmem) {
  DCHECK_GE(new_end, Begin());
  DCHECK_LE(new_end, End());
  DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
  DCHECK_ALIGNED(begin_, kPageSize);
  DCHECK_ALIGNED(base_begin_, kPageSize);
  DCHECK_ALIGNED(reinterpret_cast<uint8_t*>(base_begin_) + base_size_, kPageSize);
  DCHECK_ALIGNED(new_end, kPageSize);
  uint8_t* old_end = begin_ + size_;
  uint8_t* old_base_end = reinterpret_cast<uint8_t*>(base_begin_) + base_size_;
  uint8_t* new_base_end = new_end;
  DCHECK_LE(new_base_end, old_base_end);
  if (new_base_end == old_base_end) {
    return new MemMap(tail_name, nullptr, 0, nullptr, 0, tail_prot, false);
  }
  size_ = new_end - reinterpret_cast<uint8_t*>(begin_);
  base_size_ = new_base_end - reinterpret_cast<uint8_t*>(base_begin_);
  DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
  size_t tail_size = old_end - new_end;
  uint8_t* tail_base_begin = new_base_end;
  size_t tail_base_size = old_base_end - new_base_end;
  DCHECK_EQ(tail_base_begin + tail_base_size, old_base_end);
  DCHECK_ALIGNED(tail_base_size, kPageSize);

  int int_fd = -1;
  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
  if (use_ashmem) {
    // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
    // prefixed "dalvik-".
    std::string debug_friendly_name("dalvik-");
    debug_friendly_name += tail_name;
    int_fd = ashmem_create_region(debug_friendly_name.c_str(), tail_base_size);
    flags = MAP_PRIVATE | MAP_FIXED;
    if (int_fd == -1) {
      *error_msg = StringPrintf("ashmem_create_region failed for '%s': %s",
                                tail_name, strerror(errno));
      return nullptr;
    }
  }
  ScopedFd fd(int_fd);

  MEMORY_TOOL_MAKE_UNDEFINED(tail_base_begin, tail_base_size);
  // Unmap/map the tail region.
  int result = munmap(tail_base_begin, tail_base_size);
  if (result == -1) {
    PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
    *error_msg = StringPrintf("munmap(%p, %zd) failed for '%s'. See process maps in the log.",
                              tail_base_begin, tail_base_size, name_.c_str());
    return nullptr;
  }
  // Don't cause memory allocation between the munmap and the mmap
  // calls. Otherwise, libc (or something else) might take this memory
  // region. Note this isn't perfect as there's no way to prevent
  // other threads from trying to take this memory region here.
  uint8_t* actual = reinterpret_cast<uint8_t*>(mmap(tail_base_begin, tail_base_size, tail_prot,
                                                    flags, fd.get(), 0));
  if (actual == MAP_FAILED) {
    PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
    *error_msg = StringPrintf("anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0) failed. See process "
                              "maps in the log.", tail_base_begin, tail_base_size, tail_prot, flags,
                              fd.get());
    return nullptr;
  }
  return new MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot, false);
}
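
// A minimal usage sketch for RemapAtEnd (illustrative only; the split point is
// made up and must be page aligned and within the existing map):
//
//   std::string error_msg;
//   uint8_t* new_end = map->Begin() + RoundUp(map->Size() / 2, kPageSize);
//   std::unique_ptr<MemMap> tail(map->RemapAtEnd(new_end,
//                                                "example-tail",
//                                                PROT_READ | PROT_WRITE,
//                                                &error_msg,
//                                                /* use_ashmem */ true));
//   CHECK(tail != nullptr) << error_msg;
//   // 'map' now ends at new_end; 'tail' owns the remainder of the original region.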

void MemMap::MadviseDontNeedAndZero() {
  if (base_begin_ != nullptr || base_size_ != 0) {
    if (!kMadviseZeroes) {
      memset(base_begin_, 0, base_size_);
    }
    int result = madvise(base_begin_, base_size_, MADV_DONTNEED);
    if (result == -1) {
      PLOG(WARNING) << "madvise failed";
    }
  }
}

bool MemMap::Sync() {
  bool result;
  if (redzone_size_ != 0) {
    // To avoid valgrind errors, temporarily lift the lower-end noaccess protection before passing
    // the base address to msync(), as msync() only accepts a page-aligned base address, and
    // exclude the higher-end noaccess protection from the msync range. b/27552451.
    uint8_t* base_begin = reinterpret_cast<uint8_t*>(base_begin_);
    MEMORY_TOOL_MAKE_DEFINED(base_begin, begin_ - base_begin);
    result = msync(BaseBegin(), End() - base_begin, MS_SYNC) == 0;
    MEMORY_TOOL_MAKE_NOACCESS(base_begin, begin_ - base_begin);
  } else {
    result = msync(BaseBegin(), BaseSize(), MS_SYNC) == 0;
  }
  return result;
}

bool MemMap::Protect(int prot) {
  if (base_begin_ == nullptr && base_size_ == 0) {
    prot_ = prot;
    return true;
  }

  if (mprotect(base_begin_, base_size_, prot) == 0) {
    prot_ = prot;
    return true;
  }

  PLOG(ERROR) << "mprotect(" << reinterpret_cast<void*>(base_begin_) << ", " << base_size_ << ", "
              << prot << ") failed";
  return false;
}

bool MemMap::CheckNoGaps(MemMap* begin_map, MemMap* end_map) {
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  CHECK(begin_map != nullptr);
  CHECK(end_map != nullptr);
  CHECK(HasMemMap(begin_map));
  CHECK(HasMemMap(end_map));
  CHECK_LE(begin_map->BaseBegin(), end_map->BaseBegin());
  MemMap* map = begin_map;
  while (map->BaseBegin() != end_map->BaseBegin()) {
    MemMap* next_map = GetLargestMemMapAt(map->BaseEnd());
    if (next_map == nullptr) {
      // Found a gap.
      return false;
    }
    map = next_map;
  }
  return true;
}

void MemMap::DumpMaps(std::ostream& os, bool terse) {
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  DumpMapsLocked(os, terse);
}

void MemMap::DumpMapsLocked(std::ostream& os, bool terse) {
  const auto& mem_maps = *maps_;
  if (!terse) {
    os << mem_maps;
    return;
  }

  // Terse output example:
  //   [MemMap: 0x409be000+0x20P~0x11dP+0x20P~0x61cP+0x20P prot=0x3 LinearAlloc]
  //   [MemMap: 0x451d6000+0x6bP(3) prot=0x3 large object space allocation]
  // The details:
  //   "+0x20P" means 0x20 pages taken by a single mapping,
  //   "~0x11dP" means a gap of 0x11d pages,
  //   "+0x6bP(3)" means 3 mappings one after another, together taking 0x6b pages.
  os << "MemMap:" << std::endl;
  for (auto it = mem_maps.begin(), maps_end = mem_maps.end(); it != maps_end;) {
    MemMap* map = it->second;
    void* base = it->first;
    CHECK_EQ(base, map->BaseBegin());
    os << "[MemMap: " << base;
    ++it;
    // Merge consecutive maps with the same protect flags and name.
    constexpr size_t kMaxGaps = 9;
    size_t num_gaps = 0;
    size_t num = 1u;
    size_t size = map->BaseSize();
    CHECK_ALIGNED(size, kPageSize);
    void* end = map->BaseEnd();
    while (it != maps_end &&
        it->second->GetProtect() == map->GetProtect() &&
        it->second->GetName() == map->GetName() &&
        (it->second->BaseBegin() == end || num_gaps < kMaxGaps)) {
      if (it->second->BaseBegin() != end) {
        ++num_gaps;
        os << "+0x" << std::hex << (size / kPageSize) << "P";
        if (num != 1u) {
          os << "(" << std::dec << num << ")";
        }
        size_t gap =
            reinterpret_cast<uintptr_t>(it->second->BaseBegin()) - reinterpret_cast<uintptr_t>(end);
        CHECK_ALIGNED(gap, kPageSize);
        os << "~0x" << std::hex << (gap / kPageSize) << "P";
        num = 0u;
        size = 0u;
      }
      CHECK_ALIGNED(it->second->BaseSize(), kPageSize);
      ++num;
      size += it->second->BaseSize();
      end = it->second->BaseEnd();
      ++it;
    }
    os << "+0x" << std::hex << (size / kPageSize) << "P";
    if (num != 1u) {
      os << "(" << std::dec << num << ")";
    }
    os << " prot=0x" << std::hex << map->GetProtect() << " " << map->GetName() << "]" << std::endl;
  }
}

bool MemMap::HasMemMap(MemMap* map) {
  void* base_begin = map->BaseBegin();
  for (auto it = maps_->lower_bound(base_begin), end = maps_->end();
       it != end && it->first == base_begin; ++it) {
    if (it->second == map) {
      return true;
    }
  }
  return false;
}

MemMap* MemMap::GetLargestMemMapAt(void* address) {
  size_t largest_size = 0;
  MemMap* largest_map = nullptr;
  DCHECK(maps_ != nullptr);
  for (auto it = maps_->lower_bound(address), end = maps_->end();
       it != end && it->first == address; ++it) {
    MemMap* map = it->second;
    CHECK(map != nullptr);
    if (largest_size < map->BaseSize()) {
      largest_size = map->BaseSize();
      largest_map = map;
    }
  }
  return largest_map;
}

void MemMap::Init() {
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  if (maps_ == nullptr) {
    // dex2oat calls MemMap::Init twice since it's needed before the runtime is created.
    maps_ = new Maps;
  }
}

void MemMap::Shutdown() {
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  delete maps_;
  maps_ = nullptr;
}

void MemMap::SetSize(size_t new_size) {
  if (new_size == base_size_) {
    return;
  }
  CHECK_ALIGNED(new_size, kPageSize);
  CHECK_EQ(base_size_, size_) << "Unsupported";
  CHECK_LE(new_size, base_size_);
  MEMORY_TOOL_MAKE_UNDEFINED(
      reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) + new_size),
      base_size_ - new_size);
  CHECK_EQ(munmap(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) + new_size),
                  base_size_ - new_size), 0) << new_size << " " << base_size_;
  base_size_ = new_size;
  size_ = new_size;
}

void* MemMap::MapInternal(void* addr,
                          size_t length,
                          int prot,
                          int flags,
                          int fd,
                          off_t offset,
                          bool low_4gb) {
#ifdef __LP64__
  // When requesting low_4g memory and having an expectation, the requested range should fit into
  // 4GB.
  if (low_4gb && (
      // Start out of bounds.
      (reinterpret_cast<uintptr_t>(addr) >> 32) != 0 ||
      // End out of bounds. For simplicity, this will fail for the last page of memory.
      ((reinterpret_cast<uintptr_t>(addr) + length) >> 32) != 0)) {
    LOG(ERROR) << "The requested address space (" << addr << ", "
               << reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) + length)
               << ") cannot fit in low_4gb";
    return MAP_FAILED;
  }
#else
  UNUSED(low_4gb);
#endif
  DCHECK_ALIGNED(length, kPageSize);
  if (low_4gb) {
    DCHECK_EQ(flags & MAP_FIXED, 0);
  }
  // TODO:
  // A page allocator would be a useful abstraction here, as
  // 1) It is doubtful that MAP_32BIT on x86_64 is doing the right job for us
  void* actual = MAP_FAILED;
#if USE_ART_LOW_4G_ALLOCATOR
  // MAP_32BIT only available on x86_64.
  if (low_4gb && addr == nullptr) {
    bool first_run = true;

    MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
    for (uintptr_t ptr = next_mem_pos_; ptr < 4 * GB; ptr += kPageSize) {
      // Use maps_ as an optimization to skip over large maps.
      // Find the first map which is address > ptr.
      auto it = maps_->upper_bound(reinterpret_cast<void*>(ptr));
      if (it != maps_->begin()) {
        auto before_it = it;
        --before_it;
        // Start at the end of the map before the upper bound.
        ptr = std::max(ptr, reinterpret_cast<uintptr_t>(before_it->second->BaseEnd()));
        CHECK_ALIGNED(ptr, kPageSize);
      }
      while (it != maps_->end()) {
        // How much space do we have until the next map?
        size_t delta = reinterpret_cast<uintptr_t>(it->first) - ptr;
        // If the space may be sufficient, break out of the loop.
        if (delta >= length) {
          break;
        }
        // Otherwise, skip to the end of the map.
        ptr = reinterpret_cast<uintptr_t>(it->second->BaseEnd());
        CHECK_ALIGNED(ptr, kPageSize);
        ++it;
      }

      // Try to see if we get lucky with this address since none of the ART maps overlap.
      actual = TryMemMapLow4GB(reinterpret_cast<void*>(ptr), length, prot, flags, fd, offset);
      if (actual != MAP_FAILED) {
        next_mem_pos_ = reinterpret_cast<uintptr_t>(actual) + length;
        return actual;
      }

      if (4U * GB - ptr < length) {
        // Not enough memory until 4GB.
        if (first_run) {
          // Try another time from the bottom.
          ptr = LOW_MEM_START - kPageSize;
          first_run = false;
          continue;
        } else {
          // Second try failed.
          break;
        }
      }

      uintptr_t tail_ptr;

      // Check pages are free.
      bool safe = true;
      for (tail_ptr = ptr; tail_ptr < ptr + length; tail_ptr += kPageSize) {
        if (msync(reinterpret_cast<void*>(tail_ptr), kPageSize, 0) == 0) {
          safe = false;
          break;
        } else {
          DCHECK_EQ(errno, ENOMEM);
        }
      }

      next_mem_pos_ = tail_ptr;  // Update early, as we break out when we find and map a region.

      if (safe == true) {
        actual = TryMemMapLow4GB(reinterpret_cast<void*>(ptr), length, prot, flags, fd, offset);
        if (actual != MAP_FAILED) {
          return actual;
        }
      } else {
        // Skip over last page.
        ptr = tail_ptr;
      }
    }

    if (actual == MAP_FAILED) {
      LOG(ERROR) << "Could not find contiguous low-memory space.";
      errno = ENOMEM;
    }
  } else {
    actual = mmap(addr, length, prot, flags, fd, offset);
  }

#else
#if defined(__LP64__)
  if (low_4gb && addr == nullptr) {
    flags |= MAP_32BIT;
  }
#endif
  actual = mmap(addr, length, prot, flags, fd, offset);
#endif
  return actual;
}

std::ostream& operator<<(std::ostream& os, const MemMap& mem_map) {
  os << StringPrintf("[MemMap: %p-%p prot=0x%x %s]",
                     mem_map.BaseBegin(), mem_map.BaseEnd(), mem_map.GetProtect(),
                     mem_map.GetName().c_str());
  return os;
}

void MemMap::TryReadable() {
  if (base_begin_ == nullptr && base_size_ == 0) {
    return;
  }
  CHECK_NE(prot_ & PROT_READ, 0);
  volatile uint8_t* begin = reinterpret_cast<volatile uint8_t*>(base_begin_);
  volatile uint8_t* end = begin + base_size_;
  DCHECK(IsAligned<kPageSize>(begin));
  DCHECK(IsAligned<kPageSize>(end));
  // Read the first byte of each page. Use volatile to prevent the compiler from optimizing away
  // the reads.
  for (volatile uint8_t* ptr = begin; ptr < end; ptr += kPageSize) {
    // This read could fault if protection wasn't set correctly.
    uint8_t value = *ptr;
    UNUSED(value);
  }
}

}  // namespace art