/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mem_map.h"

#include "base/memory_tool.h"
#include <backtrace/BacktraceMap.h>
#include <inttypes.h>
#include <stdlib.h>

#include <memory>
#include <sstream>

#include "base/stringprintf.h"

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
#include "ScopedFd.h"
#pragma GCC diagnostic pop

#include "thread-inl.h"
#include "utils.h"

#define USE_ASHMEM 1

#ifdef USE_ASHMEM
#include <cutils/ashmem.h>
#ifndef ANDROID_OS
#include <sys/resource.h>
#endif
#endif

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

namespace art {

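// Print a range of /proc/self/maps entries (as parsed by BacktraceMap) in a compact
// "start-end rwx name" format; used below when describing mapping conflicts.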
static std::ostream& operator<<(
    std::ostream& os,
    std::pair<BacktraceMap::const_iterator, BacktraceMap::const_iterator> iters) {
  for (BacktraceMap::const_iterator it = iters.first; it != iters.second; ++it) {
    os << StringPrintf("0x%08x-0x%08x %c%c%c %s\n",
                       static_cast<uint32_t>(it->start),
                       static_cast<uint32_t>(it->end),
                       (it->flags & PROT_READ) ? 'r' : '-',
                       (it->flags & PROT_WRITE) ? 'w' : '-',
                       (it->flags & PROT_EXEC) ? 'x' : '-', it->name.c_str());
  }
  return os;
}

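// Print every MemMap in the given registry, one per line, checking that each map is keyed by
// its base address.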
std::ostream& operator<<(std::ostream& os, const MemMap::Maps& mem_maps) {
  os << "MemMap:" << std::endl;
  for (auto it = mem_maps.begin(); it != mem_maps.end(); ++it) {
    void* base = it->first;
    MemMap* map = it->second;
    CHECK_EQ(base, map->BaseBegin());
    os << *map << std::endl;
  }
  return os;
}

MemMap::Maps* MemMap::maps_ = nullptr;

#if USE_ART_LOW_4G_ALLOCATOR
// Handling mem_map in 32b address range for 64b architectures that do not support MAP_32BIT.

// The regular start of memory allocations. The first 64KB is protected by SELinux.
static constexpr uintptr_t LOW_MEM_START = 64 * KB;

// Generate random starting position.
// To not interfere with image position, take the image's address and only place it below. Current
// formula (sketch):
//
// ART_BASE_ADDR      = 0001XXXXXXXXXXXXXXX
// ----------------------------------------
//                    = 0000111111111111111
// & ~(kPageSize - 1) =~0000000000000001111
// ----------------------------------------
// mask               = 0000111111111110000
// & random data      = YYYYYYYYYYYYYYYYYYY
// -----------------------------------
// tmp                = 0000YYYYYYYYYYY0000
// + LOW_MEM_START    = 0000000000001000000
// --------------------------------------
// start
//
// arc4random as an entropy source is exposed in Bionic, but not in glibc. When we
// do not have Bionic, simply start with LOW_MEM_START.

// Function is standalone so it can be tested somewhat in mem_map_test.cc.
#ifdef __BIONIC__
uintptr_t CreateStartPos(uint64_t input) {
  CHECK_NE(0, ART_BASE_ADDRESS);

  // Start with all bits below highest bit in ART_BASE_ADDRESS.
  constexpr size_t leading_zeros = CLZ(static_cast<uint32_t>(ART_BASE_ADDRESS));
  constexpr uintptr_t mask_ones = (1 << (31 - leading_zeros)) - 1;

  // Lowest (usually 12) bits are not used, as aligned by page size.
  constexpr uintptr_t mask = mask_ones & ~(kPageSize - 1);

  // Mask input data.
  return (input & mask) + LOW_MEM_START;
}
#endif

static uintptr_t GenerateNextMemPos() {
#ifdef __BIONIC__
  uint64_t random_data;
  arc4random_buf(&random_data, sizeof(random_data));
  return CreateStartPos(random_data);
#else
  // No arc4random on host, see above.
  return LOW_MEM_START;
#endif
}

// Initialize linear scan to random position.
uintptr_t MemMap::next_mem_pos_ = GenerateNextMemPos();
#endif

// Return true if the address range is contained in a single /proc/self/map entry.
bool MemMap::ContainedWithinExistingMap(uint8_t* ptr, size_t size, std::string* error_msg) {
  uintptr_t begin = reinterpret_cast<uintptr_t>(ptr);
  uintptr_t end = begin + size;

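  // Fast path: check ART's own registry of maps before falling back to parsing /proc/self/maps.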
  {
    MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
    for (auto& pair : *MemMap::maps_) {
      MemMap* const map = pair.second;
      if (begin >= reinterpret_cast<uintptr_t>(map->Begin()) &&
          end <= reinterpret_cast<uintptr_t>(map->End())) {
        return true;
      }
    }
  }

  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
  if (map.get() == nullptr) {
    *error_msg = StringPrintf("Failed to build process map");
    return false;
  }
  for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
    if ((begin >= it->start && begin < it->end)  // start of new within old
        && (end > it->start && end <= it->end)) {  // end of new within old
      return true;
    }
  }
  PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
  *error_msg = StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " does not overlap "
                            "any existing map. See process maps in the log.", begin, end);
  return false;
}

// Return true if the address range does not conflict with any /proc/self/maps entry.
static bool CheckNonOverlapping(uintptr_t begin,
                                uintptr_t end,
                                std::string* error_msg) {
  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
  if (map.get() == nullptr) {
    *error_msg = StringPrintf("Failed to build process map");
    return false;
  }
  for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
    if ((begin >= it->start && begin < it->end)      // start of new within old
        || (end > it->start && end < it->end)        // end of new within old
        || (begin <= it->start && end > it->end)) {  // start/end of new includes all of old
      std::ostringstream map_info;
      map_info << std::make_pair(it, map->end());
      *error_msg = StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " overlaps with "
                                "existing map 0x%08" PRIxPTR "-0x%08" PRIxPTR " (%s)\n%s",
                                begin, end,
                                static_cast<uintptr_t>(it->start), static_cast<uintptr_t>(it->end),
                                it->name.c_str(),
                                map_info.str().c_str());
      return false;
    }
  }
  return true;
}

// CheckMapRequest to validate a non-MAP_FAILED mmap result based on
// the expected value, calling munmap if validation fails, giving the
// reason in error_msg.
//
// If the expected_ptr is null, nothing is checked beyond the fact
// that the actual_ptr is not MAP_FAILED. However, if expected_ptr is
// non-null, we check that actual_ptr == expected_ptr and, if not,
// report in error_msg the conflicting mapping if one was found, or a
// generic error in other cases.
static bool CheckMapRequest(uint8_t* expected_ptr, void* actual_ptr, size_t byte_count,
                            std::string* error_msg) {
  // Handled first by caller for more specific error messages.
  CHECK(actual_ptr != MAP_FAILED);

  if (expected_ptr == nullptr) {
    return true;
  }

  uintptr_t actual = reinterpret_cast<uintptr_t>(actual_ptr);
  uintptr_t expected = reinterpret_cast<uintptr_t>(expected_ptr);
  uintptr_t limit = expected + byte_count;

  if (expected_ptr == actual_ptr) {
    return true;
  }

  // We asked for an address but didn't get what we wanted, all paths below here should fail.
  int result = munmap(actual_ptr, byte_count);
  if (result == -1) {
    PLOG(WARNING) << StringPrintf("munmap(%p, %zd) failed", actual_ptr, byte_count);
  }

  // We call this here so that we can try and generate a full error
  // message with the overlapping mapping. There's no guarantee that
  // there will be an overlap though, since
  // - The kernel is not *required* to honor expected_ptr unless MAP_FIXED is
  //   true, even if there is no overlap
  // - There might have been an overlap at the point of mmap, but the
  //   overlapping region has since been unmapped.
  std::string error_detail;
  CheckNonOverlapping(expected, limit, &error_detail);

  std::ostringstream os;
  os << StringPrintf("Failed to mmap at expected address, mapped at "
                     "0x%08" PRIxPTR " instead of 0x%08" PRIxPTR,
                     actual, expected);
  if (!error_detail.empty()) {
    os << " : " << error_detail;
  }

  *error_msg = os.str();
  return false;
}

#if USE_ART_LOW_4G_ALLOCATOR
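// Attempt a single mmap at 'ptr', rejecting (unmapping and returning MAP_FAILED) any placement
// the kernel chose that ends at or above the 4GB boundary.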
static inline void* TryMemMapLow4GB(void* ptr, size_t page_aligned_byte_count, int prot, int flags,
                                    int fd) {
  void* actual = mmap(ptr, page_aligned_byte_count, prot, flags, fd, 0);
  if (actual != MAP_FAILED) {
    // Since we didn't use MAP_FIXED the kernel may have mapped it somewhere not in the low
    // 4GB. If this is the case, unmap and retry.
    if (reinterpret_cast<uintptr_t>(actual) + page_aligned_byte_count >= 4 * GB) {
      munmap(actual, page_aligned_byte_count);
      actual = MAP_FAILED;
    }
  }
  return actual;
}
#endif

MemMap* MemMap::MapAnonymous(const char* name, uint8_t* expected_ptr, size_t byte_count, int prot,
                             bool low_4gb, bool reuse, std::string* error_msg) {
#ifndef __LP64__
  UNUSED(low_4gb);
#endif
  if (byte_count == 0) {
    return new MemMap(name, nullptr, 0, nullptr, 0, prot, false);
  }
  size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);

  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
  if (reuse) {
    // reuse means it is okay that it overlaps an existing page mapping.
    // Only use this if you actually made the page reservation yourself.
    CHECK(expected_ptr != nullptr);

    DCHECK(ContainedWithinExistingMap(expected_ptr, byte_count, error_msg)) << *error_msg;
    flags |= MAP_FIXED;
  }

  ScopedFd fd(-1);

#ifdef USE_ASHMEM
#ifdef __ANDROID__
  const bool use_ashmem = true;
#else
  // When not on Android, ashmem is faked using files in /tmp. Ensure that such a file can grow
  // to the requested size despite RLIMIT_FSIZE; if it cannot, fall back to a regular anonymous
  // mmap.
  struct rlimit rlimit_fsize;
  CHECK_EQ(getrlimit(RLIMIT_FSIZE, &rlimit_fsize), 0);
  const bool use_ashmem = (rlimit_fsize.rlim_cur == RLIM_INFINITY) ||
      (page_aligned_byte_count < rlimit_fsize.rlim_cur);
#endif
  if (use_ashmem) {
    // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
    // prefixed "dalvik-".
    std::string debug_friendly_name("dalvik-");
    debug_friendly_name += name;
    fd.reset(ashmem_create_region(debug_friendly_name.c_str(), page_aligned_byte_count));
    if (fd.get() == -1) {
      *error_msg = StringPrintf("ashmem_create_region failed for '%s': %s", name, strerror(errno));
      return nullptr;
    }
    flags &= ~MAP_ANONYMOUS;
  }
#endif

  // We need to store and potentially set an error number for pretty printing of errors.
  int saved_errno = 0;

#ifdef __LP64__
  // When requesting low_4gb memory with an expected address, the requested range must fit
  // entirely below 4GB.
  if (low_4gb && (
      // Start out of bounds.
      (reinterpret_cast<uintptr_t>(expected_ptr) >> 32) != 0 ||
      // End out of bounds. For simplicity, this will fail for the last page of memory.
      (reinterpret_cast<uintptr_t>(expected_ptr + page_aligned_byte_count) >> 32) != 0)) {
    *error_msg = StringPrintf("The requested address space (%p, %p) cannot fit in low_4gb",
                              expected_ptr, expected_ptr + page_aligned_byte_count);
    return nullptr;
  }
#endif

  // TODO:
  // A page allocator would be a useful abstraction here, as
  // 1) It is doubtful that MAP_32BIT on x86_64 is doing the right job for us
  // 2) The linear scheme, even with simple saving of the last known position, is very crude
#if USE_ART_LOW_4G_ALLOCATOR
  // MAP_32BIT only available on x86_64.
  void* actual = MAP_FAILED;
  if (low_4gb && expected_ptr == nullptr) {
    bool first_run = true;

    MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
    for (uintptr_t ptr = next_mem_pos_; ptr < 4 * GB; ptr += kPageSize) {
      // Use maps_ as an optimization to skip over large maps.
      // Find the first map which is address > ptr.
      auto it = maps_->upper_bound(reinterpret_cast<void*>(ptr));
      if (it != maps_->begin()) {
        auto before_it = it;
        --before_it;
        // Start at the end of the map before the upper bound.
        ptr = std::max(ptr, reinterpret_cast<uintptr_t>(before_it->second->BaseEnd()));
        CHECK_ALIGNED(ptr, kPageSize);
      }
      while (it != maps_->end()) {
        // How much space do we have until the next map?
        size_t delta = reinterpret_cast<uintptr_t>(it->first) - ptr;
        // If the space may be sufficient, break out of the loop.
        if (delta >= page_aligned_byte_count) {
          break;
        }
        // Otherwise, skip to the end of the map.
        ptr = reinterpret_cast<uintptr_t>(it->second->BaseEnd());
        CHECK_ALIGNED(ptr, kPageSize);
        ++it;
      }

      // Try to see if we get lucky with this address since none of the ART maps overlap.
      actual = TryMemMapLow4GB(reinterpret_cast<void*>(ptr), page_aligned_byte_count, prot, flags,
                               fd.get());
      if (actual != MAP_FAILED) {
        next_mem_pos_ = reinterpret_cast<uintptr_t>(actual) + page_aligned_byte_count;
        break;
      }

      if (4U * GB - ptr < page_aligned_byte_count) {
        // Not enough memory until 4GB.
        if (first_run) {
          // Try another time from the bottom.
          ptr = LOW_MEM_START - kPageSize;
          first_run = false;
          continue;
        } else {
          // Second try failed.
          break;
        }
      }

      uintptr_t tail_ptr;

      // Check pages are free.
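      // msync() with no flags fails with ENOMEM on an unmapped page, so a successful call below
      // means the page is already in use.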
      bool safe = true;
      for (tail_ptr = ptr; tail_ptr < ptr + page_aligned_byte_count; tail_ptr += kPageSize) {
        if (msync(reinterpret_cast<void*>(tail_ptr), kPageSize, 0) == 0) {
          safe = false;
          break;
        } else {
          DCHECK_EQ(errno, ENOMEM);
        }
      }

      next_mem_pos_ = tail_ptr;  // update early, as we break out when we found and mapped a region

      if (safe == true) {
        actual = TryMemMapLow4GB(reinterpret_cast<void*>(ptr), page_aligned_byte_count, prot, flags,
                                 fd.get());
        if (actual != MAP_FAILED) {
          break;
        }
      } else {
        // Skip over last page.
        ptr = tail_ptr;
      }
    }

    if (actual == MAP_FAILED) {
      LOG(ERROR) << "Could not find contiguous low-memory space.";
      saved_errno = ENOMEM;
    }
  } else {
    actual = mmap(expected_ptr, page_aligned_byte_count, prot, flags, fd.get(), 0);
    saved_errno = errno;
  }

#else
#if defined(__LP64__)
  if (low_4gb && expected_ptr == nullptr) {
    flags |= MAP_32BIT;
  }
#endif

  void* actual = mmap(expected_ptr, page_aligned_byte_count, prot, flags, fd.get(), 0);
  saved_errno = errno;
#endif

  if (actual == MAP_FAILED) {
    PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);

    *error_msg = StringPrintf("Failed anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0): %s. See process "
                              "maps in the log.", expected_ptr, page_aligned_byte_count, prot,
                              flags, fd.get(), strerror(saved_errno));
    return nullptr;
  }
  std::ostringstream check_map_request_error_msg;
  if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
    return nullptr;
  }
  return new MemMap(name, reinterpret_cast<uint8_t*>(actual), byte_count, actual,
                    page_aligned_byte_count, prot, reuse);
}

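// Create a placeholder MemMap that describes an existing address range without mapping or owning
// it (the destructor will not munmap it, since it is constructed with reuse == true).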
MemMap* MemMap::MapDummy(const char* name, uint8_t* addr, size_t byte_count) {
  if (byte_count == 0) {
    return new MemMap(name, nullptr, 0, nullptr, 0, 0, false);
  }
  const size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);
  return new MemMap(name, addr, byte_count, addr, page_aligned_byte_count, 0, true /* reuse */);
}

MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr, size_t byte_count, int prot, int flags,
                                 int fd, off_t start, bool reuse, const char* filename,
                                 std::string* error_msg) {
  CHECK_NE(0, prot);
  CHECK_NE(0, flags & (MAP_SHARED | MAP_PRIVATE));

  // Note that we do not allow MAP_FIXED unless reuse == true, i.e. we
  // expect this mapping to be contained within an existing map.
  if (reuse) {
    // reuse means it is okay that it overlaps an existing page mapping.
    // Only use this if you actually made the page reservation yourself.
    CHECK(expected_ptr != nullptr);

    DCHECK(ContainedWithinExistingMap(expected_ptr, byte_count, error_msg)) << *error_msg;
    flags |= MAP_FIXED;
  } else {
    CHECK_EQ(0, flags & MAP_FIXED);
    // Don't bother checking for an overlapping region here. We'll
    // check this if required after the fact inside CheckMapRequest.
  }

  if (byte_count == 0) {
    return new MemMap(filename, nullptr, 0, nullptr, 0, prot, false);
  }
  // Adjust 'offset' to be page-aligned as required by mmap.
  int page_offset = start % kPageSize;
  off_t page_aligned_offset = start - page_offset;
  // Adjust 'byte_count' to be page-aligned as we will map this anyway.
  size_t page_aligned_byte_count = RoundUp(byte_count + page_offset, kPageSize);
  // The 'expected_ptr' is modified (if specified, ie non-null) to be page aligned to the file but
  // not necessarily to virtual memory. mmap will page align 'expected' for us.
  uint8_t* page_aligned_expected =
      (expected_ptr == nullptr) ? nullptr : (expected_ptr - page_offset);

  size_t redzone_size = 0;
  if (RUNNING_ON_MEMORY_TOOL && kMemoryToolAddsRedzones && expected_ptr == nullptr) {
    redzone_size = kPageSize;
    page_aligned_byte_count += redzone_size;
  }

  uint8_t* actual = reinterpret_cast<uint8_t*>(mmap(page_aligned_expected,
                                                    page_aligned_byte_count,
                                                    prot,
                                                    flags,
                                                    fd,
                                                    page_aligned_offset));
  if (actual == MAP_FAILED) {
    auto saved_errno = errno;

    PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);

    *error_msg = StringPrintf("mmap(%p, %zd, 0x%x, 0x%x, %d, %" PRId64
                              ") of file '%s' failed: %s. See process maps in the log.",
                              page_aligned_expected, page_aligned_byte_count, prot, flags, fd,
                              static_cast<int64_t>(page_aligned_offset), filename,
                              strerror(saved_errno));
    return nullptr;
  }
  std::ostringstream check_map_request_error_msg;
  if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
    return nullptr;
  }
  if (redzone_size != 0) {
    const uint8_t *real_start = actual + page_offset;
    const uint8_t *real_end = actual + page_offset + byte_count;
    const uint8_t *mapping_end = actual + page_aligned_byte_count;

    MEMORY_TOOL_MAKE_NOACCESS(actual, real_start - actual);
    MEMORY_TOOL_MAKE_NOACCESS(real_end, mapping_end - real_end);
    page_aligned_byte_count -= redzone_size;
  }

  return new MemMap(filename, actual + page_offset, byte_count, actual, page_aligned_byte_count,
                    prot, reuse, redzone_size);
}

MemMap::~MemMap() {
  if (base_begin_ == nullptr && base_size_ == 0) {
    return;
  }

  // Unlike Valgrind, AddressSanitizer requires that all manually poisoned memory is unpoisoned
  // before it is returned to the system.
  if (redzone_size_ != 0) {
    MEMORY_TOOL_MAKE_UNDEFINED(
        reinterpret_cast<char*>(base_begin_) + base_size_ - redzone_size_,
        redzone_size_);
  }

  if (!reuse_) {
    MEMORY_TOOL_MAKE_UNDEFINED(base_begin_, base_size_);
    int result = munmap(base_begin_, base_size_);
    if (result == -1) {
      PLOG(FATAL) << "munmap failed";
    }
  }

  // Remove it from maps_.
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  bool found = false;
  DCHECK(maps_ != nullptr);
  for (auto it = maps_->lower_bound(base_begin_), end = maps_->end();
       it != end && it->first == base_begin_; ++it) {
    if (it->second == this) {
      found = true;
      maps_->erase(it);
      break;
    }
  }
  CHECK(found) << "MemMap not found";
}

MemMap::MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_begin,
               size_t base_size, int prot, bool reuse, size_t redzone_size)
    : name_(name), begin_(begin), size_(size), base_begin_(base_begin), base_size_(base_size),
      prot_(prot), reuse_(reuse), redzone_size_(redzone_size) {
  if (size_ == 0) {
    CHECK(begin_ == nullptr);
    CHECK(base_begin_ == nullptr);
    CHECK_EQ(base_size_, 0U);
  } else {
    CHECK(begin_ != nullptr);
    CHECK(base_begin_ != nullptr);
    CHECK_NE(base_size_, 0U);

    // Add it to maps_.
    MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
    DCHECK(maps_ != nullptr);
    maps_->insert(std::make_pair(base_begin_, this));
  }
}

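// Shrink this mapping so that it ends at new_end, and return a new MemMap covering the released
// tail, remapped with tail_prot and named tail_name.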
MemMap* MemMap::RemapAtEnd(uint8_t* new_end, const char* tail_name, int tail_prot,
                           std::string* error_msg) {
  DCHECK_GE(new_end, Begin());
  DCHECK_LE(new_end, End());
  DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
  DCHECK_ALIGNED(begin_, kPageSize);
  DCHECK_ALIGNED(base_begin_, kPageSize);
  DCHECK_ALIGNED(reinterpret_cast<uint8_t*>(base_begin_) + base_size_, kPageSize);
  DCHECK_ALIGNED(new_end, kPageSize);
  uint8_t* old_end = begin_ + size_;
  uint8_t* old_base_end = reinterpret_cast<uint8_t*>(base_begin_) + base_size_;
  uint8_t* new_base_end = new_end;
  DCHECK_LE(new_base_end, old_base_end);
  if (new_base_end == old_base_end) {
    return new MemMap(tail_name, nullptr, 0, nullptr, 0, tail_prot, false);
  }
  size_ = new_end - reinterpret_cast<uint8_t*>(begin_);
  base_size_ = new_base_end - reinterpret_cast<uint8_t*>(base_begin_);
  DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
  size_t tail_size = old_end - new_end;
  uint8_t* tail_base_begin = new_base_end;
  size_t tail_base_size = old_base_end - new_base_end;
  DCHECK_EQ(tail_base_begin + tail_base_size, old_base_end);
  DCHECK_ALIGNED(tail_base_size, kPageSize);

#ifdef USE_ASHMEM
  // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
  // prefixed "dalvik-".
  std::string debug_friendly_name("dalvik-");
  debug_friendly_name += tail_name;
  ScopedFd fd(ashmem_create_region(debug_friendly_name.c_str(), tail_base_size));
  int flags = MAP_PRIVATE | MAP_FIXED;
  if (fd.get() == -1) {
    *error_msg = StringPrintf("ashmem_create_region failed for '%s': %s",
                              tail_name, strerror(errno));
    return nullptr;
  }
#else
  ScopedFd fd(-1);
  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
#endif

  MEMORY_TOOL_MAKE_UNDEFINED(tail_base_begin, tail_base_size);
  // Unmap/map the tail region.
  int result = munmap(tail_base_begin, tail_base_size);
  if (result == -1) {
    PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
    *error_msg = StringPrintf("munmap(%p, %zd) failed for '%s'. See process maps in the log.",
                              tail_base_begin, tail_base_size, name_.c_str());
    return nullptr;
  }
  // Don't cause memory allocation between the munmap and the mmap
  // calls. Otherwise, libc (or something else) might take this memory
  // region. Note this isn't perfect as there's no way to prevent
  // other threads from trying to take this memory region here.
  uint8_t* actual = reinterpret_cast<uint8_t*>(mmap(tail_base_begin, tail_base_size, tail_prot,
                                                    flags, fd.get(), 0));
  if (actual == MAP_FAILED) {
    PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
    *error_msg = StringPrintf("anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0) failed. See process "
                              "maps in the log.", tail_base_begin, tail_base_size, tail_prot, flags,
                              fd.get());
    return nullptr;
  }
  return new MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot, false);
}

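// Return the backing pages to the kernel with MADV_DONTNEED, zeroing them explicitly first on
// platforms where madvise alone does not guarantee zero-filled pages.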
void MemMap::MadviseDontNeedAndZero() {
  if (base_begin_ != nullptr || base_size_ != 0) {
    if (!kMadviseZeroes) {
      memset(base_begin_, 0, base_size_);
    }
    int result = madvise(base_begin_, base_size_, MADV_DONTNEED);
    if (result == -1) {
      PLOG(WARNING) << "madvise failed";
    }
  }
}

bool MemMap::Protect(int prot) {
  if (base_begin_ == nullptr && base_size_ == 0) {
    prot_ = prot;
    return true;
  }

  if (mprotect(base_begin_, base_size_, prot) == 0) {
    prot_ = prot;
    return true;
  }

  PLOG(ERROR) << "mprotect(" << reinterpret_cast<void*>(base_begin_) << ", " << base_size_ << ", "
              << prot << ") failed";
  return false;
}

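// Return true if the maps from begin_map through end_map cover a contiguous address range with
// no unmapped gaps between them.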
bool MemMap::CheckNoGaps(MemMap* begin_map, MemMap* end_map) {
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  CHECK(begin_map != nullptr);
  CHECK(end_map != nullptr);
  CHECK(HasMemMap(begin_map));
  CHECK(HasMemMap(end_map));
  CHECK_LE(begin_map->BaseBegin(), end_map->BaseBegin());
  MemMap* map = begin_map;
  while (map->BaseBegin() != end_map->BaseBegin()) {
    MemMap* next_map = GetLargestMemMapAt(map->BaseEnd());
    if (next_map == nullptr) {
      // Found a gap.
      return false;
    }
    map = next_map;
  }
  return true;
}

void MemMap::DumpMaps(std::ostream& os, bool terse) {
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  DumpMapsLocked(os, terse);
}

void MemMap::DumpMapsLocked(std::ostream& os, bool terse) {
  const auto& mem_maps = *maps_;
  if (!terse) {
    os << mem_maps;
    return;
  }

  // Terse output example:
  //   [MemMap: 0x409be000+0x20P~0x11dP+0x20P~0x61cP+0x20P prot=0x3 LinearAlloc]
  //   [MemMap: 0x451d6000+0x6bP(3) prot=0x3 large object space allocation]
  // The details:
  //   "+0x20P" means 0x20 pages taken by a single mapping,
  //   "~0x11dP" means a gap of 0x11d pages,
  //   "+0x6bP(3)" means 3 mappings one after another, together taking 0x6b pages.
  os << "MemMap:" << std::endl;
  for (auto it = mem_maps.begin(), maps_end = mem_maps.end(); it != maps_end;) {
    MemMap* map = it->second;
    void* base = it->first;
    CHECK_EQ(base, map->BaseBegin());
    os << "[MemMap: " << base;
    ++it;
    // Merge consecutive maps with the same protect flags and name.
    constexpr size_t kMaxGaps = 9;
    size_t num_gaps = 0;
    size_t num = 1u;
    size_t size = map->BaseSize();
    CHECK_ALIGNED(size, kPageSize);
    void* end = map->BaseEnd();
    while (it != maps_end &&
        it->second->GetProtect() == map->GetProtect() &&
        it->second->GetName() == map->GetName() &&
        (it->second->BaseBegin() == end || num_gaps < kMaxGaps)) {
      if (it->second->BaseBegin() != end) {
        ++num_gaps;
        os << "+0x" << std::hex << (size / kPageSize) << "P";
        if (num != 1u) {
          os << "(" << std::dec << num << ")";
        }
        size_t gap =
            reinterpret_cast<uintptr_t>(it->second->BaseBegin()) - reinterpret_cast<uintptr_t>(end);
        CHECK_ALIGNED(gap, kPageSize);
        os << "~0x" << std::hex << (gap / kPageSize) << "P";
        num = 0u;
        size = 0u;
      }
      CHECK_ALIGNED(it->second->BaseSize(), kPageSize);
      ++num;
      size += it->second->BaseSize();
      end = it->second->BaseEnd();
      ++it;
    }
    os << "+0x" << std::hex << (size / kPageSize) << "P";
    if (num != 1u) {
      os << "(" << std::dec << num << ")";
    }
    os << " prot=0x" << std::hex << map->GetProtect() << " " << map->GetName() << "]" << std::endl;
  }
}

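// Return true if 'map' is currently registered in maps_ at its base address.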
bool MemMap::HasMemMap(MemMap* map) {
  void* base_begin = map->BaseBegin();
  for (auto it = maps_->lower_bound(base_begin), end = maps_->end();
       it != end && it->first == base_begin; ++it) {
    if (it->second == map) {
      return true;
    }
  }
  return false;
}

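// Return the largest registered MemMap that starts exactly at 'address', or nullptr if there is
// none.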
MemMap* MemMap::GetLargestMemMapAt(void* address) {
  size_t largest_size = 0;
  MemMap* largest_map = nullptr;
  DCHECK(maps_ != nullptr);
  for (auto it = maps_->lower_bound(address), end = maps_->end();
       it != end && it->first == address; ++it) {
    MemMap* map = it->second;
    CHECK(map != nullptr);
    if (largest_size < map->BaseSize()) {
      largest_size = map->BaseSize();
      largest_map = map;
    }
  }
  return largest_map;
}

void MemMap::Init() {
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  if (maps_ == nullptr) {
    // dex2oat calls MemMap::Init twice since it's needed before the runtime is created.
    maps_ = new Maps;
  }
}

void MemMap::Shutdown() {
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  delete maps_;
  maps_ = nullptr;
}

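// Shrink the mapping to new_size bytes, unmapping the tail that is no longer needed.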
void MemMap::SetSize(size_t new_size) {
  if (new_size == base_size_) {
    return;
  }
  CHECK_ALIGNED(new_size, kPageSize);
  CHECK_EQ(base_size_, size_) << "Unsupported";
  CHECK_LE(new_size, base_size_);
  MEMORY_TOOL_MAKE_UNDEFINED(
      reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) +
                              new_size),
      base_size_ - new_size);
  CHECK_EQ(munmap(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) + new_size),
                  base_size_ - new_size), 0) << new_size << " " << base_size_;
  base_size_ = new_size;
  size_ = new_size;
}

std::ostream& operator<<(std::ostream& os, const MemMap& mem_map) {
  os << StringPrintf("[MemMap: %p-%p prot=0x%x %s]",
                     mem_map.BaseBegin(), mem_map.BaseEnd(), mem_map.GetProtect(),
                     mem_map.GetName().c_str());
  return os;
}

}  // namespace art