/*
2 * Copyright 2019 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "jit_memory_region.h"
18
19#include <android-base/unique_fd.h>
20#include "base/bit_utils.h" // For RoundDown, RoundUp
21#include "base/globals.h"
22#include "base/logging.h" // For VLOG.
23#include "base/memfd.h"
24#include "base/systrace.h"
25#include "gc/allocator/dlmalloc.h"
26#include "jit/jit_scoped_code_cache_write.h"
27#include "oat_quick_method_header.h"
28
29using android::base::unique_fd;
30
31namespace art {
32namespace jit {
33
// The region's capacity is split evenly: one half for data (stack maps,
// root tables), the other half for JIT-generated code.
// TODO: Make this variable?
static constexpr size_t kCodeAndDataCapacityDivider = 2;
38
// Creates the memory mappings backing the JIT cache: a RW data mapping and,
// when code space is requested, an RX executable mapping. When a memfd is
// available (non-zygote), a dual view is set up so code updates go through a
// separate non-executable alias of the code pages, avoiding RWX permissions.
//
// On success populates data_pages_, exec_pages_ and (dual view only)
// non_exec_pages_ and returns true. On failure returns false and stores a
// description in *error_msg.
bool JitMemoryRegion::InitializeMappings(bool rwx_memory_allowed,
                                         bool is_zygote,
                                         std::string* error_msg) {
  ScopedTrace trace(__PRETTY_FUNCTION__);

  const size_t capacity = max_capacity_;
  const size_t data_capacity = capacity / kCodeAndDataCapacityDivider;
  const size_t exec_capacity = capacity - data_capacity;

  // File descriptor enabling dual-view mapping of code section.
  unique_fd mem_fd;

  // Zygote shouldn't create a shared mapping for JIT, so we cannot use dual view
  // for it.
  if (!is_zygote) {
    // Bionic supports memfd_create, but the call may fail on older kernels.
    mem_fd = unique_fd(art::memfd_create("/jit-cache", /* flags= */ 0));
    if (mem_fd.get() < 0) {
      std::ostringstream oss;
      oss << "Failed to initialize dual view JIT. memfd_create() error: " << strerror(errno);
      if (!rwx_memory_allowed) {
        // Without using RWX page permissions, the JIT cannot fall back to single mapping as it
        // requires transitioning the code pages to RWX for updates.
        *error_msg = oss.str();
        return false;
      }
      // Dual view is merely preferred; RWX single view is a viable fallback.
      VLOG(jit) << oss.str();
    }
  }

  // Size the memory file to the full capacity: both halves (data + code) are
  // backed by this one file in the dual-view configuration.
  if (mem_fd.get() >= 0 && ftruncate(mem_fd, capacity) != 0) {
    std::ostringstream oss;
    oss << "Failed to initialize memory file: " << strerror(errno);
    *error_msg = oss.str();
    return false;
  }

  std::string data_cache_name = is_zygote ? "zygote-data-code-cache" : "data-code-cache";
  std::string exec_cache_name = is_zygote ? "zygote-jit-code-cache" : "jit-code-cache";

  std::string error_str;
  // Map name specific for android_os_Debug.cpp accounting.
  // Map in low 4gb to simplify accessing root tables for x86_64.
  // We could do PC-relative addressing to avoid this problem, but that
  // would require reserving code and data area before submitting, which
  // means more windows for the code memory to be RWX.
  int base_flags;
  MemMap data_pages;
  if (mem_fd.get() >= 0) {
    // Dual view of JIT code cache case. Create an initial mapping of data pages large enough
    // for data and non-writable view of JIT code pages. We use the memory file descriptor to
    // enable dual mapping - we'll create a second mapping using the descriptor below. The
    // mappings will look like:
    //
    //  VA                  PA
    //
    //  +---------------+
    //  | non exec code |\
    //  +---------------+ \
    //  :               :\ \
    //  +---------------+.\.+---------------+
    //  |  exec code    |  \|     code      |
    //  +---------------+...+---------------+
    //  |      data     |   |     data      |
    //  +---------------+...+---------------+
    //
    // In this configuration code updates are written to the non-executable view of the code
    // cache, and the executable view of the code cache has fixed RX memory protections.
    //
    // This memory needs to be mapped shared as the code portions will have two mappings.
    base_flags = MAP_SHARED;
    data_pages = MemMap::MapFile(
        data_capacity + exec_capacity,
        kProtRW,
        base_flags,
        mem_fd,
        /* start= */ 0,
        /* low_4gb= */ true,
        data_cache_name.c_str(),
        &error_str);
  } else {
    // Single view of JIT code cache case. Create an initial mapping of data pages large enough
    // for data and JIT code pages. The mappings will look like:
    //
    //  VA                  PA
    //
    //  +---------------+...+---------------+
    //  |  exec code    |   |     code      |
    //  +---------------+...+---------------+
    //  |      data     |   |     data      |
    //  +---------------+...+---------------+
    //
    // In this configuration code updates are written to the executable view of the code cache,
    // and the executable view of the code cache transitions RX to RWX for the update and then
    // back to RX after the update.
    base_flags = MAP_PRIVATE | MAP_ANON;
    data_pages = MemMap::MapAnonymous(
        data_cache_name.c_str(),
        data_capacity + exec_capacity,
        kProtRW,
        /* low_4gb= */ true,
        &error_str);
  }

  if (!data_pages.IsValid()) {
    std::ostringstream oss;
    oss << "Failed to create read write cache: " << error_str << " size=" << capacity;
    *error_msg = oss.str();
    return false;
  }

  MemMap exec_pages;
  MemMap non_exec_pages;
  if (exec_capacity > 0) {
    uint8_t* const divider = data_pages.Begin() + data_capacity;
    // Set initial permission for executable view to catch any SELinux permission problems early
    // (for processes that cannot map WX pages). Otherwise, this region does not need to be
    // executable as there is no code in the cache yet.
    exec_pages = data_pages.RemapAtEnd(divider,
                                       exec_cache_name.c_str(),
                                       kProtRX,
                                       base_flags | MAP_FIXED,
                                       mem_fd.get(),
                                       (mem_fd.get() >= 0) ? data_capacity : 0,
                                       &error_str);
    if (!exec_pages.IsValid()) {
      std::ostringstream oss;
      oss << "Failed to create read execute code cache: " << error_str << " size=" << capacity;
      *error_msg = oss.str();
      return false;
    }

    if (mem_fd.get() >= 0) {
      // For dual view, create the secondary view of code memory used for updating code. This view
      // is never executable.
      std::string name = exec_cache_name + "-rw";
      non_exec_pages = MemMap::MapFile(exec_capacity,
                                       kProtR,
                                       base_flags,
                                       mem_fd,
                                       /* start= */ data_capacity,
                                       /* low_4gb= */ false,
                                       name.c_str(),
                                       &error_str);
      if (!non_exec_pages.IsValid()) {
        static const char* kFailedNxView = "Failed to map non-executable view of JIT code cache";
        if (rwx_memory_allowed) {
          // Log and continue as single view JIT (requires RWX memory).
          VLOG(jit) << kFailedNxView;
        } else {
          *error_msg = kFailedNxView;
          return false;
        }
      }
    }
  } else {
    // Profiling only. No memory for code required.
  }

  data_pages_ = std::move(data_pages);
  exec_pages_ = std::move(exec_pages);
  non_exec_pages_ = std::move(non_exec_pages);
  return true;
}
203
204void JitMemoryRegion::InitializeState(size_t initial_capacity, size_t max_capacity) {
205 CHECK_GE(max_capacity, initial_capacity);
206 CHECK(max_capacity <= 1 * GB) << "The max supported size for JIT code cache is 1GB";
207 // Align both capacities to page size, as that's the unit mspaces use.
208 initial_capacity_ = RoundDown(initial_capacity, 2 * kPageSize);
209 max_capacity_ = RoundDown(max_capacity, 2 * kPageSize);
210 current_capacity_ = initial_capacity,
211 data_end_ = initial_capacity / kCodeAndDataCapacityDivider;
212 exec_end_ = initial_capacity - data_end_;
213}
214
// Creates the dlmalloc mspaces managing the data and code halves of the
// region. Must run after InitializeMappings() and InitializeState(), since it
// reads data_pages_/exec_pages_/non_exec_pages_ and data_end_/exec_end_.
void JitMemoryRegion::InitializeSpaces() {
  // Initialize the data heap.
  data_mspace_ = create_mspace_with_base(data_pages_.Begin(), data_end_, false /*locked*/);
  CHECK(data_mspace_ != nullptr) << "create_mspace_with_base (data) failed";

  // Initialize the code heap. Prefer the writable (non-executable) view when
  // dual mapping is in use; otherwise fall back to the executable pages.
  MemMap* code_heap = nullptr;
  if (non_exec_pages_.IsValid()) {
    code_heap = &non_exec_pages_;
  } else if (exec_pages_.IsValid()) {
    code_heap = &exec_pages_;
  }
  if (code_heap != nullptr) {
    // Make all pages reserved for the code heap writable. The mspace allocator, that manages the
    // heap, will take and initialize pages in create_mspace_with_base().
    CheckedCall(mprotect, "create code heap", code_heap->Begin(), code_heap->Size(), kProtRW);
    exec_mspace_ = create_mspace_with_base(code_heap->Begin(), exec_end_, false /*locked*/);
    CHECK(exec_mspace_ != nullptr) << "create_mspace_with_base (exec) failed";
    SetFootprintLimit(initial_capacity_);
    // Protect pages containing heap metadata. Updates to the code heap toggle write permission to
    // perform the update and there are no other times write access is required.
    CheckedCall(mprotect, "protect code heap", code_heap->Begin(), code_heap->Size(), kProtR);
  } else {
    // Profiling-only configuration: no code pages, so no exec mspace.
    exec_mspace_ = nullptr;
    SetFootprintLimit(initial_capacity_);
  }
}
242
243void JitMemoryRegion::SetFootprintLimit(size_t new_footprint) {
244 size_t data_space_footprint = new_footprint / kCodeAndDataCapacityDivider;
245 DCHECK(IsAlignedParam(data_space_footprint, kPageSize));
246 DCHECK_EQ(data_space_footprint * kCodeAndDataCapacityDivider, new_footprint);
247 mspace_set_footprint_limit(data_mspace_, data_space_footprint);
248 if (HasCodeMapping()) {
249 ScopedCodeCacheWrite scc(*this);
250 mspace_set_footprint_limit(exec_mspace_, new_footprint - data_space_footprint);
251 }
252}
253
254bool JitMemoryRegion::IncreaseCodeCacheCapacity() {
255 if (current_capacity_ == max_capacity_) {
256 return false;
257 }
258
259 // Double the capacity if we're below 1MB, or increase it by 1MB if
260 // we're above.
261 if (current_capacity_ < 1 * MB) {
262 current_capacity_ *= 2;
263 } else {
264 current_capacity_ += 1 * MB;
265 }
266 if (current_capacity_ > max_capacity_) {
267 current_capacity_ = max_capacity_;
268 }
269
270 VLOG(jit) << "Increasing code cache capacity to " << PrettySize(current_capacity_);
271
272 SetFootprintLimit(current_capacity_);
273
274 return true;
275}
276
// dlmalloc callback invoked when one of the mspaces needs more (or less)
// backing memory. Bump-allocates from the tail of the corresponding mapping
// by advancing exec_end_/data_end_. `increment` is signed; dlmalloc may pass
// a negative value to release memory — TODO confirm against dlmalloc docs.
// NO_THREAD_SAFETY_ANALYSIS as this is called from mspace code, at which point the lock
// is already held.
void* JitMemoryRegion::MoreCore(const void* mspace, intptr_t increment) NO_THREAD_SAFETY_ANALYSIS {
  if (mspace == exec_mspace_) {
    DCHECK(exec_mspace_ != nullptr);
    // Grow the code heap through the updatable (writable) view, which aliases
    // the executable pages in the dual-view configuration.
    const MemMap* const code_pages = GetUpdatableCodeMapping();
    void* result = code_pages->Begin() + exec_end_;
    exec_end_ += increment;
    return result;
  } else {
    DCHECK_EQ(data_mspace_, mspace);
    void* result = data_pages_.Begin() + data_end_;
    data_end_ += increment;
    return result;
  }
}
293
294uint8_t* JitMemoryRegion::AllocateCode(size_t code_size) {
295 // Each allocation should be on its own set of cache lines.
296 // `code_size` covers the OatQuickMethodHeader, the JIT generated machine code,
297 // and any alignment padding.
298 size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
299 size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
300 DCHECK_GT(code_size, header_size);
301 uint8_t* result = reinterpret_cast<uint8_t*>(
302 mspace_memalign(exec_mspace_, kJitCodeAlignment, code_size));
303 // Ensure the header ends up at expected instruction alignment.
304 DCHECK_ALIGNED_PARAM(reinterpret_cast<uintptr_t>(result + header_size), alignment);
305 used_memory_for_code_ += mspace_usable_size(result);
306 return result;
307}
308
309void JitMemoryRegion::FreeCode(uint8_t* code) {
310 code = GetNonExecutableAddress(code);
311 used_memory_for_code_ -= mspace_usable_size(code);
312 mspace_free(exec_mspace_, code);
313}
314
315uint8_t* JitMemoryRegion::AllocateData(size_t data_size) {
316 void* result = mspace_malloc(data_mspace_, data_size);
317 used_memory_for_data_ += mspace_usable_size(result);
318 return reinterpret_cast<uint8_t*>(result);
319}
320
321void JitMemoryRegion::FreeData(uint8_t* data) {
322 used_memory_for_data_ -= mspace_usable_size(data);
323 mspace_free(data_mspace_, data);
324}
325
326} // namespace jit
327} // namespace art