/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "linker/arm64/relative_patcher_arm64.h"

#include "arch/arm64/instruction_set_features_arm64.h"
#include "art_method.h"
#include "compiled_method.h"
#include "driver/compiler_driver.h"
#include "linker/output_stream.h"
#include "oat.h"
#include "oat_quick_method_header.h"
#include "utils/arm64/assembler_arm64.h"

namespace art {
namespace linker {

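// Cortex-A53 erratum 843419 (summarized; see ARM's erratum notice for the exact
// conditions): an ADRP in one of the last two instruction slots of a 4KiB region
// (offset 0xff8 or 0xffc), followed by certain load/store sequences, can write an
// incorrect result. Where the instruction after the ADRP cannot easily be proved
// safe (see NeedsErratum843419Thunk()), the ADRP is redirected to an out-of-line
// thunk that executes it at a safe offset (see PatchDexCacheReference()).
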
Arm64RelativePatcher::Arm64RelativePatcher(RelativePatcherTargetProvider* provider,
                                           const Arm64InstructionSetFeatures* features)
    : ArmBaseRelativePatcher(provider, kArm64, CompileThunkCode(),
                             kMaxPositiveDisplacement, kMaxNegativeDisplacement),
      fix_cortex_a53_843419_(features->NeedFixCortexA53_843419()),
      reserved_adrp_thunks_(0u),
      processed_adrp_thunks_(0u) {
  if (fix_cortex_a53_843419_) {
    adrp_thunk_locations_.reserve(16u);
    current_method_thunks_.reserve(16u * kAdrpThunkSize);
  }
}

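// Reserves space for the compiled method and, when the Cortex-A53 fix is enabled,
// for up to one 8-byte ADRP thunk per ADRP patch in the method. Thunk space
// reserved for the previous method is committed first; the actual thunk locations
// for the current method are recorded once its final code offset is known.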
uint32_t Arm64RelativePatcher::ReserveSpace(uint32_t offset,
                                            const CompiledMethod* compiled_method,
                                            MethodReference method_ref) {
  if (!fix_cortex_a53_843419_) {
    DCHECK(adrp_thunk_locations_.empty());
    return ReserveSpaceInternal(offset, compiled_method, method_ref, 0u);
  }

  // Add thunks for the previous method, if any.
  if (reserved_adrp_thunks_ != adrp_thunk_locations_.size()) {
    size_t num_adrp_thunks = adrp_thunk_locations_.size() - reserved_adrp_thunks_;
    offset = CompiledMethod::AlignCode(offset, kArm64) + kAdrpThunkSize * num_adrp_thunks;
    reserved_adrp_thunks_ = adrp_thunk_locations_.size();
  }

  // Count the number of ADRP insns as the upper bound on the number of thunks needed
  // and use it to reserve space for other linker patches.
  size_t num_adrp = 0u;
  DCHECK(compiled_method != nullptr);
  for (const LinkerPatch& patch : compiled_method->GetPatches()) {
    if (patch.Type() == kLinkerPatchDexCacheArray &&
        patch.LiteralOffset() == patch.PcInsnOffset()) {  // ADRP patch
      ++num_adrp;
    }
  }
  offset = ReserveSpaceInternal(offset, compiled_method, method_ref, kAdrpThunkSize * num_adrp);
  if (num_adrp == 0u) {
    return offset;
  }

  // Now that we have the actual offset where the code will be placed, locate the ADRP insns
  // that actually require the thunk.
  uint32_t quick_code_offset = compiled_method->AlignCode(offset) + sizeof(OatQuickMethodHeader);
  ArrayRef<const uint8_t> code = compiled_method->GetQuickCode();
  uint32_t thunk_offset = compiled_method->AlignCode(quick_code_offset + code.size());
  for (const LinkerPatch& patch : compiled_method->GetPatches()) {
    if (patch.Type() == kLinkerPatchDexCacheArray &&
        patch.LiteralOffset() == patch.PcInsnOffset()) {  // ADRP patch
      uint32_t patch_offset = quick_code_offset + patch.LiteralOffset();
      if (NeedsErratum843419Thunk(code, patch.LiteralOffset(), patch_offset)) {
        adrp_thunk_locations_.emplace_back(patch_offset, thunk_offset);
        thunk_offset += kAdrpThunkSize;
      }
    }
  }
  return offset;
}

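// Commits thunk space for the last method (ReserveSpace() only commits a method's
// thunks once the next method is processed), then delegates to the base class.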
uint32_t Arm64RelativePatcher::ReserveSpaceEnd(uint32_t offset) {
  if (!fix_cortex_a53_843419_) {
    DCHECK(adrp_thunk_locations_.empty());
  } else {
    // Add thunks for the last method, if any.
    if (reserved_adrp_thunks_ != adrp_thunk_locations_.size()) {
      size_t num_adrp_thunks = adrp_thunk_locations_.size() - reserved_adrp_thunks_;
      offset = CompiledMethod::AlignCode(offset, kArm64) + kAdrpThunkSize * num_adrp_thunks;
      reserved_adrp_thunks_ = adrp_thunk_locations_.size();
    }
  }
  return ArmBaseRelativePatcher::ReserveSpaceEnd(offset);
}

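// Writes the erratum thunks accumulated for the current method at their reserved,
// aligned locations, then lets the base class write any thunks of its own.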
uint32_t Arm64RelativePatcher::WriteThunks(OutputStream* out, uint32_t offset) {
  if (fix_cortex_a53_843419_) {
    if (!current_method_thunks_.empty()) {
      uint32_t aligned_offset = CompiledMethod::AlignCode(offset, kArm64);
      if (kIsDebugBuild) {
        CHECK_ALIGNED(current_method_thunks_.size(), kAdrpThunkSize);
        size_t num_thunks = current_method_thunks_.size() / kAdrpThunkSize;
        CHECK_LE(num_thunks, processed_adrp_thunks_);
        for (size_t i = 0u; i != num_thunks; ++i) {
          const auto& entry = adrp_thunk_locations_[processed_adrp_thunks_ - num_thunks + i];
          CHECK_EQ(entry.second, aligned_offset + i * kAdrpThunkSize);
        }
      }
      uint32_t aligned_code_delta = aligned_offset - offset;
      if (aligned_code_delta != 0u && !WriteCodeAlignment(out, aligned_code_delta)) {
        return 0u;
      }
      if (!WriteMiscThunk(out, ArrayRef<const uint8_t>(current_method_thunks_))) {
        return 0u;
      }
      offset = aligned_offset + current_method_thunks_.size();
      current_method_thunks_.clear();
    }
  }
  return ArmBaseRelativePatcher::WriteThunks(out, offset);
}

void Arm64RelativePatcher::PatchCall(std::vector<uint8_t>* code, uint32_t literal_offset,
                                     uint32_t patch_offset, uint32_t target_offset) {
  DCHECK_LE(literal_offset + 4u, code->size());
  DCHECK_EQ(literal_offset & 3u, 0u);
  DCHECK_EQ(patch_offset & 3u, 0u);
  DCHECK_EQ(target_offset & 3u, 0u);
  uint32_t displacement = CalculateDisplacement(patch_offset, target_offset & ~1u);
  DCHECK_EQ(displacement & 3u, 0u);
  DCHECK((displacement >> 27) == 0u || (displacement >> 27) == 31u);  // 28-bit signed.
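  // BL encodes a signed 26-bit word displacement (disp / 4, range +-128MiB) in its
  // low 26 bits, under the opcode bits 100101.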
  uint32_t insn = (displacement & 0x0fffffffu) >> 2;
  insn |= 0x94000000;  // BL

  // Check that we're just overwriting an existing BL.
  DCHECK_EQ(GetInsn(code, literal_offset) & 0xfc000000u, 0x94000000u);
  // Write the new BL.
  SetInsn(code, literal_offset, insn);
}

void Arm64RelativePatcher::PatchDexCacheReference(std::vector<uint8_t>* code,
                                                  const LinkerPatch& patch,
                                                  uint32_t patch_offset,
                                                  uint32_t target_offset) {
  DCHECK_EQ(patch_offset & 3u, 0u);
  DCHECK_EQ(target_offset & 3u, 0u);
  uint32_t literal_offset = patch.LiteralOffset();
  uint32_t insn = GetInsn(code, literal_offset);
  uint32_t pc_insn_offset = patch.PcInsnOffset();
  uint32_t disp = target_offset - ((patch_offset - literal_offset + pc_insn_offset) & ~0xfffu);
  bool wide = (insn & 0x40000000) != 0;
  uint32_t shift = wide ? 3u : 2u;
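  // For LDR (unsigned immediate), bit 30 selects the access size (0: 32-bit,
  // 1: 64-bit) and the imm12 field is scaled by that size, hence the shift of 2 or 3.
  // For an unset ADRP (imm == 0), bit 30 is clear, so wide is false there.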
  if (literal_offset == pc_insn_offset) {
    // Check it's an ADRP with imm == 0 (unset).
    DCHECK_EQ((insn & 0xffffffe0u), 0x90000000u)
        << literal_offset << ", " << pc_insn_offset << ", 0x" << std::hex << insn;
    if (fix_cortex_a53_843419_ && processed_adrp_thunks_ != adrp_thunk_locations_.size() &&
        adrp_thunk_locations_[processed_adrp_thunks_].first == patch_offset) {
      DCHECK(NeedsErratum843419Thunk(ArrayRef<const uint8_t>(*code),
                                     literal_offset, patch_offset));
      uint32_t thunk_offset = adrp_thunk_locations_[processed_adrp_thunks_].second;
      uint32_t adrp_disp = target_offset - (thunk_offset & ~0xfffu);
      uint32_t adrp = PatchAdrp(insn, adrp_disp);

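      // The patched sequence is:
      //     B <thunk>               // written over the original ADRP below
      //     ...
      //   thunk:
      //     ADRP <Xd>, <target>     // the original ADRP, relocated to the thunk's PC
      //     B <back>                // branches to the instruction after the B <thunk>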
      uint32_t out_disp = thunk_offset - patch_offset;
      DCHECK_EQ(out_disp & 3u, 0u);
      DCHECK((out_disp >> 27) == 0u || (out_disp >> 27) == 31u);  // 28-bit signed.
      insn = (out_disp & 0x0fffffffu) >> shift;  // shift is 2u here: the ADRP checked
                                                 // above has bit 30 clear, so wide is false.
      insn |= 0x14000000;  // B <thunk>

      uint32_t back_disp = -out_disp;
      DCHECK_EQ(back_disp & 3u, 0u);
      DCHECK((back_disp >> 27) == 0u || (back_disp >> 27) == 31u);  // 28-bit signed.
      uint32_t b_back = (back_disp & 0x0fffffffu) >> 2;
      b_back |= 0x14000000;  // B <back>
      size_t thunks_code_offset = current_method_thunks_.size();
      current_method_thunks_.resize(thunks_code_offset + kAdrpThunkSize);
      SetInsn(&current_method_thunks_, thunks_code_offset, adrp);
      SetInsn(&current_method_thunks_, thunks_code_offset + 4u, b_back);
      static_assert(kAdrpThunkSize == 2 * 4u, "thunk has 2 instructions");

      processed_adrp_thunks_ += 1u;
    } else {
      insn = PatchAdrp(insn, disp);
    }
    // Write the new ADRP (or B to the erratum 843419 thunk).
    SetInsn(code, literal_offset, insn);
  } else {
    // LDR 32-bit or 64-bit with imm12 == 0 (unset).
    DCHECK_EQ(insn & 0xbffffc00, 0xb9400000) << insn;
    if (kIsDebugBuild) {
      uint32_t adrp = GetInsn(code, pc_insn_offset);
      if ((adrp & 0x9f000000u) != 0x90000000u) {
        CHECK(fix_cortex_a53_843419_);
        CHECK_EQ(adrp & 0xfc000000u, 0x14000000u);  // B <thunk>
        CHECK_ALIGNED(current_method_thunks_.size(), kAdrpThunkSize);
        size_t num_thunks = current_method_thunks_.size() / kAdrpThunkSize;
        CHECK_LE(num_thunks, processed_adrp_thunks_);
        uint32_t b_offset = patch_offset - literal_offset + pc_insn_offset;
        for (size_t i = processed_adrp_thunks_ - num_thunks; ; ++i) {
          CHECK_NE(i, processed_adrp_thunks_);
          if (adrp_thunk_locations_[i].first == b_offset) {
            size_t idx = num_thunks - (processed_adrp_thunks_ - i);
            adrp = GetInsn(&current_method_thunks_, idx * kAdrpThunkSize);
            break;
          }
        }
      }
      CHECK_EQ(adrp & 0x9f00001fu,                   // Check that pc_insn_offset points
               0x90000000 | ((insn >> 5) & 0x1fu));  // to ADRP with matching register.
    }
    uint32_t imm12 = (disp & 0xfffu) >> shift;
    insn = (insn & ~(0xfffu << 10)) | (imm12 << 10);
    SetInsn(code, literal_offset, insn);
  }
}

std::vector<uint8_t> Arm64RelativePatcher::CompileThunkCode() {
  // The thunk just uses the entry point in the ArtMethod. This works even for calls
  // to the generic JNI and interpreter trampolines.
  arm64::Arm64Assembler assembler;
  Offset offset(ArtMethod::EntryPointFromQuickCompiledCodeOffset(
      kArm64PointerSize).Int32Value());
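  // On entry, X0 holds the callee ArtMethod* (managed calling convention) and
  // IP0 (x16) serves as the scratch register for the indirect branch.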
  assembler.JumpTo(ManagedRegister(arm64::X0), offset, ManagedRegister(arm64::IP0));
  // Ensure we emit the literal pool.
  assembler.FinalizeCode();
  std::vector<uint8_t> thunk_code(assembler.CodeSize());
  MemoryRegion code(thunk_code.data(), thunk_code.size());
  assembler.FinalizeInstructions(code);
  return thunk_code;
}

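// ADRP computes PC_page + (imm21 << 12), where the 21-bit immediate is split into
// immlo (bits 29-30) and immhi (bits 5-23) of the instruction encoding.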
uint32_t Arm64RelativePatcher::PatchAdrp(uint32_t adrp, uint32_t disp) {
  return (adrp & 0x9f00001fu) |  // Clear offset bits, keep ADRP with destination reg.
      // Bottom 12 bits are ignored, the next 2 lowest bits are encoded in bits 29-30.
      ((disp & 0x00003000u) << (29 - 12)) |
      // The next 18 bits are encoded in bits 5-22.
      ((disp & 0xffffc000u) >> (12 + 2 - 5)) |
      // Since the target_offset is based on the beginning of the oat file and the
      // image space precedes the oat file, the target_offset into image space will
      // be negative yet passed as uint32_t. Therefore we limit the displacement
      // to +-2GiB (rather than the maximum +-4GiB) and determine the sign bit from
      // the highest bit of the displacement. This is encoded in bit 23.
      ((disp & 0x80000000u) >> (31 - 23));
}

bool Arm64RelativePatcher::NeedsErratum843419Thunk(ArrayRef<const uint8_t> code,
                                                   uint32_t literal_offset,
                                                   uint32_t patch_offset) {
  DCHECK_EQ(patch_offset & 0x3u, 0u);
  if ((patch_offset & 0xff8) == 0xff8) {  // ADRP at offset ...ff8 or ...ffc of a 4KiB region.
    uint32_t adrp = GetInsn(code, literal_offset);
    DCHECK_EQ(adrp & 0xff000000, 0x90000000);
    uint32_t next_offset = patch_offset + 4u;
    uint32_t next_insn = GetInsn(code, literal_offset + 4u);

    // Below we avoid patching sequences where the adrp is followed by a load which can
    // easily be proved to be aligned.

    // First check if the next insn is the LDR using the result of the ADRP.
    // LDR <Wt>, [<Xn>, #pimm], where <Xn> == ADRP destination reg.
    if ((next_insn & 0xffc00000) == 0xb9400000 &&
        (((next_insn >> 5) ^ adrp) & 0x1f) == 0) {
      return false;
    }

    // LDR <Wt>, <label> is always aligned and thus it doesn't cause boundary crossing.
    if ((next_insn & 0xff000000) == 0x18000000) {
      return false;
    }

    // LDR <Xt>, <label> is aligned iff the pc + displacement is a multiple of 8.
    if ((next_insn & 0xff000000) == 0x58000000) {
      bool is_aligned_load = (((next_offset >> 2) ^ (next_insn >> 5)) & 1) == 0;
      return !is_aligned_load;
    }

    // LDR <Wt>, [SP, #<pimm>] and LDR <Xt>, [SP, #<pimm>] are always aligned loads, as SP
    // is guaranteed to be 128-bit aligned and <pimm> is a multiple of the load size.
    if ((next_insn & 0xbfc003e0) == 0xb94003e0) {
      return false;
    }
    return true;
  }
  return false;
}

void Arm64RelativePatcher::SetInsn(std::vector<uint8_t>* code, uint32_t offset, uint32_t value) {
  DCHECK_LE(offset + 4u, code->size());
  DCHECK_EQ(offset & 3u, 0u);
  uint8_t* addr = &(*code)[offset];
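  // Store the instruction in little-endian byte order, one byte at a time, so the
  // code buffer itself needs no particular alignment.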
  addr[0] = (value >> 0) & 0xff;
  addr[1] = (value >> 8) & 0xff;
  addr[2] = (value >> 16) & 0xff;
  addr[3] = (value >> 24) & 0xff;
}

uint32_t Arm64RelativePatcher::GetInsn(ArrayRef<const uint8_t> code, uint32_t offset) {
  DCHECK_LE(offset + 4u, code.size());
  DCHECK_EQ(offset & 3u, 0u);
  const uint8_t* addr = &code[offset];
  return
      (static_cast<uint32_t>(addr[0]) << 0) +
      (static_cast<uint32_t>(addr[1]) << 8) +
      (static_cast<uint32_t>(addr[2]) << 16) +
      (static_cast<uint32_t>(addr[3]) << 24);
}

template <typename Alloc>
uint32_t Arm64RelativePatcher::GetInsn(std::vector<uint8_t, Alloc>* code, uint32_t offset) {
  return GetInsn(ArrayRef<const uint8_t>(*code), offset);
}

}  // namespace linker
}  // namespace art