/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "assembler_arm64.h"
#include "base/logging.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "offsets.h"
#include "thread.h"
#include "utils.h"

namespace art {
namespace arm64 {

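// The '___' shorthand below forwards every emission call to the underlying
// VIXL macro-assembler, e.g. '___ Mov(x, y)' expands to 'vixl_masm_->Mov(x, y)'.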
#ifdef ___
#error "ARM64 Assembler macro already defined."
#else
#define ___ vixl_masm_->
#endif

void Arm64Assembler::EmitSlowPaths() {
  if (!exception_blocks_.empty()) {
    for (size_t i = 0; i < exception_blocks_.size(); i++) {
      EmitExceptionPoll(exception_blocks_.at(i));
    }
  }
  ___ FinalizeCode();
}

size_t Arm64Assembler::CodeSize() const {
  return ___ SizeOfCodeGenerated();
}

void Arm64Assembler::FinalizeInstructions(const MemoryRegion& region) {
  // Copy the instructions from the buffer.
  MemoryRegion from(reinterpret_cast<void*>(vixl_buf_), CodeSize());
  region.CopyFrom(0, from);
}

void Arm64Assembler::GetCurrentThread(ManagedRegister tr) {
  ___ Mov(reg_x(tr.AsArm64().AsCoreRegister()), reg_x(TR));
}

void Arm64Assembler::GetCurrentThread(FrameOffset offset, ManagedRegister /* scratch */) {
  StoreToOffset(TR, SP, offset.Int32Value());
}

// See Arm64 PCS Section 5.2.2.1.
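// AArch64 requires SP to stay 16-byte aligned whenever it is used to access
// memory, which is why frame adjustments are checked against kStackAlignment.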
void Arm64Assembler::IncreaseFrameSize(size_t adjust) {
  CHECK_ALIGNED(adjust, kStackAlignment);
  AddConstant(SP, -adjust);
}

// See Arm64 PCS Section 5.2.2.1.
void Arm64Assembler::DecreaseFrameSize(size_t adjust) {
  CHECK_ALIGNED(adjust, kStackAlignment);
  AddConstant(SP, adjust);
}

void Arm64Assembler::AddConstant(Register rd, int32_t value, Condition cond) {
  AddConstant(rd, rd, value, cond);
}

void Arm64Assembler::AddConstant(Register rd, Register rn, int32_t value,
                                 Condition cond) {
  if ((cond == AL) || (cond == NV)) {
    // VIXL macro-assembler handles all variants.
    ___ Add(reg_x(rd), reg_x(rn), value);
  } else {
    // ip1 = rn + value
    // rd = cond ? ip1 : rd
    CHECK_NE(rn, IP1);
    ___ Add(reg_x(IP1), reg_x(rn), value);
    ___ Csel(reg_x(rd), reg_x(IP1), reg_x(rd), COND_OP(cond));
  }
}

void Arm64Assembler::StoreWToOffset(StoreOperandType type, WRegister source,
                                    Register base, int32_t offset) {
  switch (type) {
    case kStoreByte:
      ___ Strb(reg_w(source), MEM_OP(reg_x(base), offset));
      break;
    case kStoreHalfword:
      ___ Strh(reg_w(source), MEM_OP(reg_x(base), offset));
      break;
    case kStoreWord:
      ___ Str(reg_w(source), MEM_OP(reg_x(base), offset));
      break;
    default:
      LOG(FATAL) << "UNREACHABLE";
  }
}

void Arm64Assembler::StoreToOffset(Register source, Register base, int32_t offset) {
  CHECK_NE(source, SP);
  ___ Str(reg_x(source), MEM_OP(reg_x(base), offset));
}

void Arm64Assembler::StoreSToOffset(SRegister source, Register base, int32_t offset) {
  ___ Str(reg_s(source), MEM_OP(reg_x(base), offset));
}

void Arm64Assembler::StoreDToOffset(DRegister source, Register base, int32_t offset) {
  ___ Str(reg_d(source), MEM_OP(reg_x(base), offset));
}

void Arm64Assembler::Store(FrameOffset offs, ManagedRegister m_src, size_t size) {
  Arm64ManagedRegister src = m_src.AsArm64();
  if (src.IsNoRegister()) {
    CHECK_EQ(0u, size);
  } else if (src.IsWRegister()) {
    CHECK_EQ(4u, size);
    StoreWToOffset(kStoreWord, src.AsWRegister(), SP, offs.Int32Value());
  } else if (src.IsCoreRegister()) {
    CHECK_EQ(8u, size);
    StoreToOffset(src.AsCoreRegister(), SP, offs.Int32Value());
  } else if (src.IsSRegister()) {
    StoreSToOffset(src.AsSRegister(), SP, offs.Int32Value());
  } else {
    CHECK(src.IsDRegister()) << src;
    StoreDToOffset(src.AsDRegister(), SP, offs.Int32Value());
  }
}

void Arm64Assembler::StoreRef(FrameOffset offs, ManagedRegister m_src) {
  Arm64ManagedRegister src = m_src.AsArm64();
  CHECK(src.IsCoreRegister()) << src;
  StoreToOffset(src.AsCoreRegister(), SP, offs.Int32Value());
}

void Arm64Assembler::StoreRawPtr(FrameOffset offs, ManagedRegister m_src) {
  Arm64ManagedRegister src = m_src.AsArm64();
  CHECK(src.IsCoreRegister()) << src;
  StoreToOffset(src.AsCoreRegister(), SP, offs.Int32Value());
}

void Arm64Assembler::StoreImmediateToFrame(FrameOffset offs, uint32_t imm,
                                           ManagedRegister m_scratch) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadImmediate(scratch.AsCoreRegister(), imm);
  StoreToOffset(scratch.AsCoreRegister(), SP, offs.Int32Value());
}

void Arm64Assembler::StoreImmediateToThread32(ThreadOffset<4> offs, uint32_t imm,
                                              ManagedRegister m_scratch) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadImmediate(scratch.AsCoreRegister(), imm);
  StoreToOffset(scratch.AsCoreRegister(), TR, offs.Int32Value());
}

void Arm64Assembler::StoreStackOffsetToThread32(ThreadOffset<4> tr_offs,
                                                FrameOffset fr_offs,
                                                ManagedRegister m_scratch) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsCoreRegister()) << scratch;
  AddConstant(scratch.AsCoreRegister(), SP, fr_offs.Int32Value());
  StoreToOffset(scratch.AsCoreRegister(), TR, tr_offs.Int32Value());
}

void Arm64Assembler::StoreStackPointerToThread32(ThreadOffset<4> tr_offs) {
  // Arm64 does not support "str sp, [dest]", so we use IP1 as a temporary register.
  ___ Mov(reg_x(IP1), reg_x(SP));
  StoreToOffset(IP1, TR, tr_offs.Int32Value());
}

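// Store 'source' at dest_off and copy the stack word at in_off to dest_off + 8,
// so the two values occupy one spanning pair of 8-byte slots.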
void Arm64Assembler::StoreSpanning(FrameOffset dest_off, ManagedRegister m_source,
                                   FrameOffset in_off, ManagedRegister m_scratch) {
  Arm64ManagedRegister source = m_source.AsArm64();
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  StoreToOffset(source.AsCoreRegister(), SP, dest_off.Int32Value());
  LoadFromOffset(scratch.AsCoreRegister(), SP, in_off.Int32Value());
  StoreToOffset(scratch.AsCoreRegister(), SP, dest_off.Int32Value() + 8);
}

// Load routines.
void Arm64Assembler::LoadImmediate(Register dest, int32_t value,
                                   Condition cond) {
  if ((cond == AL) || (cond == NV)) {
    ___ Mov(reg_x(dest), value);
  } else {
    // ip1 = value
    // dest = cond ? ip1 : dest
    if (value != 0) {
      CHECK_NE(dest, IP1);
      ___ Mov(reg_x(IP1), value);
      ___ Csel(reg_x(dest), reg_x(IP1), reg_x(dest), COND_OP(cond));
    } else {
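      // A zero immediate can be selected from XZR directly, saving the Mov.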
      ___ Csel(reg_x(dest), reg_x(XZR), reg_x(dest), COND_OP(cond));
    }
  }
}

void Arm64Assembler::LoadWFromOffset(LoadOperandType type, WRegister dest,
                                     Register base, int32_t offset) {
  switch (type) {
    case kLoadSignedByte:
      ___ Ldrsb(reg_w(dest), MEM_OP(reg_x(base), offset));
      break;
    case kLoadSignedHalfword:
      ___ Ldrsh(reg_w(dest), MEM_OP(reg_x(base), offset));
      break;
    case kLoadUnsignedByte:
      ___ Ldrb(reg_w(dest), MEM_OP(reg_x(base), offset));
      break;
    case kLoadUnsignedHalfword:
      ___ Ldrh(reg_w(dest), MEM_OP(reg_x(base), offset));
      break;
    case kLoadWord:
      ___ Ldr(reg_w(dest), MEM_OP(reg_x(base), offset));
      break;
    default:
      LOG(FATAL) << "UNREACHABLE";
  }
}

// Note: This method can be extended with load-type information to cover the
// sign-extended A64 load variants.
void Arm64Assembler::LoadFromOffset(Register dest, Register base,
                                    int32_t offset) {
  CHECK_NE(dest, SP);
  ___ Ldr(reg_x(dest), MEM_OP(reg_x(base), offset));
}

void Arm64Assembler::LoadSFromOffset(SRegister dest, Register base,
                                     int32_t offset) {
  ___ Ldr(reg_s(dest), MEM_OP(reg_x(base), offset));
}

void Arm64Assembler::LoadDFromOffset(DRegister dest, Register base,
                                     int32_t offset) {
  ___ Ldr(reg_d(dest), MEM_OP(reg_x(base), offset));
}

void Arm64Assembler::Load(Arm64ManagedRegister dest, Register base,
                          int32_t offset, size_t size) {
  if (dest.IsNoRegister()) {
    CHECK_EQ(0u, size) << dest;
  } else if (dest.IsWRegister()) {
    CHECK_EQ(4u, size) << dest;
    ___ Ldr(reg_w(dest.AsWRegister()), MEM_OP(reg_x(base), offset));
  } else if (dest.IsCoreRegister()) {
    CHECK_EQ(8u, size) << dest;
    CHECK_NE(dest.AsCoreRegister(), SP) << dest;
    ___ Ldr(reg_x(dest.AsCoreRegister()), MEM_OP(reg_x(base), offset));
  } else if (dest.IsSRegister()) {
    ___ Ldr(reg_s(dest.AsSRegister()), MEM_OP(reg_x(base), offset));
  } else {
    CHECK(dest.IsDRegister()) << dest;
    ___ Ldr(reg_d(dest.AsDRegister()), MEM_OP(reg_x(base), offset));
  }
}

void Arm64Assembler::Load(ManagedRegister m_dst, FrameOffset src, size_t size) {
  return Load(m_dst.AsArm64(), SP, src.Int32Value(), size);
}

void Arm64Assembler::LoadFromThread32(ManagedRegister m_dst, ThreadOffset<4> src, size_t size) {
  return Load(m_dst.AsArm64(), TR, src.Int32Value(), size);
}

void Arm64Assembler::LoadRef(ManagedRegister m_dst, FrameOffset offs) {
  Arm64ManagedRegister dst = m_dst.AsArm64();
  CHECK(dst.IsCoreRegister()) << dst;
  LoadFromOffset(dst.AsCoreRegister(), SP, offs.Int32Value());
}

void Arm64Assembler::LoadRef(ManagedRegister m_dst, ManagedRegister m_base,
                             MemberOffset offs) {
  Arm64ManagedRegister dst = m_dst.AsArm64();
  Arm64ManagedRegister base = m_base.AsArm64();
  CHECK(dst.IsCoreRegister() && base.IsCoreRegister());
  LoadFromOffset(dst.AsCoreRegister(), base.AsCoreRegister(), offs.Int32Value());
}

void Arm64Assembler::LoadRawPtr(ManagedRegister m_dst, ManagedRegister m_base, Offset offs) {
  Arm64ManagedRegister dst = m_dst.AsArm64();
  Arm64ManagedRegister base = m_base.AsArm64();
  CHECK(dst.IsCoreRegister() && base.IsCoreRegister());
  LoadFromOffset(dst.AsCoreRegister(), base.AsCoreRegister(), offs.Int32Value());
}

void Arm64Assembler::LoadRawPtrFromThread32(ManagedRegister m_dst, ThreadOffset<4> offs) {
  Arm64ManagedRegister dst = m_dst.AsArm64();
  CHECK(dst.IsCoreRegister()) << dst;
  LoadFromOffset(dst.AsCoreRegister(), TR, offs.Int32Value());
}

// Copying routines.
void Arm64Assembler::Move(ManagedRegister m_dst, ManagedRegister m_src, size_t size) {
  Arm64ManagedRegister dst = m_dst.AsArm64();
  Arm64ManagedRegister src = m_src.AsArm64();
  if (!dst.Equals(src)) {
    if (dst.IsCoreRegister()) {
      CHECK(src.IsCoreRegister()) << src;
      ___ Mov(reg_x(dst.AsCoreRegister()), reg_x(src.AsCoreRegister()));
    } else if (dst.IsWRegister()) {
      CHECK(src.IsWRegister()) << src;
      ___ Mov(reg_w(dst.AsWRegister()), reg_w(src.AsWRegister()));
    } else if (dst.IsSRegister()) {
      CHECK(src.IsSRegister()) << src;
      ___ Fmov(reg_s(dst.AsSRegister()), reg_s(src.AsSRegister()));
    } else {
      CHECK(dst.IsDRegister()) << dst;
      CHECK(src.IsDRegister()) << src;
      ___ Fmov(reg_d(dst.AsDRegister()), reg_d(src.AsDRegister()));
    }
  }
}

void Arm64Assembler::CopyRawPtrFromThread32(FrameOffset fr_offs,
                                            ThreadOffset<4> tr_offs,
                                            ManagedRegister m_scratch) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadFromOffset(scratch.AsCoreRegister(), TR, tr_offs.Int32Value());
  StoreToOffset(scratch.AsCoreRegister(), SP, fr_offs.Int32Value());
}

void Arm64Assembler::CopyRawPtrToThread32(ThreadOffset<4> tr_offs,
                                          FrameOffset fr_offs,
                                          ManagedRegister m_scratch) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadFromOffset(scratch.AsCoreRegister(), SP, fr_offs.Int32Value());
  StoreToOffset(scratch.AsCoreRegister(), TR, tr_offs.Int32Value());
}

void Arm64Assembler::CopyRef(FrameOffset dest, FrameOffset src,
                             ManagedRegister m_scratch) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadFromOffset(scratch.AsCoreRegister(), SP, src.Int32Value());
  StoreToOffset(scratch.AsCoreRegister(), SP, dest.Int32Value());
}

void Arm64Assembler::Copy(FrameOffset dest, FrameOffset src,
                          ManagedRegister m_scratch, size_t size) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsCoreRegister() || scratch.IsWRegister()) << scratch;
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    LoadWFromOffset(kLoadWord, scratch.AsWRegister(), SP, src.Int32Value());
    StoreWToOffset(kStoreWord, scratch.AsWRegister(), SP, dest.Int32Value());
  } else if (size == 8) {
    LoadFromOffset(scratch.AsCoreRegister(), SP, src.Int32Value());
    StoreToOffset(scratch.AsCoreRegister(), SP, dest.Int32Value());
  } else {
    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
  }
}

void Arm64Assembler::Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
                          ManagedRegister m_scratch, size_t size) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  Arm64ManagedRegister base = src_base.AsArm64();
  CHECK(base.IsCoreRegister()) << base;
  CHECK(scratch.IsCoreRegister() || scratch.IsWRegister()) << scratch;
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    LoadWFromOffset(kLoadWord, scratch.AsWRegister(), base.AsCoreRegister(),
                    src_offset.Int32Value());
    StoreWToOffset(kStoreWord, scratch.AsWRegister(), SP, dest.Int32Value());
  } else if (size == 8) {
    LoadFromOffset(scratch.AsCoreRegister(), base.AsCoreRegister(), src_offset.Int32Value());
    StoreToOffset(scratch.AsCoreRegister(), SP, dest.Int32Value());
  } else {
    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
  }
}

void Arm64Assembler::Copy(ManagedRegister m_dest_base, Offset dest_offs, FrameOffset src,
                          ManagedRegister m_scratch, size_t size) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  Arm64ManagedRegister base = m_dest_base.AsArm64();
  CHECK(base.IsCoreRegister()) << base;
  CHECK(scratch.IsCoreRegister() || scratch.IsWRegister()) << scratch;
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    LoadWFromOffset(kLoadWord, scratch.AsWRegister(), SP, src.Int32Value());
    StoreWToOffset(kStoreWord, scratch.AsWRegister(), base.AsCoreRegister(),
                   dest_offs.Int32Value());
  } else if (size == 8) {
    LoadFromOffset(scratch.AsCoreRegister(), SP, src.Int32Value());
    StoreToOffset(scratch.AsCoreRegister(), base.AsCoreRegister(), dest_offs.Int32Value());
  } else {
    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
  }
}

void Arm64Assembler::Copy(FrameOffset /*dst*/, FrameOffset /*src_base*/, Offset /*src_offset*/,
                          ManagedRegister /*mscratch*/, size_t /*size*/) {
  UNIMPLEMENTED(FATAL) << "Unimplemented Copy() variant";
}

void Arm64Assembler::Copy(ManagedRegister m_dest, Offset dest_offset,
                          ManagedRegister m_src, Offset src_offset,
                          ManagedRegister m_scratch, size_t size) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  Arm64ManagedRegister src = m_src.AsArm64();
  Arm64ManagedRegister dest = m_dest.AsArm64();
  CHECK(dest.IsCoreRegister()) << dest;
  CHECK(src.IsCoreRegister()) << src;
  CHECK(scratch.IsCoreRegister() || scratch.IsWRegister()) << scratch;
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    LoadWFromOffset(kLoadWord, scratch.AsWRegister(), src.AsCoreRegister(),
                    src_offset.Int32Value());
    StoreWToOffset(kStoreWord, scratch.AsWRegister(), dest.AsCoreRegister(),
                   dest_offset.Int32Value());
  } else if (size == 8) {
    LoadFromOffset(scratch.AsCoreRegister(), src.AsCoreRegister(), src_offset.Int32Value());
    StoreToOffset(scratch.AsCoreRegister(), dest.AsCoreRegister(), dest_offset.Int32Value());
  } else {
    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
  }
}

void Arm64Assembler::Copy(FrameOffset /*dst*/, Offset /*dest_offset*/,
                          FrameOffset /*src*/, Offset /*src_offset*/,
                          ManagedRegister /*scratch*/, size_t /*size*/) {
  UNIMPLEMENTED(FATAL) << "Unimplemented Copy() variant";
}

void Arm64Assembler::MemoryBarrier(ManagedRegister m_scratch) {
  // TODO: Should we check that m_scratch is IP? - see arm.
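  // On non-SMP builds (ANDROID_SMP == 0) the barrier is compiled out entirely.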
#if ANDROID_SMP != 0
  ___ Dmb(vixl::InnerShareable, vixl::BarrierAll);
#endif
}

void Arm64Assembler::SignExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
  UNIMPLEMENTED(FATAL) << "no sign extension necessary for Arm64";
}

void Arm64Assembler::ZeroExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
  UNIMPLEMENTED(FATAL) << "no zero extension necessary for Arm64";
}

void Arm64Assembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
  // TODO: not validating references.
}

void Arm64Assembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
  // TODO: not validating references.
}

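// Call the routine whose address is stored at [base + offs], clobbering scratch.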
void Arm64Assembler::Call(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch) {
  Arm64ManagedRegister base = m_base.AsArm64();
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(base.IsCoreRegister()) << base;
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadFromOffset(scratch.AsCoreRegister(), base.AsCoreRegister(), offs.Int32Value());
  ___ Blr(reg_x(scratch.AsCoreRegister()));
}

void Arm64Assembler::JumpTo(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch) {
  Arm64ManagedRegister base = m_base.AsArm64();
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(base.IsCoreRegister()) << base;
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadFromOffset(scratch.AsCoreRegister(), base.AsCoreRegister(), offs.Int32Value());
  ___ Br(reg_x(scratch.AsCoreRegister()));
}

void Arm64Assembler::Call(FrameOffset base, Offset offs, ManagedRegister m_scratch) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsCoreRegister()) << scratch;
  // Call *(*(SP + base) + offset).
  LoadFromOffset(scratch.AsCoreRegister(), SP, base.Int32Value());
  LoadFromOffset(scratch.AsCoreRegister(), scratch.AsCoreRegister(), offs.Int32Value());
  ___ Blr(reg_x(scratch.AsCoreRegister()));
}

void Arm64Assembler::CallFromThread32(ThreadOffset<4> /*offset*/, ManagedRegister /*scratch*/) {
  UNIMPLEMENTED(FATAL) << "Unimplemented Call() variant";
}

void Arm64Assembler::CreateSirtEntry(ManagedRegister m_out_reg, FrameOffset sirt_offs,
                                     ManagedRegister m_in_reg, bool null_allowed) {
  Arm64ManagedRegister out_reg = m_out_reg.AsArm64();
  Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
  // For now we only hold stale SIRT entries in x registers.
  CHECK(in_reg.IsNoRegister() || in_reg.IsCoreRegister()) << in_reg;
  CHECK(out_reg.IsCoreRegister()) << out_reg;
  if (null_allowed) {
    // Null values get a SIRT entry value of 0. Otherwise, the SIRT entry is
    // the address in the SIRT holding the reference.
    // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
    if (in_reg.IsNoRegister()) {
      LoadFromOffset(out_reg.AsCoreRegister(), SP, sirt_offs.Int32Value());
      in_reg = out_reg;
    }
    ___ Cmp(reg_x(in_reg.AsCoreRegister()), 0);
    if (!out_reg.Equals(in_reg)) {
      LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
    }
    AddConstant(out_reg.AsCoreRegister(), SP, sirt_offs.Int32Value(), NE);
  } else {
    AddConstant(out_reg.AsCoreRegister(), SP, sirt_offs.Int32Value(), AL);
  }
}

void Arm64Assembler::CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset,
                                     ManagedRegister m_scratch, bool null_allowed) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsCoreRegister()) << scratch;
  if (null_allowed) {
    LoadFromOffset(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value());
    // Null values get a SIRT entry value of 0. Otherwise, the SIRT entry is
    // the address in the SIRT holding the reference.
    // e.g. scratch = (scratch == 0) ? 0 : (SP+sirt_offset)
    ___ Cmp(reg_x(scratch.AsCoreRegister()), 0);
    // TODO: Move this logic into AddConstant with flags.
    AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value(), NE);
  } else {
    AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value(), AL);
  }
  StoreToOffset(scratch.AsCoreRegister(), SP, out_off.Int32Value());
}

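// Load the reference held in the SIRT slot addressed by in_reg into out_reg;
// a null slot address yields a null reference.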
void Arm64Assembler::LoadReferenceFromSirt(ManagedRegister m_out_reg,
                                           ManagedRegister m_in_reg) {
  Arm64ManagedRegister out_reg = m_out_reg.AsArm64();
  Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
  CHECK(out_reg.IsCoreRegister()) << out_reg;
  CHECK(in_reg.IsCoreRegister()) << in_reg;
  vixl::Label exit;
  if (!out_reg.Equals(in_reg)) {
    // FIXME: Who sets the flags here?
    LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
  }
  ___ Cmp(reg_x(in_reg.AsCoreRegister()), 0);
  ___ B(&exit, COND_OP(EQ));
  LoadFromOffset(out_reg.AsCoreRegister(), in_reg.AsCoreRegister(), 0);
  ___ Bind(&exit);
}

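// Check Thread::exception_ and, if an exception is pending, branch to a slow
// path that is emitted later by EmitSlowPaths()/EmitExceptionPoll().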
void Arm64Assembler::ExceptionPoll(ManagedRegister m_scratch, size_t stack_adjust) {
  CHECK_ALIGNED(stack_adjust, kStackAlignment);
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  Arm64Exception *current_exception = new Arm64Exception(scratch, stack_adjust);
  exception_blocks_.push_back(current_exception);
  LoadFromOffset(scratch.AsCoreRegister(), TR, Thread::ExceptionOffset<4>().Int32Value());
  ___ Cmp(reg_x(scratch.AsCoreRegister()), 0);
  ___ B(current_exception->Entry(), COND_OP(NE));
}

void Arm64Assembler::EmitExceptionPoll(Arm64Exception *exception) {
  // Bind exception poll entry.
  ___ Bind(exception->Entry());
  if (exception->stack_adjust_ != 0) {  // Fix up the frame.
    DecreaseFrameSize(exception->stack_adjust_);
  }
  // Pass exception object as argument.
  // Don't care about preserving X0 as this won't return.
  ___ Mov(reg_x(X0), reg_x(exception->scratch_.AsCoreRegister()));
  LoadFromOffset(IP1, TR, QUICK_ENTRYPOINT_OFFSET(8, pDeliverException).Int32Value());
  ___ Blr(reg_x(IP1));
  // Call should never return.
  ___ Brk();
}

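// Set up the managed frame: push the APCS callee-saved registers, grow the
// stack to frame_size, store the Method* (X0) at SP + 0, then write out the
// entry spills above the frame.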
void Arm64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
                                const std::vector<ManagedRegister>& callee_save_regs,
                                const ManagedRegisterEntrySpills& entry_spills) {
  CHECK_ALIGNED(frame_size, kStackAlignment);
  CHECK(X0 == method_reg.AsArm64().AsCoreRegister());

  // TODO: - Create the APCS FP (end of FP chain);
  //       - add support for saving a different set of callee-saved regs.
  // For now we check that the callee-save vector has 20 entries, matching the
  // APCS callee-saved regs [X19, X30] and [D8, D15].
  CHECK_EQ(callee_save_regs.size(), kCalleeSavedRegsSize);
  ___ PushCalleeSavedRegisters();

  // Increase frame to required size - must be at least space to push Method*.
  CHECK_GT(frame_size, kCalleeSavedRegsSize * kPointerSize);
  size_t adjust = frame_size - (kCalleeSavedRegsSize * kPointerSize);
  IncreaseFrameSize(adjust);

  // Write Method*.
  StoreToOffset(X0, SP, 0);

  // Write out entry spills, treated as X regs.
  // TODO: we could use an STP (store pair) variant of StoreToOffset here.
  for (size_t i = 0; i < entry_spills.size(); ++i) {
    Register reg = entry_spills.at(i).AsArm64().AsCoreRegister();
    StoreToOffset(reg, SP, frame_size + kPointerSize + (i * kPointerSize));
  }
}

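// Tear down the frame built by BuildFrame(): drop the locals area, pop the
// callee-saved registers and return to the caller via LR.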
void Arm64Assembler::RemoveFrame(size_t frame_size,
                                 const std::vector<ManagedRegister>& callee_save_regs) {
  CHECK_ALIGNED(frame_size, kStackAlignment);

  // For now we only check that the frame size is larger than the space needed
  // for the APCS callee-saved regs [X19, X30] and [D8, D15].
  CHECK_EQ(callee_save_regs.size(), kCalleeSavedRegsSize);
  CHECK_GT(frame_size, kCalleeSavedRegsSize * kPointerSize);

  // Decrease frame size to start of callee saved regs.
  size_t adjust = frame_size - (kCalleeSavedRegsSize * kPointerSize);
  DecreaseFrameSize(adjust);

  // Pop callee saved and return to LR.
  ___ PopCalleeSavedRegisters();
  ___ Ret();
}

}  // namespace arm64
}  // namespace art