/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "intrinsics_x86_64.h"

#include "code_generator_x86_64.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "intrinsics.h"
#include "mirror/array-inl.h"
#include "mirror/art_method.h"
#include "mirror/string.h"
#include "thread.h"
#include "utils/x86_64/assembler_x86_64.h"
#include "utils/x86_64/constants_x86_64.h"

namespace art {

namespace x86_64 {

X86_64Assembler* IntrinsicCodeGeneratorX86_64::GetAssembler() {
  return reinterpret_cast<X86_64Assembler*>(codegen_->GetAssembler());
}

ArenaAllocator* IntrinsicCodeGeneratorX86_64::GetAllocator() {
  return codegen_->GetGraph()->GetArena();
}

bool IntrinsicLocationsBuilderX86_64::TryDispatch(HInvoke* invoke) {
  Dispatch(invoke);
  const LocationSummary* res = invoke->GetLocations();
  return res != nullptr && res->Intrinsified();
}

#define __ reinterpret_cast<X86_64Assembler*>(codegen->GetAssembler())->

// TODO: trg as memory.
static void MoveFromReturnRegister(Location trg,
                                   Primitive::Type type,
                                   CodeGeneratorX86_64* codegen) {
  if (!trg.IsValid()) {
    DCHECK(type == Primitive::kPrimVoid);
    return;
  }

  switch (type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      CpuRegister trg_reg = trg.AsRegister<CpuRegister>();
      if (trg_reg.AsRegister() != RAX) {
        __ movl(trg_reg, CpuRegister(RAX));
      }
      break;
    }
    case Primitive::kPrimLong: {
      CpuRegister trg_reg = trg.AsRegister<CpuRegister>();
      if (trg_reg.AsRegister() != RAX) {
        __ movq(trg_reg, CpuRegister(RAX));
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unexpected void type for valid location " << trg;
      UNREACHABLE();

    case Primitive::kPrimDouble: {
      XmmRegister trg_reg = trg.AsFpuRegister<XmmRegister>();
      if (trg_reg.AsFloatRegister() != XMM0) {
        __ movsd(trg_reg, XmmRegister(XMM0));
      }
      break;
    }
    case Primitive::kPrimFloat: {
      XmmRegister trg_reg = trg.AsFpuRegister<XmmRegister>();
      if (trg_reg.AsFloatRegister() != XMM0) {
        __ movss(trg_reg, XmmRegister(XMM0));
      }
      break;
    }
  }
}

static void MoveArguments(HInvoke* invoke, ArenaAllocator* arena, CodeGeneratorX86_64* codegen) {
  if (invoke->InputCount() == 0) {
    return;
  }

  LocationSummary* locations = invoke->GetLocations();
  InvokeDexCallingConventionVisitor calling_convention_visitor;

  // We're moving potentially two or more locations to locations that could overlap, so we need
  // a parallel move resolver.
  HParallelMove parallel_move(arena);

  for (size_t i = 0; i < invoke->InputCount(); i++) {
    HInstruction* input = invoke->InputAt(i);
    Location cc_loc = calling_convention_visitor.GetNextLocation(input->GetType());
    Location actual_loc = locations->InAt(i);

    parallel_move.AddMove(actual_loc, cc_loc, nullptr);
  }

  codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
}

// Slow-path for fallback (calling the managed code to handle the intrinsic) in an intrinsified
// call. This will copy the arguments into the positions for a regular call.
//
// Note: The actual parameters are required to be in the locations given by the invoke's location
// summary. If an intrinsic modifies those locations before a slowpath call, they must be
// restored!
class IntrinsicSlowPathX86_64 : public SlowPathCodeX86_64 {
 public:
  explicit IntrinsicSlowPathX86_64(HInvoke* invoke) : invoke_(invoke) { }

  void EmitNativeCode(CodeGenerator* codegen_in) OVERRIDE {
    CodeGeneratorX86_64* codegen = down_cast<CodeGeneratorX86_64*>(codegen_in);
    __ Bind(GetEntryLabel());

    SaveLiveRegisters(codegen, invoke_->GetLocations());

    MoveArguments(invoke_, codegen->GetGraph()->GetArena(), codegen);

    if (invoke_->IsInvokeStaticOrDirect()) {
      codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(), CpuRegister(RDI));
      RecordPcInfo(codegen, invoke_, invoke_->GetDexPc());
    } else {
      UNIMPLEMENTED(FATAL) << "Non-direct intrinsic slow-path not yet implemented";
      UNREACHABLE();
    }

    // Copy the result back to the expected output.
    Location out = invoke_->GetLocations()->Out();
    if (out.IsValid()) {
      DCHECK(out.IsRegister());  // TODO: Replace this when we support output in memory.
      DCHECK(!invoke_->GetLocations()->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
      MoveFromReturnRegister(out, invoke_->GetType(), codegen);
    }

    RestoreLiveRegisters(codegen, invoke_->GetLocations());
    __ jmp(GetExitLabel());
  }

 private:
  // The instruction where this slow path is happening.
  HInvoke* const invoke_;

  DISALLOW_COPY_AND_ASSIGN(IntrinsicSlowPathX86_64);
};

#undef __
#define __ assembler->

static void CreateFPToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::RequiresFpuRegister());
  locations->SetOut(Location::RequiresRegister());
}

static void CreateIntToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresFpuRegister());
}

static void MoveFPToInt(LocationSummary* locations, bool is64bit, X86_64Assembler* assembler) {
  Location input = locations->InAt(0);
  Location output = locations->Out();
  __ movd(output.AsRegister<CpuRegister>(), input.AsFpuRegister<XmmRegister>(), is64bit);
}

static void MoveIntToFP(LocationSummary* locations, bool is64bit, X86_64Assembler* assembler) {
  Location input = locations->InAt(0);
  Location output = locations->Out();
  __ movd(output.AsFpuRegister<XmmRegister>(), input.AsRegister<CpuRegister>(), is64bit);
}

void IntrinsicLocationsBuilderX86_64::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
  CreateFPToIntLocations(arena_, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
  CreateIntToFPLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86_64::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
  MoveFPToInt(invoke->GetLocations(), true, GetAssembler());
}
void IntrinsicCodeGeneratorX86_64::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
  MoveIntToFP(invoke->GetLocations(), true, GetAssembler());
}

void IntrinsicLocationsBuilderX86_64::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
  CreateFPToIntLocations(arena_, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitFloatIntBitsToFloat(HInvoke* invoke) {
  CreateIntToFPLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86_64::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
  MoveFPToInt(invoke->GetLocations(), false, GetAssembler());
}
void IntrinsicCodeGeneratorX86_64::VisitFloatIntBitsToFloat(HInvoke* invoke) {
  MoveIntToFP(invoke->GetLocations(), false, GetAssembler());
}

static void CreateIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::SameAsFirstInput());
}

static void GenReverseBytes(LocationSummary* locations,
                            Primitive::Type size,
                            X86_64Assembler* assembler) {
  CpuRegister out = locations->Out().AsRegister<CpuRegister>();

  switch (size) {
    case Primitive::kPrimShort:
      // TODO: Can be done with an xchg of 8b registers. This is straight from Quick.
      __ bswapl(out);
      __ sarl(out, Immediate(16));
      break;
    case Primitive::kPrimInt:
      __ bswapl(out);
      break;
    case Primitive::kPrimLong:
      __ bswapq(out);
      break;
    default:
      LOG(FATAL) << "Unexpected size for reverse-bytes: " << size;
      UNREACHABLE();
  }
}
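
// Note on the short case above: after bswapl the two interesting bytes of the (sign-extended)
// short sit in the upper 16 bits of the register, byte-swapped; the arithmetic shift right by 16
// brings them back down while sign-extending, which matches the short-typed result expected from
// Short.reverseBytes.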

void IntrinsicLocationsBuilderX86_64::VisitIntegerReverseBytes(HInvoke* invoke) {
  CreateIntToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86_64::VisitIntegerReverseBytes(HInvoke* invoke) {
  GenReverseBytes(invoke->GetLocations(), Primitive::kPrimInt, GetAssembler());
}

void IntrinsicLocationsBuilderX86_64::VisitLongReverseBytes(HInvoke* invoke) {
  CreateIntToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86_64::VisitLongReverseBytes(HInvoke* invoke) {
  GenReverseBytes(invoke->GetLocations(), Primitive::kPrimLong, GetAssembler());
}

void IntrinsicLocationsBuilderX86_64::VisitShortReverseBytes(HInvoke* invoke) {
  CreateIntToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86_64::VisitShortReverseBytes(HInvoke* invoke) {
  GenReverseBytes(invoke->GetLocations(), Primitive::kPrimShort, GetAssembler());
}


// TODO: Consider Quick's way of doing Double abs through integer operations, as the immediate we
// need is 64b.

static void CreateFloatToFloatPlusTemps(ArenaAllocator* arena, HInvoke* invoke) {
  // TODO: Enable memory operations when the assembler supports them.
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::RequiresFpuRegister());
  // TODO: Allow x86 to work with memory. This requires assembler support, see below.
  // locations->SetInAt(0, Location::Any());  // X86 can work on memory directly.
  locations->SetOut(Location::SameAsFirstInput());
  locations->AddTemp(Location::RequiresRegister());     // Immediate constant.
  locations->AddTemp(Location::RequiresFpuRegister());  // FP version of above.
}

static void MathAbsFP(LocationSummary* locations, bool is64bit, X86_64Assembler* assembler) {
  Location output = locations->Out();
  CpuRegister cpu_temp = locations->GetTemp(0).AsRegister<CpuRegister>();

  if (output.IsFpuRegister()) {
    // In-register
    XmmRegister xmm_temp = locations->GetTemp(1).AsFpuRegister<XmmRegister>();

    if (is64bit) {
      __ movq(cpu_temp, Immediate(INT64_C(0x7FFFFFFFFFFFFFFF)));
      __ movd(xmm_temp, cpu_temp);
      __ andpd(output.AsFpuRegister<XmmRegister>(), xmm_temp);
    } else {
      __ movl(cpu_temp, Immediate(INT64_C(0x7FFFFFFF)));
      __ movd(xmm_temp, cpu_temp);
      __ andps(output.AsFpuRegister<XmmRegister>(), xmm_temp);
    }
  } else {
    // TODO: update when assembler support is available.
    UNIMPLEMENTED(FATAL) << "Needs assembler support.";
    // Once assembler support is available, in-memory operations look like this:
    // if (is64bit) {
    //   DCHECK(output.IsDoubleStackSlot());
    //   // No 64b and with literal.
    //   __ movq(cpu_temp, Immediate(INT64_C(0x7FFFFFFFFFFFFFFF)));
    //   __ andq(Address(CpuRegister(RSP), output.GetStackIndex()), cpu_temp);
    // } else {
    //   DCHECK(output.IsStackSlot());
    //   // Can use and with a literal directly.
    //   __ andl(Address(CpuRegister(RSP), output.GetStackIndex()), Immediate(INT64_C(0x7FFFFFFF)));
    // }
  }
}
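
// Note: the FP abs above works by clearing the sign bit. And-ing with 0x7FFFFFFF (float) or
// 0x7FFFFFFFFFFFFFFF (double) leaves exponent and mantissa untouched, which also gives the
// expected result for -0.0, infinities, and NaNs.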

void IntrinsicLocationsBuilderX86_64::VisitMathAbsDouble(HInvoke* invoke) {
  CreateFloatToFloatPlusTemps(arena_, invoke);
}

void IntrinsicCodeGeneratorX86_64::VisitMathAbsDouble(HInvoke* invoke) {
  MathAbsFP(invoke->GetLocations(), true, GetAssembler());
}

void IntrinsicLocationsBuilderX86_64::VisitMathAbsFloat(HInvoke* invoke) {
  CreateFloatToFloatPlusTemps(arena_, invoke);
}

void IntrinsicCodeGeneratorX86_64::VisitMathAbsFloat(HInvoke* invoke) {
  MathAbsFP(invoke->GetLocations(), false, GetAssembler());
}

static void CreateIntToIntPlusTemp(ArenaAllocator* arena, HInvoke* invoke) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::SameAsFirstInput());
  locations->AddTemp(Location::RequiresRegister());
}

static void GenAbsInteger(LocationSummary* locations, bool is64bit, X86_64Assembler* assembler) {
  Location output = locations->Out();
  CpuRegister out = output.AsRegister<CpuRegister>();
  CpuRegister mask = locations->GetTemp(0).AsRegister<CpuRegister>();

  if (is64bit) {
    // Create mask.
    __ movq(mask, out);
    __ sarq(mask, Immediate(63));
    // Add mask.
    __ addq(out, mask);
    __ xorq(out, mask);
  } else {
    // Create mask.
    __ movl(mask, out);
    __ sarl(mask, Immediate(31));
    // Add mask.
    __ addl(out, mask);
    __ xorl(out, mask);
  }
}
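
// Note on the sequence above: mask = value >> (width - 1) is 0 for non-negative values and all
// ones (-1) for negative values, so (value + mask) ^ mask leaves non-negative values unchanged
// and computes -value otherwise. E.g., for the 32-bit value -5: mask = 0xFFFFFFFF, -5 + -1 = -6,
// and -6 ^ -1 = 5. (INT_MIN maps to INT_MIN, matching Math.abs semantics.)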

void IntrinsicLocationsBuilderX86_64::VisitMathAbsInt(HInvoke* invoke) {
  CreateIntToIntPlusTemp(arena_, invoke);
}

void IntrinsicCodeGeneratorX86_64::VisitMathAbsInt(HInvoke* invoke) {
  GenAbsInteger(invoke->GetLocations(), false, GetAssembler());
}

void IntrinsicLocationsBuilderX86_64::VisitMathAbsLong(HInvoke* invoke) {
  CreateIntToIntPlusTemp(arena_, invoke);
}

void IntrinsicCodeGeneratorX86_64::VisitMathAbsLong(HInvoke* invoke) {
  GenAbsInteger(invoke->GetLocations(), true, GetAssembler());
}

static void GenMinMaxFP(LocationSummary* locations, bool is_min, bool is_double,
                        X86_64Assembler* assembler) {
  Location op1_loc = locations->InAt(0);
  Location op2_loc = locations->InAt(1);
  Location out_loc = locations->Out();
  XmmRegister out = out_loc.AsFpuRegister<XmmRegister>();

  // Shortcut for same input locations.
  if (op1_loc.Equals(op2_loc)) {
    DCHECK(out_loc.Equals(op1_loc));
    return;
  }

  //  (out := op1)
  //  out <=? op2
  //  if Nan jmp Nan_label
  //  if out is min jmp done
  //  if op2 is min jmp op2_label
  //  handle -0/+0
  //  jmp done
  // Nan_label:
  //  out := NaN
  // op2_label:
  //  out := op2
  // done:
  //
  // This removes one jmp, but needs to copy one input (op1) to out.
  //
  // TODO: This is straight from Quick (except literal pool). Make NaN an out-of-line slowpath?

  XmmRegister op2 = op2_loc.AsFpuRegister<XmmRegister>();

  Label nan, done, op2_label;
  if (is_double) {
    __ ucomisd(out, op2);
  } else {
    __ ucomiss(out, op2);
  }

  __ j(Condition::kParityEven, &nan);

  __ j(is_min ? Condition::kAbove : Condition::kBelow, &op2_label);
  __ j(is_min ? Condition::kBelow : Condition::kAbove, &done);

  // Handle 0.0/-0.0.
  if (is_min) {
    if (is_double) {
      __ orpd(out, op2);
    } else {
      __ orps(out, op2);
    }
  } else {
    if (is_double) {
      __ andpd(out, op2);
    } else {
      __ andps(out, op2);
    }
  }
  __ jmp(&done);

  // NaN handling.
  __ Bind(&nan);
  CpuRegister cpu_temp = locations->GetTemp(0).AsRegister<CpuRegister>();
  // TODO: Literal pool. Trades 64b immediate in CPU reg for direct memory access.
  if (is_double) {
    __ movq(cpu_temp, Immediate(INT64_C(0x7FF8000000000000)));
  } else {
    __ movl(cpu_temp, Immediate(INT64_C(0x7FC00000)));
  }
  __ movd(out, cpu_temp, is_double);
  __ jmp(&done);

  // out := op2;
  __ Bind(&op2_label);
  if (is_double) {
    __ movsd(out, op2);
  } else {
    __ movss(out, op2);
  }

  // Done.
  __ Bind(&done);
}

static void CreateFPFPToFPPlusTempLocations(ArenaAllocator* arena, HInvoke* invoke) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::RequiresFpuRegister());
  locations->SetInAt(1, Location::RequiresFpuRegister());
  // The following is sub-optimal, but all we can do for now. It would be fine to also accept
  // the second input to be the output (we can simply swap inputs).
  locations->SetOut(Location::SameAsFirstInput());
  locations->AddTemp(Location::RequiresRegister());  // Immediate constant.
}

void IntrinsicLocationsBuilderX86_64::VisitMathMinDoubleDouble(HInvoke* invoke) {
  CreateFPFPToFPPlusTempLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86_64::VisitMathMinDoubleDouble(HInvoke* invoke) {
  GenMinMaxFP(invoke->GetLocations(), true, true, GetAssembler());
}

void IntrinsicLocationsBuilderX86_64::VisitMathMinFloatFloat(HInvoke* invoke) {
  CreateFPFPToFPPlusTempLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86_64::VisitMathMinFloatFloat(HInvoke* invoke) {
  GenMinMaxFP(invoke->GetLocations(), true, false, GetAssembler());
}

void IntrinsicLocationsBuilderX86_64::VisitMathMaxDoubleDouble(HInvoke* invoke) {
  CreateFPFPToFPPlusTempLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86_64::VisitMathMaxDoubleDouble(HInvoke* invoke) {
  GenMinMaxFP(invoke->GetLocations(), false, true, GetAssembler());
}

void IntrinsicLocationsBuilderX86_64::VisitMathMaxFloatFloat(HInvoke* invoke) {
  CreateFPFPToFPPlusTempLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86_64::VisitMathMaxFloatFloat(HInvoke* invoke) {
  GenMinMaxFP(invoke->GetLocations(), false, false, GetAssembler());
}

static void GenMinMax(LocationSummary* locations, bool is_min, bool is_long,
                      X86_64Assembler* assembler) {
  Location op1_loc = locations->InAt(0);
  Location op2_loc = locations->InAt(1);

  // Shortcut for same input locations.
  if (op1_loc.Equals(op2_loc)) {
    // Can return immediately, as op1_loc == out_loc.
    // Note: if we ever support separate registers, e.g., output into memory, we need to check for
    // a copy here.
    DCHECK(locations->Out().Equals(op1_loc));
    return;
  }

  CpuRegister out = locations->Out().AsRegister<CpuRegister>();
  CpuRegister op2 = op2_loc.AsRegister<CpuRegister>();

  //  (out := op1)
  //  out <=? op2
  //  if out is min jmp done
  //  out := op2
  // done:

  if (is_long) {
    __ cmpq(out, op2);
  } else {
    __ cmpl(out, op2);
  }

  __ cmov(is_min ? Condition::kGreater : Condition::kLess, out, op2, is_long);
}
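
// Note on the cmov above: after the compare, "out" still holds op1. For min we overwrite it with
// op2 only when op1 > op2 (kGreater); for max only when op1 < op2 (kLess). E.g., for min(3, 1)
// the compare sets the "greater" condition, so the cmov copies 1 into out.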

static void CreateIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  locations->SetOut(Location::SameAsFirstInput());
}

void IntrinsicLocationsBuilderX86_64::VisitMathMinIntInt(HInvoke* invoke) {
  CreateIntIntToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86_64::VisitMathMinIntInt(HInvoke* invoke) {
  GenMinMax(invoke->GetLocations(), true, false, GetAssembler());
}

void IntrinsicLocationsBuilderX86_64::VisitMathMinLongLong(HInvoke* invoke) {
  CreateIntIntToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86_64::VisitMathMinLongLong(HInvoke* invoke) {
  GenMinMax(invoke->GetLocations(), true, true, GetAssembler());
}

void IntrinsicLocationsBuilderX86_64::VisitMathMaxIntInt(HInvoke* invoke) {
  CreateIntIntToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86_64::VisitMathMaxIntInt(HInvoke* invoke) {
  GenMinMax(invoke->GetLocations(), false, false, GetAssembler());
}

void IntrinsicLocationsBuilderX86_64::VisitMathMaxLongLong(HInvoke* invoke) {
  CreateIntIntToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86_64::VisitMathMaxLongLong(HInvoke* invoke) {
  GenMinMax(invoke->GetLocations(), false, true, GetAssembler());
}

static void CreateFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::RequiresFpuRegister());
  locations->SetOut(Location::RequiresFpuRegister());
}

void IntrinsicLocationsBuilderX86_64::VisitMathSqrt(HInvoke* invoke) {
  CreateFPToFPLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86_64::VisitMathSqrt(HInvoke* invoke) {
  LocationSummary* locations = invoke->GetLocations();
  XmmRegister in = locations->InAt(0).AsFpuRegister<XmmRegister>();
  XmmRegister out = locations->Out().AsFpuRegister<XmmRegister>();

  GetAssembler()->sqrtsd(out, in);
}

void IntrinsicLocationsBuilderX86_64::VisitStringCharAt(HInvoke* invoke) {
  // The inputs plus one temp.
  LocationSummary* locations = new (arena_) LocationSummary(invoke,
                                                            LocationSummary::kCallOnSlowPath,
                                                            kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  locations->SetOut(Location::SameAsFirstInput());
  locations->AddTemp(Location::RequiresRegister());
}

void IntrinsicCodeGeneratorX86_64::VisitStringCharAt(HInvoke* invoke) {
  LocationSummary* locations = invoke->GetLocations();

  // Location of reference to data array.
  const int32_t value_offset = mirror::String::ValueOffset().Int32Value();
  // Location of count.
  const int32_t count_offset = mirror::String::CountOffset().Int32Value();
  // Starting offset within data array.
  const int32_t offset_offset = mirror::String::OffsetOffset().Int32Value();
  // Start of char data within array_.
  const int32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();

  CpuRegister obj = locations->InAt(0).AsRegister<CpuRegister>();
  CpuRegister idx = locations->InAt(1).AsRegister<CpuRegister>();
  CpuRegister out = locations->Out().AsRegister<CpuRegister>();
  Location temp_loc = locations->GetTemp(0);
  CpuRegister temp = temp_loc.AsRegister<CpuRegister>();

  // TODO: Maybe we can support range check elimination. Overall, though, I think it's not worth
  // the cost.
  // TODO: For simplicity, the index parameter is requested in a register, so, unlike Quick,
  // we will not optimize the code for constants (which would save a register).

  SlowPathCodeX86_64* slow_path = new (GetAllocator()) IntrinsicSlowPathX86_64(invoke);
  codegen_->AddSlowPath(slow_path);

  X86_64Assembler* assembler = GetAssembler();

  __ cmpl(idx, Address(obj, count_offset));
  codegen_->MaybeRecordImplicitNullCheck(invoke);
  __ j(kAboveEqual, slow_path->GetEntryLabel());

  // Get the actual element.
  __ movl(temp, idx);                          // temp := idx.
  __ addl(temp, Address(obj, offset_offset));  // temp := offset + idx.
  __ movl(out, Address(obj, value_offset));    // out := obj.array.
  // out = out[2*temp].
  __ movzxw(out, Address(out, temp, ScaleFactor::TIMES_2, data_offset));

  __ Bind(slow_path->GetExitLabel());
}

void IntrinsicLocationsBuilderX86_64::VisitStringCompareTo(HInvoke* invoke) {
  LocationSummary* locations = new (arena_) LocationSummary(invoke,
                                                            LocationSummary::kCall,
                                                            kIntrinsified);
  InvokeRuntimeCallingConvention calling_convention;
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
  locations->SetOut(Location::RegisterLocation(RAX));
}

void IntrinsicCodeGeneratorX86_64::VisitStringCompareTo(HInvoke* invoke) {
  X86_64Assembler* assembler = GetAssembler();
  LocationSummary* locations = invoke->GetLocations();

  // Note that the null check must have been done earlier.
  DCHECK(!invoke->CanDoImplicitNullCheck());

  CpuRegister argument = locations->InAt(1).AsRegister<CpuRegister>();
  __ testl(argument, argument);
  SlowPathCodeX86_64* slow_path = new (GetAllocator()) IntrinsicSlowPathX86_64(invoke);
  codegen_->AddSlowPath(slow_path);
  __ j(kEqual, slow_path->GetEntryLabel());

  __ gs()->call(Address::Absolute(
      QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pStringCompareTo), true));
  __ Bind(slow_path->GetExitLabel());
}

static void GenPeek(LocationSummary* locations, Primitive::Type size, X86_64Assembler* assembler) {
  CpuRegister address = locations->InAt(0).AsRegister<CpuRegister>();
  CpuRegister out = locations->Out().AsRegister<CpuRegister>();  // == address, here for clarity.
  // x86 allows unaligned access. We do not have to check the input or use specific instructions
  // to avoid a SIGBUS.
  switch (size) {
    case Primitive::kPrimByte:
      __ movsxb(out, Address(address, 0));
      break;
    case Primitive::kPrimShort:
      __ movsxw(out, Address(address, 0));
      break;
    case Primitive::kPrimInt:
      __ movl(out, Address(address, 0));
      break;
    case Primitive::kPrimLong:
      __ movq(out, Address(address, 0));
      break;
    default:
      LOG(FATAL) << "Type not recognized for peek: " << size;
      UNREACHABLE();
  }
}

void IntrinsicLocationsBuilderX86_64::VisitMemoryPeekByte(HInvoke* invoke) {
  CreateIntToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86_64::VisitMemoryPeekByte(HInvoke* invoke) {
  GenPeek(invoke->GetLocations(), Primitive::kPrimByte, GetAssembler());
}

void IntrinsicLocationsBuilderX86_64::VisitMemoryPeekIntNative(HInvoke* invoke) {
  CreateIntToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86_64::VisitMemoryPeekIntNative(HInvoke* invoke) {
  GenPeek(invoke->GetLocations(), Primitive::kPrimInt, GetAssembler());
}

void IntrinsicLocationsBuilderX86_64::VisitMemoryPeekLongNative(HInvoke* invoke) {
  CreateIntToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86_64::VisitMemoryPeekLongNative(HInvoke* invoke) {
  GenPeek(invoke->GetLocations(), Primitive::kPrimLong, GetAssembler());
}

void IntrinsicLocationsBuilderX86_64::VisitMemoryPeekShortNative(HInvoke* invoke) {
  CreateIntToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86_64::VisitMemoryPeekShortNative(HInvoke* invoke) {
  GenPeek(invoke->GetLocations(), Primitive::kPrimShort, GetAssembler());
}

static void CreateIntIntToVoidLocations(ArenaAllocator* arena, HInvoke* invoke) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
}

static void GenPoke(LocationSummary* locations, Primitive::Type size, X86_64Assembler* assembler) {
  CpuRegister address = locations->InAt(0).AsRegister<CpuRegister>();
  CpuRegister value = locations->InAt(1).AsRegister<CpuRegister>();
  // x86 allows unaligned access. We do not have to check the input or use specific instructions
  // to avoid a SIGBUS.
  switch (size) {
    case Primitive::kPrimByte:
      __ movb(Address(address, 0), value);
      break;
    case Primitive::kPrimShort:
      __ movw(Address(address, 0), value);
      break;
    case Primitive::kPrimInt:
      __ movl(Address(address, 0), value);
      break;
    case Primitive::kPrimLong:
      __ movq(Address(address, 0), value);
      break;
    default:
      LOG(FATAL) << "Type not recognized for poke: " << size;
      UNREACHABLE();
  }
}

void IntrinsicLocationsBuilderX86_64::VisitMemoryPokeByte(HInvoke* invoke) {
  CreateIntIntToVoidLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86_64::VisitMemoryPokeByte(HInvoke* invoke) {
  GenPoke(invoke->GetLocations(), Primitive::kPrimByte, GetAssembler());
}

void IntrinsicLocationsBuilderX86_64::VisitMemoryPokeIntNative(HInvoke* invoke) {
  CreateIntIntToVoidLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86_64::VisitMemoryPokeIntNative(HInvoke* invoke) {
  GenPoke(invoke->GetLocations(), Primitive::kPrimInt, GetAssembler());
}

void IntrinsicLocationsBuilderX86_64::VisitMemoryPokeLongNative(HInvoke* invoke) {
  CreateIntIntToVoidLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86_64::VisitMemoryPokeLongNative(HInvoke* invoke) {
  GenPoke(invoke->GetLocations(), Primitive::kPrimLong, GetAssembler());
}

void IntrinsicLocationsBuilderX86_64::VisitMemoryPokeShortNative(HInvoke* invoke) {
  CreateIntIntToVoidLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86_64::VisitMemoryPokeShortNative(HInvoke* invoke) {
  GenPoke(invoke->GetLocations(), Primitive::kPrimShort, GetAssembler());
}

void IntrinsicLocationsBuilderX86_64::VisitThreadCurrentThread(HInvoke* invoke) {
  LocationSummary* locations = new (arena_) LocationSummary(invoke,
                                                            LocationSummary::kNoCall,
                                                            kIntrinsified);
  locations->SetOut(Location::RequiresRegister());
}

void IntrinsicCodeGeneratorX86_64::VisitThreadCurrentThread(HInvoke* invoke) {
  CpuRegister out = invoke->GetLocations()->Out().AsRegister<CpuRegister>();
  GetAssembler()->gs()->movl(out, Address::Absolute(Thread::PeerOffset<kX86_64WordSize>(), true));
}

static void GenUnsafeGet(LocationSummary* locations, Primitive::Type type,
                         bool is_volatile ATTRIBUTE_UNUSED, X86_64Assembler* assembler) {
  CpuRegister base = locations->InAt(1).AsRegister<CpuRegister>();
  CpuRegister offset = locations->InAt(2).AsRegister<CpuRegister>();
  CpuRegister trg = locations->Out().AsRegister<CpuRegister>();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
      __ movl(trg, Address(base, offset, ScaleFactor::TIMES_1, 0));
      break;

    case Primitive::kPrimLong:
      __ movq(trg, Address(base, offset, ScaleFactor::TIMES_1, 0));
      break;

    default:
      LOG(FATAL) << "Unsupported op size " << type;
      UNREACHABLE();
  }
}

static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::NoLocation());  // Unused receiver.
  locations->SetInAt(1, Location::RequiresRegister());
  locations->SetInAt(2, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister());
}

void IntrinsicLocationsBuilderX86_64::VisitUnsafeGet(HInvoke* invoke) {
  CreateIntIntIntToIntLocations(arena_, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafeGetVolatile(HInvoke* invoke) {
  CreateIntIntIntToIntLocations(arena_, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafeGetLong(HInvoke* invoke) {
  CreateIntIntIntToIntLocations(arena_, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
  CreateIntIntIntToIntLocations(arena_, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafeGetObject(HInvoke* invoke) {
  CreateIntIntIntToIntLocations(arena_, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
  CreateIntIntIntToIntLocations(arena_, invoke);
}


void IntrinsicCodeGeneratorX86_64::VisitUnsafeGet(HInvoke* invoke) {
  GenUnsafeGet(invoke->GetLocations(), Primitive::kPrimInt, false, GetAssembler());
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafeGetVolatile(HInvoke* invoke) {
  GenUnsafeGet(invoke->GetLocations(), Primitive::kPrimInt, true, GetAssembler());
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafeGetLong(HInvoke* invoke) {
  GenUnsafeGet(invoke->GetLocations(), Primitive::kPrimLong, false, GetAssembler());
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
  GenUnsafeGet(invoke->GetLocations(), Primitive::kPrimLong, true, GetAssembler());
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafeGetObject(HInvoke* invoke) {
  GenUnsafeGet(invoke->GetLocations(), Primitive::kPrimNot, false, GetAssembler());
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
  GenUnsafeGet(invoke->GetLocations(), Primitive::kPrimNot, true, GetAssembler());
}


static void CreateIntIntIntIntToVoidPlusTempsLocations(ArenaAllocator* arena,
                                                       Primitive::Type type,
                                                       HInvoke* invoke) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::NoLocation());  // Unused receiver.
  locations->SetInAt(1, Location::RequiresRegister());
  locations->SetInAt(2, Location::RequiresRegister());
  locations->SetInAt(3, Location::RequiresRegister());
  if (type == Primitive::kPrimNot) {
    // Need temp registers for card-marking.
    locations->AddTemp(Location::RequiresRegister());
    locations->AddTemp(Location::RequiresRegister());
  }
}

void IntrinsicLocationsBuilderX86_64::VisitUnsafePut(HInvoke* invoke) {
  CreateIntIntIntIntToVoidPlusTempsLocations(arena_, Primitive::kPrimInt, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafePutOrdered(HInvoke* invoke) {
  CreateIntIntIntIntToVoidPlusTempsLocations(arena_, Primitive::kPrimInt, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafePutVolatile(HInvoke* invoke) {
  CreateIntIntIntIntToVoidPlusTempsLocations(arena_, Primitive::kPrimInt, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafePutObject(HInvoke* invoke) {
  CreateIntIntIntIntToVoidPlusTempsLocations(arena_, Primitive::kPrimNot, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
  CreateIntIntIntIntToVoidPlusTempsLocations(arena_, Primitive::kPrimNot, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
  CreateIntIntIntIntToVoidPlusTempsLocations(arena_, Primitive::kPrimNot, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafePutLong(HInvoke* invoke) {
  CreateIntIntIntIntToVoidPlusTempsLocations(arena_, Primitive::kPrimLong, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafePutLongOrdered(HInvoke* invoke) {
  CreateIntIntIntIntToVoidPlusTempsLocations(arena_, Primitive::kPrimLong, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafePutLongVolatile(HInvoke* invoke) {
  CreateIntIntIntIntToVoidPlusTempsLocations(arena_, Primitive::kPrimLong, invoke);
}

// We don't care for ordered: it requires an AnyStore barrier, which is already given by the x86
// memory model.
static void GenUnsafePut(LocationSummary* locations, Primitive::Type type, bool is_volatile,
                         CodeGeneratorX86_64* codegen) {
  X86_64Assembler* assembler = reinterpret_cast<X86_64Assembler*>(codegen->GetAssembler());
  CpuRegister base = locations->InAt(1).AsRegister<CpuRegister>();
  CpuRegister offset = locations->InAt(2).AsRegister<CpuRegister>();
  CpuRegister value = locations->InAt(3).AsRegister<CpuRegister>();

  if (type == Primitive::kPrimLong) {
    __ movq(Address(base, offset, ScaleFactor::TIMES_1, 0), value);
  } else {
    __ movl(Address(base, offset, ScaleFactor::TIMES_1, 0), value);
  }

  if (is_volatile) {
    __ mfence();
  }
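
  // Note: on x86-64 the only reordering a plain store allows is store->load, so a volatile put
  // only needs the StoreLoad barrier provided by the mfence above; ordered puts need no extra
  // fence at all (see the comment at the top of this function).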

  if (type == Primitive::kPrimNot) {
    codegen->MarkGCCard(locations->GetTemp(0).AsRegister<CpuRegister>(),
                        locations->GetTemp(1).AsRegister<CpuRegister>(),
                        base,
                        value);
  }
}

void IntrinsicCodeGeneratorX86_64::VisitUnsafePut(HInvoke* invoke) {
  GenUnsafePut(invoke->GetLocations(), Primitive::kPrimInt, false, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafePutOrdered(HInvoke* invoke) {
  GenUnsafePut(invoke->GetLocations(), Primitive::kPrimInt, false, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafePutVolatile(HInvoke* invoke) {
  GenUnsafePut(invoke->GetLocations(), Primitive::kPrimInt, true, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafePutObject(HInvoke* invoke) {
  GenUnsafePut(invoke->GetLocations(), Primitive::kPrimNot, false, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
  GenUnsafePut(invoke->GetLocations(), Primitive::kPrimNot, false, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
  GenUnsafePut(invoke->GetLocations(), Primitive::kPrimNot, true, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafePutLong(HInvoke* invoke) {
  GenUnsafePut(invoke->GetLocations(), Primitive::kPrimLong, false, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafePutLongOrdered(HInvoke* invoke) {
  GenUnsafePut(invoke->GetLocations(), Primitive::kPrimLong, false, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafePutLongVolatile(HInvoke* invoke) {
  GenUnsafePut(invoke->GetLocations(), Primitive::kPrimLong, true, codegen_);
}

// Unimplemented intrinsics.

#define UNIMPLEMENTED_INTRINSIC(Name) \
void IntrinsicLocationsBuilderX86_64::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) { \
} \
void IntrinsicCodeGeneratorX86_64::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) { \
}

UNIMPLEMENTED_INTRINSIC(IntegerReverse)
UNIMPLEMENTED_INTRINSIC(LongReverse)
UNIMPLEMENTED_INTRINSIC(MathFloor)
UNIMPLEMENTED_INTRINSIC(MathCeil)
UNIMPLEMENTED_INTRINSIC(MathRint)
UNIMPLEMENTED_INTRINSIC(MathRoundDouble)
UNIMPLEMENTED_INTRINSIC(MathRoundFloat)
UNIMPLEMENTED_INTRINSIC(StringIsEmpty)  // Might not want to do these two anyways, inlining should
UNIMPLEMENTED_INTRINSIC(StringLength)   // be good enough here.
UNIMPLEMENTED_INTRINSIC(StringIndexOf)
UNIMPLEMENTED_INTRINSIC(StringIndexOfAfter)
UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
UNIMPLEMENTED_INTRINSIC(UnsafeCASInt)
UNIMPLEMENTED_INTRINSIC(UnsafeCASLong)
UNIMPLEMENTED_INTRINSIC(UnsafeCASObject)
UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)

}  // namespace x86_64
}  // namespace art