#if defined(__arm__)

    .balign 4

    .global art_invoke_interface_trampoline
    .extern artFindInterfaceMethodInCache
    .extern artFailedInvokeInterface
art_invoke_interface_trampoline:
    /*
     * All generated callsites for interface invokes will load arguments
     * as usual - except instead of loading arg0/r0 with the target
     * Method*, arg0/r0 will contain the method_idx.  This wrapper will
     * save arg1-arg3, load the caller's Method*, align the stack and
     * call the helper artFindInterfaceMethodInCache(idx, this, method);
     * NOTE: "this" is the first visible argument of the target, and so
     * can be found in arg1/r1.
     *
     * artFindInterfaceMethodInCache will attempt to locate the target
     * and return a 64-bit result in r0/r1 consisting of the target
     * Method* in r0 and method->code_ in r1.
     *
     * If unsuccessful, artFindInterfaceMethodInCache will return
     * NULL/NULL.  This is somewhat different from the usual mechanism
     * of helper routines performing the unwind & throw themselves.
     * The reason is that this trampoline is not unwindable.  If
     * artFindInterfaceMethodInCache fails to resolve, the wrapper
     * will prepare an unwindable environment and jump to another
     * helper to do the unwind/throw.
     *
     * On success this wrapper will restore arguments and *jump* to the
     * target, leaving lr pointing back to the original caller.
     */
    stmdb  sp!, {r1, r2, r3, lr}          @ save arg1-arg3 and return address
    ldr    r2, [sp, #16]                  @ load caller's Method*
    bl     artFindInterfaceMethodInCache  @ (method_idx, this, callerMethod)
    mov    r12, r1                        @ save r0->code_
    ldmia  sp!, {r1, r2, r3, lr}          @ restore arguments
    cmp    r0, #0                         @ did we find the target?
    bxne   r12                            @ tail call to target if so
    b      artFailedInvokeInterface       @ Will appear as if called directly
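    /*
     * For reference, a minimal C sketch of the decision made above once the
     * helper returns, assuming the helper packs the target Method* in r0 and
     * method->code_ in r1 as described.  The struct and function names here
     * are illustrative assumptions, not declarations from the runtime:
     *
     *   #include <stddef.h>
     *
     *   typedef struct {
     *       void* method;   // target Method*, returned in r0
     *       void* code;     // method->code_, returned in r1
     *   } LookupResult;
     *
     *   // NULL/NULL from the helper means "unresolved": take the failure
     *   // path instead of tail-calling the target code.
     *   static void* select_branch_target(LookupResult r, void* failure_helper) {
     *       return (r.method != NULL) ? r.code : failure_helper;
     *   }
     */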

    .global art_shl_long
art_shl_long:
    /*
     * Long integer shift.  This is different from the generic 32/64-bit
     * binary operations because vAA/vBB are 64-bit but vCC (the shift
     * distance) is 32-bit.  Also, Dalvik requires us to ignore all but the
     * low 6 bits.
     * On entry:
     *   r0: low word
     *   r1: high word
     *   r2: shift count
     */
    /* shl-long vAA, vBB, vCC */
    and    r2, r2, #63                @ r2<- r2 & 0x3f
    mov    r1, r1, asl r2             @ r1<- r1 << r2
    rsb    r3, r2, #32                @ r3<- 32 - r2
    orr    r1, r1, r0, lsr r3         @ r1<- r1 | (r0 >> (32-r2))
    subs   ip, r2, #32                @ ip<- r2 - 32
    movpl  r1, r0, asl ip             @ if r2 >= 32, r1<- r0 << (r2-32)
    mov    r0, r0, asl r2             @ r0<- r0 << r2
    bx     lr
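    /*
     * For reference, a minimal C sketch of the same computation, assuming
     * Dalvik's shl-long semantics (shift count masked to its low 6 bits);
     * the function name is illustrative only:
     *
     *   #include <stdint.h>
     *
     *   static uint64_t shl_long(uint64_t value, uint32_t shift) {
     *       return value << (shift & 63);   // well-defined: shift is always < 64
     *   }
     *
     * The assembly builds the result from two 32-bit halves because the
     * operand arrives split across r0 (low) and r1 (high).
     */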

    .balign 4
    .global art_shr_long
art_shr_long:
    /*
     * Long integer shift.  This is different from the generic 32/64-bit
     * binary operations because vAA/vBB are 64-bit but vCC (the shift
     * distance) is 32-bit.  Also, Dalvik requires us to ignore all but the
     * low 6 bits.
     * On entry:
     *   r0: low word
     *   r1: high word
     *   r2: shift count
     */
    /* shr-long vAA, vBB, vCC */
    and    r2, r2, #63                @ r2<- r2 & 0x3f
    mov    r0, r0, lsr r2             @ r0<- r0 >>> r2
    rsb    r3, r2, #32                @ r3<- 32 - r2
    orr    r0, r0, r1, asl r3         @ r0<- r0 | (r1 << (32-r2))
    subs   ip, r2, #32                @ ip<- r2 - 32
    movpl  r0, r1, asr ip             @ if r2 >= 32, r0<- r1 >> (r2-32)
    mov    r1, r1, asr r2             @ r1<- r1 >> r2
    bx     lr
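    /*
     * For reference, a minimal C sketch of the arithmetic variant, assuming
     * Dalvik's shr-long semantics (sign-extending shift, count masked to 6
     * bits); the name is illustrative, and the >> on a negative signed value
     * relies on the arithmetic-shift behavior the target compilers provide:
     *
     *   #include <stdint.h>
     *
     *   static int64_t shr_long(int64_t value, uint32_t shift) {
     *       return value >> (shift & 63);   // arithmetic shift of the signed value
     *   }
     */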

    .balign 4
    .global art_ushr_long
art_ushr_long:
    /*
     * Long integer shift.  This is different from the generic 32/64-bit
     * binary operations because vAA/vBB are 64-bit but vCC (the shift
     * distance) is 32-bit.  Also, Dalvik requires us to ignore all but the
     * low 6 bits.
     * On entry:
     *   r0: low word
     *   r1: high word
     *   r2: shift count
     */
    /* ushr-long vAA, vBB, vCC */
    and    r2, r2, #63                @ r2<- r2 & 0x3f
    mov    r0, r0, lsr r2             @ r0<- r0 >>> r2
    rsb    r3, r2, #32                @ r3<- 32 - r2
    orr    r0, r0, r1, asl r3         @ r0<- r0 | (r1 << (32-r2))
    subs   ip, r2, #32                @ ip<- r2 - 32
    movpl  r0, r1, lsr ip             @ if r2 >= 32, r0<- r1 >>> (r2-32)
    mov    r1, r1, lsr r2             @ r1<- r1 >>> r2
    bx     lr
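    /*
     * For reference, a minimal C sketch of the logical variant, assuming
     * Dalvik's ushr-long semantics (zero-filling shift, count masked to 6
     * bits); the name is illustrative only:
     *
     *   #include <stdint.h>
     *
     *   static uint64_t ushr_long(uint64_t value, uint32_t shift) {
     *       return value >> (shift & 63);   // logical shift of the unsigned value
     *   }
     */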

#endif