blob: 64772c804d7f63e08a7b2f1fc492bc3e708a100b [file] [log] [blame]
/*
 * We've detected a condition that will result in an exception, but the exception
 * has not yet been thrown.  Just bail out to the reference interpreter to deal with it.
 * TUNING: for consistency, we may want to just go ahead and handle these here.
 */

    .extern MterpLogDivideByZeroException
common_errDivideByZero:
    EXPORT_PC                           # runtime may need the current dex PC
#if MTERP_LOGGING
    move    a0, rSELF                   # a0 <- self (Thread*)
    daddu   a1, rFP, OFF_FP_SHADOWFRAME # a1 <- shadow frame
    jal     MterpLogDivideByZeroException   # (self, shadow_frame)
#endif
    b       MterpCommonFallback         # retry in the reference interpreter
16
17 .extern MterpLogArrayIndexException
18common_errArrayIndex:
19 EXPORT_PC
20#if MTERP_LOGGING
21 move a0, rSELF
22 daddu a1, rFP, OFF_FP_SHADOWFRAME
23 jal MterpLogArrayIndexException
24#endif
25 b MterpCommonFallback
26
27 .extern MterpLogNullObjectException
28common_errNullObject:
29 EXPORT_PC
30#if MTERP_LOGGING
31 move a0, rSELF
32 daddu a1, rFP, OFF_FP_SHADOWFRAME
33 jal MterpLogNullObjectException
34#endif
35 b MterpCommonFallback
36
/*
 * If we're here, something is out of the ordinary.  If there is a pending
 * exception, handle it.  Otherwise, roll back and retry with the reference
 * interpreter.
 */
MterpPossibleException:
    ld      a0, THREAD_EXCEPTION_OFFSET(rSELF)  # a0 <- pending exception (or null)
    beqzc   a0, MterpFallback           # If not, fall back to reference interpreter.
    /* intentional fallthrough - handle pending exception. */
/*
 * On return from a runtime helper routine, we've found a pending exception.
 * Can we handle it here - or need to bail out to caller?
 *
 */
    .extern MterpHandleException
    .extern MterpShouldSwitchInterpreters
MterpException:
    move    a0, rSELF                   # a0 <- self (Thread*)
    daddu   a1, rFP, OFF_FP_SHADOWFRAME # a1 <- shadow frame
    jal     MterpHandleException        # (self, shadow_frame)
    beqzc   v0, MterpExceptionReturn    # no local catch, back to caller.
    /* A catch block in this method will handle the exception; redirect rPC to it. */
    ld      a0, OFF_FP_CODE_ITEM(rFP)   # a0 <- method's code item
    lwu     a1, OFF_FP_DEX_PC(rFP)      # a1 <- dex pc of the catch block
    REFRESH_IBASE
    daddu   rPC, a0, CODEITEM_INSNS_OFFSET  # rPC <- start of insns[]
    dlsa    rPC, a1, rPC, 1             # generate new dex_pc_ptr (insns + dex_pc * 2)
    /* Do we need to switch interpreters? */
    jal     MterpShouldSwitchInterpreters
    bnezc   v0, MterpFallback
    /* resume execution at catch block */
    EXPORT_PC
    FETCH_INST
    GET_INST_OPCODE v0
    GOTO_OPCODE v0
    /* NOTE: no fallthrough */
72
/*
 * Common handling for branches with support for Jit profiling.
 * On entry:
 *    rINST          <= signed offset
 *    rPROFILE       <= signed hotness countdown (expanded to 64 bits)
 *
 * We have quite a few different cases for branch profiling, OSR detection and
 * suspend check support here.
 *
 * Taken backward branches:
 *    If profiling active, do hotness countdown and report if we hit zero.
 *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
 *    Is there a pending suspend request?  If so, suspend.
 *
 * Taken forward branches and not-taken backward branches:
 *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
 *
 * Our most common case is expected to be a taken backward branch with active jit profiling,
 * but no full OSR check and no pending suspend request.
 * Next most common case is not-taken branch with no full OSR check.
 *
 */
MterpCommonTakenBranchNoFlags:
    bgtzc   rINST, .L_forward_branch    # don't add forward branches to hotness
/*
 * We need to subtract 1 from positive values and we should not see 0 here,
 * so we may use the result of the comparison with -1.
 */
    li      v0, JIT_CHECK_OSR           # rPROFILE == JIT_CHECK_OSR => OSR check mode
    beqc    rPROFILE, v0, .L_osr_check
    bltc    rPROFILE, v0, .L_resume_backward_branch # other negative: profiling disabled
    dsubu   rPROFILE, 1                 # hotness countdown
    beqzc   rPROFILE, .L_add_batch      # counted down to zero - report
.L_resume_backward_branch:
    lw      ra, THREAD_FLAGS_OFFSET(rSELF)  # ra used as scratch for thread flags
    REFRESH_IBASE
    daddu   a2, rINST, rINST            # a2<- byte offset
    FETCH_ADVANCE_INST_RB a2            # update rPC, load rINST
    and     ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
    bnezc   ra, .L_suspend_request_pending
    GET_INST_OPCODE v0                  # extract opcode from rINST
    GOTO_OPCODE v0                      # jump to next instruction

.L_suspend_request_pending:
    EXPORT_PC
    move    a0, rSELF
    jal     MterpSuspendCheck           # (self)
    bnezc   v0, MterpFallback           # nonzero => must switch interpreters
    REFRESH_IBASE                       # might have changed during suspend
    GET_INST_OPCODE v0                  # extract opcode from rINST
    GOTO_OPCODE v0                      # jump to next instruction

.L_no_count_backwards:
    li      v0, JIT_CHECK_OSR           # check for possible OSR re-entry
    bnec    rPROFILE, v0, .L_resume_backward_branch
.L_osr_check:
    move    a0, rSELF                   # a0 <- self (Thread*)
    daddu   a1, rFP, OFF_FP_SHADOWFRAME # a1 <- shadow frame
    move    a2, rINST                   # a2 <- branch offset
    EXPORT_PC
    jal     MterpMaybeDoOnStackReplacement  # (self, shadow_frame, offset)
    bnezc   v0, MterpOnStackReplacement
    b       .L_resume_backward_branch

.L_forward_branch:
    li      v0, JIT_CHECK_OSR           # check for possible OSR re-entry
    beqc    rPROFILE, v0, .L_check_osr_forward
.L_resume_forward_branch:
    daddu   a2, rINST, rINST            # a2<- byte offset
    FETCH_ADVANCE_INST_RB a2            # update rPC, load rINST
    GET_INST_OPCODE v0                  # extract opcode from rINST
    GOTO_OPCODE v0                      # jump to next instruction

.L_check_osr_forward:
    move    a0, rSELF                   # a0 <- self (Thread*)
    daddu   a1, rFP, OFF_FP_SHADOWFRAME # a1 <- shadow frame
    move    a2, rINST                   # a2 <- branch offset
    EXPORT_PC
    jal     MterpMaybeDoOnStackReplacement  # (self, shadow_frame, offset)
    bnezc   v0, MterpOnStackReplacement
    b       .L_resume_forward_branch

.L_add_batch:
    /* Flush the cached hotness count to the shadow frame and report the batch. */
    daddu   a1, rFP, OFF_FP_SHADOWFRAME
    sh      rPROFILE, SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET(a1)
    ld      a0, OFF_FP_METHOD(rFP)
    move    a2, rSELF
    jal     MterpAddHotnessBatch        # (method, shadow_frame, self)
    move    rPROFILE, v0                # restore new hotness countdown to rPROFILE
    b       .L_no_count_backwards

/*
 * Entered from the conditional branch handlers when OSR check request active on
 * not-taken path.  All Dalvik not-taken conditional branch offsets are 2.
 */
.L_check_not_taken_osr:
    move    a0, rSELF                   # a0 <- self (Thread*)
    daddu   a1, rFP, OFF_FP_SHADOWFRAME # a1 <- shadow frame
    li      a2, 2                       # a2 <- not-taken offset (always 2)
    EXPORT_PC
    jal     MterpMaybeDoOnStackReplacement  # (self, shadow_frame, offset)
    bnezc   v0, MterpOnStackReplacement
    FETCH_ADVANCE_INST 2
    GET_INST_OPCODE v0                  # extract opcode from rINST
    GOTO_OPCODE v0                      # jump to next instruction
Alexey Frunze00b53b72016-02-02 20:25:45 -0800178
/*
 * On-stack replacement has happened, and now we've returned from the compiled method.
 */
MterpOnStackReplacement:
#if MTERP_LOGGING
    move    a0, rSELF                   # a0 <- self (Thread*)
    daddu   a1, rFP, OFF_FP_SHADOWFRAME # a1 <- shadow frame
    move    a2, rINST                   # rINST contains offset
    jal     MterpLogOSR
#endif
    li      v0, 1                       # Signal normal return
    b       MterpDone
191
/*
 * Bail out to reference interpreter.
 */
    .extern MterpLogFallback
MterpFallback:
    EXPORT_PC                           # caller will resume from the exported PC
#if MTERP_LOGGING
    move    a0, rSELF                   # a0 <- self (Thread*)
    daddu   a1, rFP, OFF_FP_SHADOWFRAME # a1 <- shadow frame
    jal     MterpLogFallback
#endif
MterpCommonFallback:
    li      v0, 0                       # signal retry with reference interpreter.
    b       MterpDone
206
/*
 * We pushed some registers on the stack in ExecuteMterpImpl, then saved
 * SP and RA.  Here we restore SP, restore the registers, and then restore
 * RA to PC.
 *
 * On entry:
 *  uint32_t* rFP  (should still be live, pointer to base of vregs)
 */
MterpExceptionReturn:
    li      v0, 1                       # signal return to caller.
    b       MterpDone
/*
 * Returned value is expected in a0 and if it's not 64-bit, the 32 most
 * significant bits of a0 must be zero-extended or sign-extended
 * depending on the return type.
 */
MterpReturn:
    ld      a2, OFF_FP_RESULT_REGISTER(rFP)  # a2 <- result register address
    lw      ra, THREAD_FLAGS_OFFSET(rSELF)   # ra used as scratch for thread flags
    sd      a0, 0(a2)                   # store the 64-bit return value
    move    a0, rSELF
    and     ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
    beqzc   ra, check2                  # no suspend/checkpoint pending
    jal     MterpSuspendCheck           # (self)
check2:
    li      v0, 1                       # signal return to caller.
MterpDone:
/*
 * At this point, we expect rPROFILE to be non-zero.  If negative, hotness is disabled or we're
 * checking for OSR.  If greater than zero, we might have unreported hotness to register
 * (the difference between the ending rPROFILE and the cached hotness counter).  rPROFILE
 * should only reach zero immediately after a hotness decrement, and is then reset to either
 * a negative special state or the new non-zero countdown value.
 */
    blez    rPROFILE, .L_pop_and_return # if > 0, we may have some counts to report.

MterpProfileActive:
    move    rINST, v0                   # stash return value
    /* Report cached hotness counts */
    ld      a0, OFF_FP_METHOD(rFP)
    daddu   a1, rFP, OFF_FP_SHADOWFRAME
    move    a2, rSELF
    sh      rPROFILE, SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET(a1)
    jal     MterpAddHotnessBatch        # (method, shadow_frame, self)
    move    v0, rINST                   # restore return value

.L_pop_and_return:
    /* Restore callee-saved registers pushed in ExecuteMterpImpl's prologue. */
    ld      s6, STACK_OFFSET_S6(sp)
    .cfi_restore 22
    ld      s5, STACK_OFFSET_S5(sp)
    .cfi_restore 21
    ld      s4, STACK_OFFSET_S4(sp)
    .cfi_restore 20
    ld      s3, STACK_OFFSET_S3(sp)
    .cfi_restore 19
    ld      s2, STACK_OFFSET_S2(sp)
    .cfi_restore 18
    ld      s1, STACK_OFFSET_S1(sp)
    .cfi_restore 17
    ld      s0, STACK_OFFSET_S0(sp)
    .cfi_restore 16

    ld      ra, STACK_OFFSET_RA(sp)
    .cfi_restore 31

    ld      t8, STACK_OFFSET_GP(sp)
    .cpreturn                           # restore the cached GP value
    .cfi_restore 28

    .set    noreorder
    jr      ra
    daddu   sp, sp, STACK_SIZE          # branch delay slot: pop the frame
    .cfi_adjust_cfa_offset -STACK_SIZE

    .cfi_endproc
    .set    reorder
    .size ExecuteMterpImpl, .-ExecuteMterpImpl
Alexey Frunze00b53b72016-02-02 20:25:45 -0800283 .size ExecuteMterpImpl, .-ExecuteMterpImpl