2 * This file was generated automatically by gen-template.py for 'armv7-a-neon'.
7 /* File: armv5te/header.S */
9 * Copyright (C) 2008 The Android Open Source Project
11 * Licensed under the Apache License, Version 2.0 (the "License");
12 * you may not use this file except in compliance with the License.
13 * You may obtain a copy of the License at
15 * http://www.apache.org/licenses/LICENSE-2.0
17 * Unless required by applicable law or agreed to in writing, software
18 * distributed under the License is distributed on an "AS IS" BASIS,
19 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
20 * See the License for the specific language governing permissions and
21 * limitations under the License.
27 * ARMv5 definitions and declarations.
31 ARM EABI general notes:
33 r0-r3 hold first 4 args to a method; they are not preserved across method calls
34 r4-r8 are available for general use
35 r9 is given special treatment in some situations, but not for us
36 r10 (sl) seems to be generally available
37 r11 (fp) is used by gcc (unless -fomit-frame-pointer is set)
38 r12 (ip) is scratch -- not preserved across method calls
39 r13 (sp) should be managed carefully in case a signal arrives
40 r14 (lr) must be preserved
41 r15 (pc) can be tinkered with directly
43 r0 holds returns of <= 4 bytes
44 r0-r1 hold returns of 8 bytes, low word in r0
46 Callee must save/restore r4+ (except r12) if it modifies them.
48 Stack is "full descending". Only the arguments that don't fit in the first 4
49 registers are placed on the stack. "sp" points at the first stacked argument
52 VFP: single-precision results in s0, double-precision results in d0.
54 In the EABI, "sp" must be 64-bit aligned on entry to a function, and any
55 64-bit quantities (long long, double) must be 64-bit aligned.
61 The following registers have fixed assignments:
64 r5 rFP interpreted frame pointer, used for accessing locals and args
65 r6 rSELF thread pointer
67 The following registers have fixed assignments in mterp but are scratch
68 registers in compiled code
71 r4 rPC interpreted program counter, used for fetching instructions
72 r7 rINST first 16-bit code unit of current instruction
73 r8 rIBASE interpreted instruction base pointer, used for computed goto
75 Macros are provided for common operations. Each macro MUST emit only
76 one instruction to make instruction-counting easier. They MUST NOT alter
77 unspecified registers or condition codes.
80 /* single-purpose registers, given names for clarity */
88  * Given a frame pointer, find the stack save area.
90 * In C this is "((StackSaveArea*)(_fp) -1)".
92 #define SAVEAREA_FROM_FP(_reg, _fpreg) \
93     sub _reg, _fpreg, #sizeofStackSaveArea
@ NOTE(review): the store below looks like the body of a separate SAVE_PC_TO_FP-style
@ macro whose #define line is elided from this listing -- verify against the template source.
96     str rPC, [rFP, #(-sizeofStackSaveArea + offStackSaveArea_currentPc)]
99 * This is a #include, not a %include, because we want the C pre-processor
100 * to expand the macros into assembler assignment statements.
102 #include "../../../mterp/common/asm-constants.h"
104 /* File: armv5te-vfp/platform.S */
106 * ===========================================================================
107 * CPU-version-specific defines and utility
108 * ===========================================================================
@ Start of the compiler template region; the templates that follow are emitted
@ verbatim into the JIT code cache.
112 .global dvmCompilerTemplateStart
113 .type dvmCompilerTemplateStart, %function
114 .section .data.rel.ro
116 dvmCompilerTemplateStart:
118 /* ------------------------------ */
120 .global dvmCompiler_TEMPLATE_CMP_LONG
121 dvmCompiler_TEMPLATE_CMP_LONG:
122 /* File: armv5te/TEMPLATE_CMP_LONG.S */
@ In: r0/r1 = vBB (low/high), r2/r3 = vCC (low/high).  Out: r0 = -1, 0, or 1.
124 * Compare two 64-bit values. Puts 0, 1, or -1 into the destination
125 * register based on the results of the comparison.
127 * We load the full values with LDM, but in practice many values could
128 * be resolved by only looking at the high word. This could be made
129 * faster or slower by splitting the LDM into a pair of LDRs.
131 * If we just wanted to set condition flags, we could do this:
135 * Leaving { <0, 0, >0 } in ip. However, we have to set it to a specific
136 * integer value, which we can do with 2 conditional mov/mvn instructions
137 * (set 1, set -1; if they're equal we already have 0 in ip), giving
138 * us a constant 5-cycle path plus a branch at the end to the
139 * instruction epilogue code. The multi-compare approach below needs
140 * 2 or 3 cycles + branch if the high word doesn't match, 6 + branch
141 * in the worst case (the 64-bit values are equal).
143 /* cmp-long vAA, vBB, vCC */
144 cmp r1, r3 @ compare (vBB+1, vCC+1)
145 blt .LTEMPLATE_CMP_LONG_less @ signed compare on high part
146 bgt .LTEMPLATE_CMP_LONG_greater
147 subs r0, r0, r2 @ r0<- r0 - r2
149 bhi .LTEMPLATE_CMP_LONG_greater @ unsigned compare on low part
150 .LTEMPLATE_CMP_LONG_less:
@ NOTE(review): the bodies of the two labels below (mvn/mov into r0 and the
@ return) appear elided from this generated listing -- verify against the template.
153 .LTEMPLATE_CMP_LONG_greater:
157 /* ------------------------------ */
159 .global dvmCompiler_TEMPLATE_RETURN
160 dvmCompiler_TEMPLATE_RETURN:
161 /* File: armv5te/TEMPLATE_RETURN.S */
@ NOTE(review): several lines (#else/#endif arms, the "1:" bail label, and the
@ profiling call through ip) appear elided from this listing -- verify before use.
163 * Unwind a frame from the Dalvik stack for compiled OP_RETURN_XXX.
164 * If the stored value in returnAddr
165 * is non-zero, the caller is compiled by the JIT thus return to the
166 * address in the code cache following the invoke instruction. Otherwise
167 * return to the special dvmJitToInterpNoChain entry point.
169 #if defined(TEMPLATE_INLINE_PROFILING)
170 stmfd sp!, {r0-r2,lr} @ preserve live registers
173 ldr ip, .LdvmFastMethodTraceExit
175 ldmfd sp!, {r0-r2,lr} @ restore live registers
177 SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old)
178 ldr r10, [r0, #offStackSaveArea_prevFrame] @ r10<- saveArea->prevFrame
179 ldrb r8, [rSELF, #offThread_breakFlags] @ r8<- breakFlags
180 ldr rPC, [r0, #offStackSaveArea_savedPc] @ rPC<- saveArea->savedPc
181 ldr r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
182 #if !defined(WITH_SELF_VERIFICATION)
183 ldr r9, [r0, #offStackSaveArea_returnAddr] @ r9<- chaining cell ret
185 mov r9, #0 @ disable chaining
187 @ r2<- method we're returning to
188 cmp r2, #0 @ break frame?
189 #if !defined(WITH_SELF_VERIFICATION)
190 beq 1f @ bail to interpreter
192 blxeq lr @ punt to interpreter and compare state
194 ldr r1, .LdvmJitToInterpNoChainNoProfile @ defined in footer.S
195 mov rFP, r10 @ publish new FP
196 ldr r10, [r2, #offMethod_clazz] @ r10<- method->clazz
198 str r2, [rSELF, #offThread_method]@ self->method = newSave->method
199 ldr r0, [r10, #offClassObject_pDvmDex] @ r0<- method->clazz->pDvmDex
200 str rFP, [rSELF, #offThread_curFrame] @ curFrame = fp
201 add rPC, rPC, #6 @ publish new rPC (advance 6 bytes)
202 str r0, [rSELF, #offThread_methodClassDex]
203 cmp r8, #0 @ check the break flags
204 movne r9, #0 @ clear the chaining cell address
205 str r9, [rSELF, #offThread_inJitCodeCache] @ in code cache or not
206 cmp r9, #0 @ chaining cell exists?
207 blxne r9 @ jump to the chaining cell
208 #if defined(WITH_JIT_TUNING)
209 mov r0, #kCallsiteInterpreted
211 mov pc, r1 @ callsite is interpreted
214 str r0, [rSELF, #offThread_inJitCodeCache] @ reset inJitCodeCache
215 stmia rSELF, {rPC, rFP} @ SAVE_PC_FP_TO_SELF()
216 ldr r2, .LdvmMterpStdBail @ defined in footer.S
217 mov r0, rSELF @ Expecting rSELF in r0
218 blx r2 @ exit the interpreter
220 /* ------------------------------ */
222 .global dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT
223 dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT:
224 /* File: armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT.S */
@ NOTE(review): some lines (e.g. the ACC_NATIVE test before the self-verification
@ #if, and the "set up newSaveArea" group header) appear elided -- verify.
226 * For polymorphic callsites - setup the Dalvik frame and load Dalvik PC
227 * into rPC then jump to dvmJitToInterpNoChain to dispatch the
228 * runtime-resolved callee.
230 @ r0 = methodToCall, r1 = returnCell, rPC = dalvikCallsite
231 ldrh r7, [r0, #offMethod_registersSize] @ r7<- methodToCall->regsSize
232 ldrh r2, [r0, #offMethod_outsSize] @ r2<- methodToCall->outsSize
233 ldr r9, [rSELF, #offThread_interpStackEnd] @ r9<- interpStackEnd
234 ldrb r8, [rSELF, #offThread_breakFlags] @ r8<- breakFlags
235 add r3, r1, #1 @ Thumb addr is odd
236 SAVEAREA_FROM_FP(r1, rFP) @ r1<- stack save area
237 sub r1, r1, r7, lsl #2 @ r1<- newFp (old savearea - regsSize)
238 SAVEAREA_FROM_FP(r10, r1) @ r10<- stack save area
239 sub r10, r10, r2, lsl #2 @ r10<- bottom (newsave - outsSize)
240 cmp r10, r9 @ bottom < interpStackEnd?
241 bxlo lr @ return to raise stack overflow excep.
242 @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
243 ldr r9, [r0, #offMethod_clazz] @ r9<- method->clazz
244 ldr r10, [r0, #offMethod_accessFlags] @ r10<- methodToCall->accessFlags
245 str rPC, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
246 str rPC, [r1, #(offStackSaveArea_savedPc - sizeofStackSaveArea)]
247 ldr rPC, [r0, #offMethod_insns] @ rPC<- methodToCall->insns
251 str rFP, [r1, #(offStackSaveArea_prevFrame - sizeofStackSaveArea)]
252 str r3, [r1, #(offStackSaveArea_returnAddr - sizeofStackSaveArea)]
253 str r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
254 cmp r8, #0 @ breakFlags != 0
255 bxne lr @ bail to the interpreter
257 #if !defined(WITH_SELF_VERIFICATION)
260 bxne lr @ bail to the interpreter
263 ldr r10, .LdvmJitToInterpTraceSelectNoChain
264 ldr r3, [r9, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
266 @ Update "thread" values for the new method
267 str r0, [rSELF, #offThread_method] @ self->method = methodToCall
268 str r3, [rSELF, #offThread_methodClassDex] @ self->methodClassDex = ...
269 mov rFP, r1 @ fp = newFp
270 str rFP, [rSELF, #offThread_curFrame] @ curFrame = newFp
271 #if defined(TEMPLATE_INLINE_PROFILING)
272 stmfd sp!, {r0-r3} @ preserve r0-r3
274 @ r0=methodToCall, r1=rSELF
275 ldr ip, .LdvmFastMethodTraceEnter
277 ldmfd sp!, {r0-r3} @ restore r0-r3
280 @ Start executing the callee
281 #if defined(WITH_JIT_TUNING)
282 mov r0, #kInlineCacheMiss
284 bx r10 @ dvmJitToInterpTraceSelectNoChain
286 /* ------------------------------ */
288 .global dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN
289 dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN:
290 /* File: armv5te/TEMPLATE_INVOKE_METHOD_CHAIN.S */
@ NOTE(review): this generated listing appears to have some lines elided
@ (profiling call through ip, #endif markers) -- verify against the template.
292 * For monomorphic callsite, setup the Dalvik frame and return to the
293 * Thumb code through the link register to transfer control to the callee
294 * method through a dedicated chaining cell.
296 @ r0 = methodToCall, r1 = returnCell, r2 = methodToCall->outsSize
297 @ rPC = dalvikCallsite, r7 = methodToCall->registersSize
298 @ methodToCall is guaranteed to be non-native
300 ldr r9, [rSELF, #offThread_interpStackEnd] @ r9<- interpStackEnd
301 ldrb r8, [rSELF, #offThread_breakFlags] @ r8<- breakFlags
302 add r3, r1, #1 @ Thumb addr is odd
303 SAVEAREA_FROM_FP(r1, rFP) @ r1<- stack save area
304 sub r1, r1, r7, lsl #2 @ r1<- newFp (old savearea - regsSize)
305 SAVEAREA_FROM_FP(r10, r1) @ r10<- stack save area
306 add r12, lr, #2 @ setup the punt-to-interp address
307 sub r10, r10, r2, lsl #2 @ r10<- bottom (newsave - outsSize)
308 cmp r10, r9 @ bottom < interpStackEnd?
309 bxlo r12 @ return to raise stack overflow excep.
310 @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
311 ldr r9, [r0, #offMethod_clazz] @ r9<- method->clazz
312 str rPC, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
313 str rPC, [r1, #(offStackSaveArea_savedPc - sizeofStackSaveArea)]
316 str rFP, [r1, #(offStackSaveArea_prevFrame - sizeofStackSaveArea)]
317 str r3, [r1, #(offStackSaveArea_returnAddr - sizeofStackSaveArea)]
318 str r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
319 cmp r8, #0 @ breakFlags != 0
320 bxne r12 @ bail to the interpreter
322 ldr r3, [r9, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
324 @ Update "thread" values for the new method
325 str r0, [rSELF, #offThread_method] @ self->method = methodToCall
326 str r3, [rSELF, #offThread_methodClassDex] @ self->methodClassDex = ...
327 mov rFP, r1 @ fp = newFp
328 str rFP, [rSELF, #offThread_curFrame] @ curFrame = newFp
329 #if defined(TEMPLATE_INLINE_PROFILING)
330 stmfd sp!, {r0-r2,lr} @ preserve clobbered live registers
332 @ r0=methodToCall, r1=rSELF
333 ldr ip, .LdvmFastMethodTraceEnter
335 ldmfd sp!, {r0-r2,lr} @ restore registers
338 bx lr @ return to the callee-chaining cell
340 /* ------------------------------ */
342 .global dvmCompiler_TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN
343 dvmCompiler_TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN:
344 /* File: armv5te/TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN.S */
@ NOTE(review): the tail of this template (the final return sequence after the
@ register-state comment block) appears elided from this listing -- verify.
346 * For polymorphic callsite, check whether the cached class pointer matches
347 * the current one. If so setup the Dalvik frame and return to the
348 * Thumb code through the link register to transfer control to the callee
349 * method through a dedicated chaining cell.
351 * The predicted chaining cell is declared in ArmLIR.h with the
354 * typedef struct PredictedChainingCell {
356 * const ClassObject *clazz;
357 * const Method *method;
359 * } PredictedChainingCell;
361 * Upon returning to the callsite:
362 * - lr : to branch to the chaining cell
363 * - lr+2: to punt to the interpreter
364 * - lr+4: to fully resolve the callee and may rechain.
368 @ r0 = this, r1 = returnCell, r2 = predictedChainCell, rPC = dalvikCallsite
369 ldr r3, [r0, #offObject_clazz] @ r3 <- this->class
370 ldr r8, [r2, #4] @ r8 <- predictedChainCell->clazz
371 ldr r0, [r2, #8] @ r0 <- predictedChainCell->method
372 ldr r9, [rSELF, #offThread_icRechainCount] @ r1 <- shared rechainCount
373 cmp r3, r8 @ predicted class == actual class?
374 #if defined(WITH_JIT_TUNING)
375 ldr r7, .LdvmICHitCount
376 #if defined(WORKAROUND_CORTEX_A9_745320)
377 /* Don't use conditional loads if the HW defect exists */
387 ldreqh r7, [r0, #offMethod_registersSize] @ r7<- methodToCall->regsSize
388 ldreqh r2, [r0, #offMethod_outsSize] @ r2<- methodToCall->outsSize
389 beq .LinvokeChain @ predicted chain is valid
390 ldr r7, [r3, #offClassObject_vtable] @ r7 <- this->class->vtable
391 cmp r8, #0 @ initialized class or not
393 subne r1, r9, #1 @ count--
394 strne r1, [rSELF, #offThread_icRechainCount] @ write back to thread
395 add lr, lr, #4 @ return to fully-resolve landing pad
398 * r2 <- &predictedChainCell
401 * r7 <- this->class->vtable
405 /* ------------------------------ */
407 .global dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE
408 dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE:
409 /* File: armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S */
@ Set up a Dalvik frame for a native callee, call methodToCall->nativeFunc,
@ then pop JNI locals and either chain back or punt to the interpreter.
@ NOTE(review): some lines (#else arms, profiling blx, final branch) appear
@ elided from this generated listing -- verify against the template source.
410 @ r0 = methodToCall, r1 = returnCell, rPC = dalvikCallsite
411 @ r7 = methodToCall->registersSize
412 ldr r9, [rSELF, #offThread_interpStackEnd] @ r9<- interpStackEnd
413 ldrb r8, [rSELF, #offThread_breakFlags] @ r8<- breakFlags
414 add r3, r1, #1 @ Thumb addr is odd
415 SAVEAREA_FROM_FP(r1, rFP) @ r1<- stack save area
416 sub r1, r1, r7, lsl #2 @ r1<- newFp (old savearea - regsSize)
417 SAVEAREA_FROM_FP(r10, r1) @ r10<- stack save area
418 cmp r10, r9 @ bottom < interpStackEnd?
419 bxlo lr @ return to raise stack overflow excep.
420 @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
421 str rPC, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
422 str rPC, [r1, #(offStackSaveArea_savedPc - sizeofStackSaveArea)]
425 str rFP, [r1, #(offStackSaveArea_prevFrame - sizeofStackSaveArea)]
426 str r3, [r1, #(offStackSaveArea_returnAddr - sizeofStackSaveArea)]
427 str r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
428 cmp r8, #0 @ breakFlags != 0
429 ldr r8, [r0, #offMethod_nativeFunc] @ r8<- method->nativeFunc
430 #if !defined(WITH_SELF_VERIFICATION)
431 bxne lr @ bail to the interpreter
433 bx lr @ bail to interpreter unconditionally
436 @ go ahead and transfer control to the native code
437 ldr r9, [rSELF, #offThread_jniLocal_topCookie]@r9<-thread->localRef->...
439 str r1, [rSELF, #offThread_curFrame] @ curFrame = newFp
440 str r2, [rSELF, #offThread_inJitCodeCache] @ not in the jit code cache
441 str r9, [r1, #(offStackSaveArea_localRefCookie - sizeofStackSaveArea)]
442 @ newFp->localRefCookie=top
443 SAVEAREA_FROM_FP(r10, r1) @ r10<- new stack save area
445 mov r2, r0 @ arg2<- methodToCall
446 mov r0, r1 @ arg0<- newFP
447 add r1, rSELF, #offThread_retval @ arg1<- &retval
448 mov r3, rSELF @ arg3<- self
449 #if defined(TEMPLATE_INLINE_PROFILING)
450 @ r2=methodToCall, r6=rSELF
451 stmfd sp!, {r2,r6} @ to be consumed after JNI return
452 stmfd sp!, {r0-r3} @ preserve r0-r3
455 @ r0=JNIMethod, r1=rSELF
456 ldr ip, .LdvmFastMethodTraceEnter
458 ldmfd sp!, {r0-r3} @ restore r0-r3
461 blx r8 @ off to the native code
463 #if defined(TEMPLATE_INLINE_PROFILING)
464 ldmfd sp!, {r0-r1} @ restore r2 and r6
465 @ r0=JNIMethod, r1=rSELF
466 ldr ip, .LdvmFastNativeMethodTraceExit
469 @ native return; r10=newSaveArea
470 @ equivalent to dvmPopJniLocals
471 ldr r2, [r10, #offStackSaveArea_returnAddr] @ r2 = chaining cell ret
472 ldr r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved->top
473 ldr r1, [rSELF, #offThread_exception] @ check for exception
474 str rFP, [rSELF, #offThread_curFrame] @ curFrame = fp
476 str r0, [rSELF, #offThread_jniLocal_topCookie] @ new top <- old top
477 ldr r0, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
479 @ r0 = dalvikCallsitePC
480 bne .LhandleException @ no, handle exception
482 str r2, [rSELF, #offThread_inJitCodeCache] @ set the mode properly
483 cmp r2, #0 @ return chaining cell still exists?
484 bxne r2 @ yes - go ahead
486 @ continue executing the next instruction through the interpreter
487 ldr r1, .LdvmJitToInterpTraceSelectNoChain @ defined in footer.S
488 add rPC, r0, #6 @ reconstruct new rPC (advance 6 bytes)
489 #if defined(WITH_JIT_TUNING)
490 mov r0, #kCallsiteInterpreted
494 /* ------------------------------ */
496 .global dvmCompiler_TEMPLATE_MUL_LONG
497 dvmCompiler_TEMPLATE_MUL_LONG:
498 /* File: armv5te/TEMPLATE_MUL_LONG.S */
500 * Signed 64-bit integer multiply.
502 * For JIT: op1 in r0/r1, op2 in r2/r3, return in r0/r1
504 * Consider WXxYZ (r1r0 x r3r2) with a long multiply:
511 * The low word of the result holds ZX, the high word holds
512 * (ZW+YX) + (the high overflow from ZX). YW doesn't matter because
513 * it doesn't fit in the low 64 bits.
515 * Unlike most ARM math operations, multiply instructions have
516 * restrictions on using the same register more than once (Rd and Rm
517 * cannot be the same).
519 /* mul-long vAA, vBB, vCC */
@ Clobbers ip, r9, r10; result low word in r0, high word in r1.
520 mul ip, r2, r1 @ ip<- ZxW
521 umull r9, r10, r2, r0 @ r9/r10 <- ZxX
522 mla r2, r0, r3, ip @ r2<- YxX + (ZxW)
@ NOTE(review): the final "mov r0, r9" (copy low word to r0) appears elided
@ from this listing -- verify against the template source.
524 add r1, r2, r10 @ r1<- r10 + low(ZxW + (YxX))
527 /* ------------------------------ */
529 .global dvmCompiler_TEMPLATE_SHL_LONG
530 dvmCompiler_TEMPLATE_SHL_LONG:
531 /* File: armv5te/TEMPLATE_SHL_LONG.S */
@ In: r0/r1 = value (lo/hi), r2 = shift distance.  Out: r0/r1 = value << (r2 & 63).
533 * Long integer shift. This is different from the generic 32/64-bit
534 * binary operations because vAA/vBB are 64-bit but vCC (the shift
535 * distance) is 32-bit. Also, Dalvik requires us to ignore all but the low
538 /* shl-long vAA, vBB, vCC */
539 and r2, r2, #63 @ r2<- r2 & 0x3f
540 mov r1, r1, asl r2 @ r1<- r1 << r2
541 rsb r3, r2, #32 @ r3<- 32 - r2
542 orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 >> (32-r2))
543 subs ip, r2, #32 @ ip<- r2 - 32
544 movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32)
545 mov r0, r0, asl r2 @ r0<- r0 << r2
548 /* ------------------------------ */
550 .global dvmCompiler_TEMPLATE_SHR_LONG
551 dvmCompiler_TEMPLATE_SHR_LONG:
552 /* File: armv5te/TEMPLATE_SHR_LONG.S */
@ In: r0/r1 = value (lo/hi), r2 = shift distance.  Out: arithmetic shift right.
554 * Long integer shift. This is different from the generic 32/64-bit
555 * binary operations because vAA/vBB are 64-bit but vCC (the shift
556 * distance) is 32-bit. Also, Dalvik requires us to ignore all but the low
559 /* shr-long vAA, vBB, vCC */
560 and r2, r2, #63 @ r2<- r2 & 0x3f
561 mov r0, r0, lsr r2 @ r0<- r0 >> r2
562 rsb r3, r2, #32 @ r3<- 32 - r2
563 orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
564 subs ip, r2, #32 @ ip<- r2 - 32
565 movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32)
566 mov r1, r1, asr r2 @ r1<- r1 >> r2
569 /* ------------------------------ */
571 .global dvmCompiler_TEMPLATE_USHR_LONG
572 dvmCompiler_TEMPLATE_USHR_LONG:
573 /* File: armv5te/TEMPLATE_USHR_LONG.S */
@ In: r0/r1 = value (lo/hi), r2 = shift distance.  Out: logical shift right.
575 * Long integer shift. This is different from the generic 32/64-bit
576 * binary operations because vAA/vBB are 64-bit but vCC (the shift
577 * distance) is 32-bit. Also, Dalvik requires us to ignore all but the low
580 /* ushr-long vAA, vBB, vCC */
581 and r2, r2, #63 @ r2<- r2 & 0x3f
582 mov r0, r0, lsr r2 @ r0<- r0 >>> r2
583 rsb r3, r2, #32 @ r3<- 32 - r2
584 orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
585 subs ip, r2, #32 @ ip<- r2 - 32
586 movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32)
587 mov r1, r1, lsr r2 @ r1<- r1 >>> r2
590 /* ------------------------------ */
592 .global dvmCompiler_TEMPLATE_THROW_EXCEPTION_COMMON
593 dvmCompiler_TEMPLATE_THROW_EXCEPTION_COMMON:
594 /* File: armv5te/TEMPLATE_THROW_EXCEPTION_COMMON.S */
596 * Throw an exception from JIT'ed code.
598 * r0 Dalvik PC that raises the exception
@ NOTE(review): the body of this template (the branch to the common exception
@ handler) appears elided from this generated listing -- verify before use.
602 /* ------------------------------ */
604 .global dvmCompiler_TEMPLATE_MEM_OP_DECODE
605 dvmCompiler_TEMPLATE_MEM_OP_DECODE:
606 /* File: armv5te-vfp/TEMPLATE_MEM_OP_DECODE.S */
607 #if defined(WITH_SELF_VERIFICATION)
609 * This handler encapsulates heap memory ops for selfVerification mode.
611 * The call to the handler is inserted prior to a heap memory operation.
612 * This handler then calls a function to decode the memory op, and process
613 * it accordingly. Afterwards, the handler changes the return address to
614 * skip the memory op so it never gets executed.
@ Saves the full integer and VFP register state so the decoder can inspect
@ and modify it in place via the stack pointer passed in r1.
616 vpush {d0-d15} @ save out all fp registers
617 push {r0-r12,lr} @ save out all registers
618 ldr r2, .LdvmSelfVerificationMemOpDecode @ defined in footer.S
619 mov r0, lr @ arg0 <- link register
620 mov r1, sp @ arg1 <- stack pointer
621 blx r2 @ decode and handle the mem op
622 pop {r0-r12,lr} @ restore all registers
623 vpop {d0-d15} @ restore all fp registers
624 bx lr @ return to compiled code
627 /* ------------------------------ */
629 .global dvmCompiler_TEMPLATE_STRING_COMPARETO
630 dvmCompiler_TEMPLATE_STRING_COMPARETO:
631 /* File: armv5te/TEMPLATE_STRING_COMPARETO.S */
@ NOTE(review): the comparison loops themselves (unrolled checks, loopback,
@ long-string case body) appear elided from this listing -- verify.
633 * String's compareTo.
635 * Requires r0/r1 to have been previously checked for null. Will
636 * return negative if this's string is < comp, 0 if they are the
637 * same and positive if >.
641 * This code relies on hard-coded offsets for string objects, and must be
642 * kept in sync with definitions in UtfString.h. See asm-constants.h
645 * r0: this object pointer
646 * r1: comp object pointer
650 mov r2, r0 @ this to r2, opening up r0 for return value
651 subs r0, r2, r1 @ Same?
654 ldr r4, [r2, #STRING_FIELDOFF_OFFSET]
655 ldr r9, [r1, #STRING_FIELDOFF_OFFSET]
656 ldr r7, [r2, #STRING_FIELDOFF_COUNT]
657 ldr r10, [r1, #STRING_FIELDOFF_COUNT]
658 ldr r2, [r2, #STRING_FIELDOFF_VALUE]
659 ldr r1, [r1, #STRING_FIELDOFF_VALUE]
662 * At this point, we have:
666 * We're going to compute
673 /* Now, build pointers to the string data */
674 add r2, r2, r4, lsl #1
675 add r1, r1, r9, lsl #1
677 * Note: data pointers point to previous element so we can use pre-index
678 * mode with base writeback.
680 add r2, #16-2 @ offset to contents[-1]
681 add r1, #16-2 @ offset to contents[-1]
684 * At this point we have:
685 * r2: *this string data
686 * r1: *comp string data
687 * r10: iteration count for comparison
688 * r11: value to return if the first part of the string is equal
689 * r0: reserved for result
690 * r3, r4, r7, r8, r9, r12 available for loading string data
697 * Unroll the first two checks so we can quickly catch early mismatch
698 * on long strings (but preserve incoming alignment)
749 /* Long string case */
766 /* ------------------------------ */
768 .global dvmCompiler_TEMPLATE_STRING_INDEXOF
769 dvmCompiler_TEMPLATE_STRING_INDEXOF:
770 /* File: armv5te/TEMPLATE_STRING_INDEXOF.S */
@ NOTE(review): the search loops (main loop, indexof_remainder, exit paths)
@ appear elided from this generated listing -- verify against the template.
774 * Requires r0 to have been previously checked for null. Will
775 * return index of match of r1 in r0.
779 * This code relies on hard-coded offsets for string objects, and must be
780 * kept in sync with definitions in UtfString.h. See asm-constants.h
783 * r0: string object pointer
785 * r2: Starting offset in string data
788 ldr r3, [r0, #STRING_FIELDOFF_VALUE]
789 ldr r7, [r0, #STRING_FIELDOFF_OFFSET]
790 ldr r8, [r0, #STRING_FIELDOFF_COUNT]
794 * At this point, we have:
796 * r2: starting offset
797 * r3: object pointer (final result -> r0)
802 /* Build pointer to start of string data */
804 add r0, r3, r7, lsl #1
806 /* Save a copy of starting data in r7 */
809 /* Clamp start to [0..count] */
815 /* Build pointer to start of data to compare and pre-bias */
816 add r0, r0, r2, lsl #1
819 /* Compute iteration count */
823 * At this point we have:
824 * r0: start of data to test
825 * r1: char to compare
826 * r8: iteration count
827 * r7: original start of string
828 * r3, r4, r9, r10, r11, r12 available for loading string data
832 blt indexof_remainder
885 /* ------------------------------ */
887 .global dvmCompiler_TEMPLATE_INTERPRET
888 dvmCompiler_TEMPLATE_INTERPRET:
889 /* File: armv5te/TEMPLATE_INTERPRET.S */
@ NOTE(review): the conditional-load sequence and the final branch appear
@ elided from this generated listing -- verify against the template source.
891 * This handler transfers control to the interpreter without performing
892 * any lookups. It may be called either as part of a normal chaining
893 * operation, or from the transition code in header.S. We distinguish
894 * the two cases by looking at the link register. If called from a
895 * translation chain, it will point to the chaining Dalvik PC -3.
898 * r1 - the Dalvik PC to begin interpretation.
900 * [lr, #3] contains Dalvik PC to begin interpretation
901 * rSELF - pointer to thread
902 * rFP - Dalvik frame pointer
905 #if defined(WORKAROUND_CORTEX_A9_745320)
906 /* Don't use conditional loads if the HW defect exists */
914 mov r0, r1 @ set Dalvik PC
919 .word dvmJitToInterpPunt
921 /* ------------------------------ */
923 .global dvmCompiler_TEMPLATE_MONITOR_ENTER
924 dvmCompiler_TEMPLATE_MONITOR_ENTER:
925 /* File: armv5te/TEMPLATE_MONITOR_ENTER.S */
@ NOTE(review): the setup of r0 (self) before the inJitCodeCache store, and
@ the final branch to the interpreter, appear elided from this listing.
927 * Call out to the runtime to lock an object. Because this thread
928 * may have been suspended in THREAD_MONITOR state and the Jit's
929 * translation cache subsequently cleared, we cannot return directly.
930 * Instead, unconditionally transition to the interpreter to resume.
934 * r1 - the object (which has already been null-checked by the caller
935 * r4 - the Dalvik PC of the following instruction.
937 ldr r2, .LdvmLockObject
938 mov r3, #0 @ Record that we're not returning
939 str r3, [r0, #offThread_inJitCodeCache]
940 blx r2 @ dvmLockObject(self, obj)
941 ldr r2, .LdvmJitToInterpNoChain
942 @ Bail to interpreter - no chain [note - r4 still contains rPC]
943 #if defined(WITH_JIT_TUNING)
944 mov r0, #kHeavyweightMonitor
948 /* ------------------------------ */
950 .global dvmCompiler_TEMPLATE_MONITOR_ENTER_DEBUG
951 dvmCompiler_TEMPLATE_MONITOR_ENTER_DEBUG:
952 /* File: armv5te/TEMPLATE_MONITOR_ENTER_DEBUG.S */
@ NOTE(review): the exception-pending compare/branch between the exception
@ load and the handler load appears elided from this listing -- verify.
954 * To support deadlock prediction, this version of MONITOR_ENTER
955 * will always call the heavyweight dvmLockObject, check for an
956 * exception and then bail out to the interpreter.
960 * r1 - the object (which has already been null-checked by the caller
961 * r4 - the Dalvik PC of the following instruction.
964 ldr r2, .LdvmLockObject
965 mov r3, #0 @ Record that we're not returning
966 str r3, [r0, #offThread_inJitCodeCache]
967 blx r2 @ dvmLockObject(self, obj)
969 ldr r1, [rSELF, #offThread_exception]
972 ldr r2, .LhandleException
973 sub r0, r4, #2 @ roll dPC back to this monitor instruction
976 @ Bail to interpreter - no chain [note - r4 still contains rPC]
977 #if defined(WITH_JIT_TUNING)
978 mov r0, #kHeavyweightMonitor
980 ldr pc, .LdvmJitToInterpNoChain
982 /* ------------------------------ */
984 .global dvmCompiler_TEMPLATE_PERIODIC_PROFILING
985 dvmCompiler_TEMPLATE_PERIODIC_PROFILING:
986 /* File: armv5te/TEMPLATE_PERIODIC_PROFILING.S */
@ NOTE(review): the counter-increment/countdown-decrement stores and the
@ return sequence appear elided from this generated listing -- verify.
988 * Increment profile counter for this trace, and decrement
989 * sample counter. If sample counter goes below zero, turn
993 * (lr-11) is address of pointer to counter. Note: the counter
994 * actually exists 10 bytes before the return target, but because
995 * we are arriving from thumb mode, lr will have its low bit set.
998 ldr r1, [rSELF, #offThread_pProfileCountdown]
999 ldr r2, [r0] @ get counter
1000 ldr r3, [r1] @ get countdown timer
1003 blt .LTEMPLATE_PERIODIC_PROFILING_disable_profiling
1008 .LTEMPLATE_PERIODIC_PROFILING_disable_profiling:
1009 mov r4, lr @ preserve lr
1010 ldr r0, .LdvmJitTraceProfilingOff
1014 /* ------------------------------ */
1016 .global dvmCompiler_TEMPLATE_RETURN_PROF
1017 dvmCompiler_TEMPLATE_RETURN_PROF:
1018 /* File: armv5te/TEMPLATE_RETURN_PROF.S */
@ Profiling variant of TEMPLATE_RETURN: same body with
@ TEMPLATE_INLINE_PROFILING defined so the trace-exit hook is compiled in.
1019 #define TEMPLATE_INLINE_PROFILING
1020 /* File: armv5te/TEMPLATE_RETURN.S */
@ NOTE(review): several lines (#else/#endif arms, the "1:" bail label, and the
@ profiling call through ip) appear elided from this listing -- verify before use.
1022 * Unwind a frame from the Dalvik stack for compiled OP_RETURN_XXX.
1023 * If the stored value in returnAddr
1024 * is non-zero, the caller is compiled by the JIT thus return to the
1025 * address in the code cache following the invoke instruction. Otherwise
1026 * return to the special dvmJitToInterpNoChain entry point.
1028 #if defined(TEMPLATE_INLINE_PROFILING)
1029 stmfd sp!, {r0-r2,lr} @ preserve live registers
1032 ldr ip, .LdvmFastMethodTraceExit
1034 ldmfd sp!, {r0-r2,lr} @ restore live registers
1036 SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old)
1037 ldr r10, [r0, #offStackSaveArea_prevFrame] @ r10<- saveArea->prevFrame
1038 ldrb r8, [rSELF, #offThread_breakFlags] @ r8<- breakFlags
1039 ldr rPC, [r0, #offStackSaveArea_savedPc] @ rPC<- saveArea->savedPc
1040 ldr r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
1041 #if !defined(WITH_SELF_VERIFICATION)
1042 ldr r9, [r0, #offStackSaveArea_returnAddr] @ r9<- chaining cell ret
1044 mov r9, #0 @ disable chaining
1046 @ r2<- method we're returning to
1047 cmp r2, #0 @ break frame?
1048 #if !defined(WITH_SELF_VERIFICATION)
1049 beq 1f @ bail to interpreter
1051 blxeq lr @ punt to interpreter and compare state
1053 ldr r1, .LdvmJitToInterpNoChainNoProfile @ defined in footer.S
1054 mov rFP, r10 @ publish new FP
1055 ldr r10, [r2, #offMethod_clazz] @ r10<- method->clazz
1057 str r2, [rSELF, #offThread_method]@ self->method = newSave->method
1058 ldr r0, [r10, #offClassObject_pDvmDex] @ r0<- method->clazz->pDvmDex
1059 str rFP, [rSELF, #offThread_curFrame] @ curFrame = fp
1060 add rPC, rPC, #6 @ publish new rPC (advance 6 bytes)
1061 str r0, [rSELF, #offThread_methodClassDex]
1062 cmp r8, #0 @ check the break flags
1063 movne r9, #0 @ clear the chaining cell address
1064 str r9, [rSELF, #offThread_inJitCodeCache] @ in code cache or not
1065 cmp r9, #0 @ chaining cell exists?
1066 blxne r9 @ jump to the chaining cell
1067 #if defined(WITH_JIT_TUNING)
1068 mov r0, #kCallsiteInterpreted
1070 mov pc, r1 @ callsite is interpreted
1073 str r0, [rSELF, #offThread_inJitCodeCache] @ reset inJitCodeCache
1074 stmia rSELF, {rPC, rFP} @ SAVE_PC_FP_TO_SELF()
1075 ldr r2, .LdvmMterpStdBail @ defined in footer.S
1076 mov r0, rSELF @ Expecting rSELF in r0
1077 blx r2 @ exit the interpreter
1079 #undef TEMPLATE_INLINE_PROFILING
1081 /* ------------------------------ */
1083 .global dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT_PROF
1084 dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT_PROF:
1085 /* File: armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT_PROF.S */
@ Profiling variant of TEMPLATE_INVOKE_METHOD_NO_OPT: same body with
@ TEMPLATE_INLINE_PROFILING defined so the trace-enter hook is compiled in.
1086 #define TEMPLATE_INLINE_PROFILING
1087 /* File: armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT.S */
@ NOTE(review): some lines (#else arms, the profiling blx through ip) appear
@ elided from this generated listing -- verify against the template source.
1089 * For polymorphic callsites - setup the Dalvik frame and load Dalvik PC
1090 * into rPC then jump to dvmJitToInterpNoChain to dispatch the
1091 * runtime-resolved callee.
1093 @ r0 = methodToCall, r1 = returnCell, rPC = dalvikCallsite
1094 ldrh r7, [r0, #offMethod_registersSize] @ r7<- methodToCall->regsSize
1095 ldrh r2, [r0, #offMethod_outsSize] @ r2<- methodToCall->outsSize
1096 ldr r9, [rSELF, #offThread_interpStackEnd] @ r9<- interpStackEnd
1097 ldrb r8, [rSELF, #offThread_breakFlags] @ r8<- breakFlags
1098 add r3, r1, #1 @ Thumb addr is odd
1099 SAVEAREA_FROM_FP(r1, rFP) @ r1<- stack save area
1100 sub r1, r1, r7, lsl #2 @ r1<- newFp (old savearea - regsSize)
1101 SAVEAREA_FROM_FP(r10, r1) @ r10<- stack save area
1102 sub r10, r10, r2, lsl #2 @ r10<- bottom (newsave - outsSize)
1103 cmp r10, r9 @ bottom < interpStackEnd?
1104 bxlo lr @ return to raise stack overflow excep.
1105 @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
1106 ldr r9, [r0, #offMethod_clazz] @ r9<- method->clazz
1107 ldr r10, [r0, #offMethod_accessFlags] @ r10<- methodToCall->accessFlags
1108 str rPC, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
1109 str rPC, [r1, #(offStackSaveArea_savedPc - sizeofStackSaveArea)]
1110 ldr rPC, [r0, #offMethod_insns] @ rPC<- methodToCall->insns
1113 @ set up newSaveArea
1114 str rFP, [r1, #(offStackSaveArea_prevFrame - sizeofStackSaveArea)]
1115 str r3, [r1, #(offStackSaveArea_returnAddr - sizeofStackSaveArea)]
1116 str r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
1117 cmp r8, #0 @ breakFlags != 0
1118 bxne lr @ bail to the interpreter
1119 tst r10, #ACC_NATIVE
1120 #if !defined(WITH_SELF_VERIFICATION)
1123 bxne lr @ bail to the interpreter
1126 ldr r10, .LdvmJitToInterpTraceSelectNoChain
1127 ldr r3, [r9, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
1129 @ Update "thread" values for the new method
1130 str r0, [rSELF, #offThread_method] @ self->method = methodToCall
1131 str r3, [rSELF, #offThread_methodClassDex] @ self->methodClassDex = ...
1132 mov rFP, r1 @ fp = newFp
1133 str rFP, [rSELF, #offThread_curFrame] @ curFrame = newFp
1134 #if defined(TEMPLATE_INLINE_PROFILING)
1135 stmfd sp!, {r0-r3} @ preserve r0-r3
1137 @ r0=methodToCall, r1=rSELF
1138 ldr ip, .LdvmFastMethodTraceEnter
1140 ldmfd sp!, {r0-r3} @ restore r0-r3
1143 @ Start executing the callee
1144 #if defined(WITH_JIT_TUNING)
1145 mov r0, #kInlineCacheMiss
1147 bx r10 @ dvmJitToInterpTraceSelectNoChain
1149 #undef TEMPLATE_INLINE_PROFILING
1151 /* ------------------------------ */
@ Profiling variant of the monomorphic-chaining invoke template
@ (TEMPLATE_INLINE_PROFILING defined around the armv5te body).
1153 .global dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN_PROF
1154 dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN_PROF:
1155 /* File: armv5te/TEMPLATE_INVOKE_METHOD_CHAIN_PROF.S */
1156 #define TEMPLATE_INLINE_PROFILING
1157 /* File: armv5te/TEMPLATE_INVOKE_METHOD_CHAIN.S */
1159 * For monomorphic callsite, setup the Dalvik frame and return to the
1160 * Thumb code through the link register to transfer control to the callee
1161 * method through a dedicated chaining cell.
1163 @ r0 = methodToCall, r1 = returnCell, r2 = methodToCall->outsSize
1164 @ rPC = dalvikCallsite, r7 = methodToCall->registersSize
1165 @ methodToCall is guaranteed to be non-native
@ --- Stack-overflow check (same pattern as the NO_OPT template) ---
1167 ldr r9, [rSELF, #offThread_interpStackEnd] @ r9<- interpStackEnd
1168 ldrb r8, [rSELF, #offThread_breakFlags] @ r8<- breakFlags
1169 add r3, r1, #1 @ Thumb addr is odd
1170 SAVEAREA_FROM_FP(r1, rFP) @ r1<- stack save area
1171 sub r1, r1, r7, lsl #2 @ r1<- newFp (old savearea - regsSize)
1172 SAVEAREA_FROM_FP(r10, r1) @ r10<- stack save area
@ r12 = lr+2: the "punt to interpreter" landing pad at the callsite.
1173 add r12, lr, #2 @ setup the punt-to-interp address
1174 sub r10, r10, r2, lsl #2 @ r10<- bottom (newsave - outsSize)
1175 cmp r10, r9 @ bottom < interpStackEnd?
1176 bxlo r12 @ return to raise stack overflow excep.
1177 @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
1178 ldr r9, [r0, #offMethod_clazz] @ r9<- method->clazz
1179 str rPC, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
1180 str rPC, [r1, #(offStackSaveArea_savedPc - sizeofStackSaveArea)]
1182 @ set up newSaveArea
1183 str rFP, [r1, #(offStackSaveArea_prevFrame - sizeofStackSaveArea)]
1184 str r3, [r1, #(offStackSaveArea_returnAddr - sizeofStackSaveArea)]
1185 str r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
1186 cmp r8, #0 @ breakFlags != 0
1187 bxne r12 @ bail to the interpreter
1189 ldr r3, [r9, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
1191 @ Update "thread" values for the new method
1192 str r0, [rSELF, #offThread_method] @ self->method = methodToCall
1193 str r3, [rSELF, #offThread_methodClassDex] @ self->methodClassDex = ...
1194 mov rFP, r1 @ fp = newFp
1195 str rFP, [rSELF, #offThread_curFrame] @ curFrame = newFp
1196 #if defined(TEMPLATE_INLINE_PROFILING)
@ Inline method-trace enter hook; lr targets the chaining cell and must
@ survive the call, so it is saved along with the live argument registers.
1197 stmfd sp!, {r0-r2,lr} @ preserve clobbered live registers
1199 @ r0=methodToCall, r1=rSELF
1200 ldr ip, .LdvmFastMethodTraceEnter
1202 ldmfd sp!, {r0-r2,lr} @ restore registers
1205 bx lr @ return to the callee-chaining cell
1207 #undef TEMPLATE_INLINE_PROFILING
1209 /* ------------------------------ */
@ Profiling variant of the predicted-chaining (inline-cache) invoke template
@ (TEMPLATE_INLINE_PROFILING defined around the armv5te body).
1211 .global dvmCompiler_TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN_PROF
1212 dvmCompiler_TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN_PROF:
1213 /* File: armv5te/TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN_PROF.S */
1214 #define TEMPLATE_INLINE_PROFILING
1215 /* File: armv5te/TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN.S */
1217 * For polymorphic callsite, check whether the cached class pointer matches
1218 * the current one. If so setup the Dalvik frame and return to the
1219 * Thumb code through the link register to transfer control to the callee
1220 * method through a dedicated chaining cell.
1222 * The predicted chaining cell is declared in ArmLIR.h with the
1225 * typedef struct PredictedChainingCell {
1227 * const ClassObject *clazz;
1228 * const Method *method;
1230 * } PredictedChainingCell;
1232 * Upon returning to the callsite:
1233 * - lr : to branch to the chaining cell
1234 * - lr+2: to punt to the interpreter
1235 * - lr+4: to fully resolve the callee and may rechain.
1239 @ r0 = this, r1 = returnCell, r2 = predictedChainCell, rPC = dalvikCallsite
1240 ldr r3, [r0, #offObject_clazz] @ r3 <- this->class
@ Field offsets (+4 clazz, +8 method) must stay in sync with the
@ PredictedChainingCell layout documented above.
1241 ldr r8, [r2, #4] @ r8 <- predictedChainCell->clazz
1242 ldr r0, [r2, #8] @ r0 <- predictedChainCell->method
1243 ldr r9, [rSELF, #offThread_icRechainCount] @ r9 <- shared rechainCount
1244 cmp r3, r8 @ predicted class == actual class?
1245 #if defined(WITH_JIT_TUNING)
1246 ldr r7, .LdvmICHitCount
1247 #if defined(WORKAROUND_CORTEX_A9_745320)
1248 /* Don't use conditional loads if the HW defect exists */
@ Hit path: prediction matched — load the callee's frame sizes and take
@ the chained invoke path.
1258 ldreqh r7, [r0, #offMethod_registersSize] @ r7<- methodToCall->regsSize
1259 ldreqh r2, [r0, #offMethod_outsSize] @ r2<- methodToCall->outsSize
1260 beq .LinvokeChainProf @ predicted chain is valid
@ Miss path: reload the vtable, decrement the shared rechain budget when
@ the cached class was initialized, and land on the fully-resolve pad.
1261 ldr r7, [r3, #offClassObject_vtable] @ r7 <- this->class->vtable
1262 cmp r8, #0 @ initialized class or not
1264 subne r1, r9, #1 @ count--
1265 strne r1, [rSELF, #offThread_icRechainCount] @ write back to thread
1266 add lr, lr, #4 @ return to fully-resolve landing pad
1269 * r2 <- &predictedChainCell
1272 * r7 <- this->class->vtable
1276 #undef TEMPLATE_INLINE_PROFILING
1278 /* ------------------------------ */
@ Profiling variant of the native-invoke template
@ (TEMPLATE_INLINE_PROFILING defined around the armv5te body).
1280 .global dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF
1281 dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
1282 /* File: armv5te/TEMPLATE_INVOKE_METHOD_NATIVE_PROF.S */
1283 #define TEMPLATE_INLINE_PROFILING
1284 /* File: armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S */
1285 @ r0 = methodToCall, r1 = returnCell, rPC = dalvikCallsite
1286 @ r7 = methodToCall->registersSize
@ --- Stack-overflow check before building the native frame ---
1287 ldr r9, [rSELF, #offThread_interpStackEnd] @ r9<- interpStackEnd
1288 ldrb r8, [rSELF, #offThread_breakFlags] @ r8<- breakFlags
1289 add r3, r1, #1 @ Thumb addr is odd
1290 SAVEAREA_FROM_FP(r1, rFP) @ r1<- stack save area
1291 sub r1, r1, r7, lsl #2 @ r1<- newFp (old savearea - regsSize)
1292 SAVEAREA_FROM_FP(r10, r1) @ r10<- stack save area
1293 cmp r10, r9 @ bottom < interpStackEnd?
1294 bxlo lr @ return to raise stack overflow excep.
1295 @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
1296 str rPC, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
1297 str rPC, [r1, #(offStackSaveArea_savedPc - sizeofStackSaveArea)]
1299 @ set up newSaveArea
1300 str rFP, [r1, #(offStackSaveArea_prevFrame - sizeofStackSaveArea)]
1301 str r3, [r1, #(offStackSaveArea_returnAddr - sizeofStackSaveArea)]
1302 str r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
1303 cmp r8, #0 @ breakFlags != 0
1304 ldr r8, [r0, #offMethod_nativeFunc] @ r8<- method->nativeFunc
1305 #if !defined(WITH_SELF_VERIFICATION)
1306 bxne lr @ bail to the interpreter
1308 bx lr @ bail to interpreter unconditionally
1311 @ go ahead and transfer control to the native code
1312 ldr r9, [rSELF, #offThread_jniLocal_topCookie]@r9<-thread->localRef->...
@ NOTE(review): the value of r2 stored below is established on a line not
@ visible in this excerpt; per its comment it flags "not in JIT code cache".
1314 str r1, [rSELF, #offThread_curFrame] @ curFrame = newFp
1315 str r2, [rSELF, #offThread_inJitCodeCache] @ not in the jit code cache
1316 str r9, [r1, #(offStackSaveArea_localRefCookie - sizeofStackSaveArea)]
1317 @ newFp->localRefCookie=top
1318 SAVEAREA_FROM_FP(r10, r1) @ r10<- new stack save area
@ Marshal the JNI bridge arguments: (newFP, &retval, methodToCall, self).
1320 mov r2, r0 @ arg2<- methodToCall
1321 mov r0, r1 @ arg0<- newFP
1322 add r1, rSELF, #offThread_retval @ arg1<- &retval
1323 mov r3, rSELF @ arg3<- self
1324 #if defined(TEMPLATE_INLINE_PROFILING)
@ Push {r2,r6} first: this pair stays on the stack across the native call
@ and is popped afterwards to feed the trace-exit hook.
1325 @ r2=methodToCall, r6=rSELF
1326 stmfd sp!, {r2,r6} @ to be consumed after JNI return
1327 stmfd sp!, {r0-r3} @ preserve r0-r3
1330 @ r0=JNIMethod, r1=rSELF
1331 ldr ip, .LdvmFastMethodTraceEnter
1333 ldmfd sp!, {r0-r3} @ restore r0-r3
1336 blx r8 @ off to the native code
1338 #if defined(TEMPLATE_INLINE_PROFILING)
@ Consume the {r2,r6} pair saved above for the native trace-exit hook.
1339 ldmfd sp!, {r0-r1} @ restore r2 and r6
1340 @ r0=JNIMethod, r1=rSELF
1341 ldr ip, .LdvmFastNativeMethodTraceExit
1344 @ native return; r10=newSaveArea
1345 @ equivalent to dvmPopJniLocals
1346 ldr r2, [r10, #offStackSaveArea_returnAddr] @ r2 = chaining cell ret
1347 ldr r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved->top
1348 ldr r1, [rSELF, #offThread_exception] @ check for exception
1349 str rFP, [rSELF, #offThread_curFrame] @ curFrame = fp
1351 str r0, [rSELF, #offThread_jniLocal_topCookie] @ new top <- old top
1352 ldr r0, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
1354 @ r0 = dalvikCallsitePC
1355 bne .LhandleException @ no, handle exception
@ No exception: resume through the return chaining cell if it still
@ exists, otherwise re-enter the interpreter after the invoke.
1357 str r2, [rSELF, #offThread_inJitCodeCache] @ set the mode properly
1358 cmp r2, #0 @ return chaining cell still exists?
1359 bxne r2 @ yes - go ahead
1361 @ continue executing the next instruction through the interpreter
1362 ldr r1, .LdvmJitToInterpTraceSelectNoChain @ defined in footer.S
1363 add rPC, r0, #6 @ reconstruct new rPC (advance 6 bytes)
1364 #if defined(WITH_JIT_TUNING)
1365 mov r0, #kCallsiteInterpreted
1369 #undef TEMPLATE_INLINE_PROFILING
1371 .size dvmCompilerTemplateStart, .-dvmCompilerTemplateStart
1372 /* File: armv5te/footer.S */
1374 * ===========================================================================
1375 * Common subroutines and data
1376 * ===========================================================================
1379 .section .data.rel.ro
@ ---------------------------------------------------------------------------
@ Shared native-invoke helper (its entry label is on a line not visible in
@ this excerpt): builds the JNI frame, wraps the native call with the
@ method-trace enter/exit hooks when the submode bit is set, then handles
@ the return / exception paths.
@ ---------------------------------------------------------------------------
1382 @ Prep for the native call
1383 @ r1 = newFP, r0 = methodToCall
1385 ldr r9, [rSELF, #offThread_jniLocal_topCookie]@r9<-thread->localRef->...
1386 str r2, [rSELF, #offThread_inJitCodeCache] @ not in jit code cache
1387 str r1, [rSELF, #offThread_curFrame] @ curFrame = newFp
1388 str r9, [r1, #(offStackSaveArea_localRefCookie - sizeofStackSaveArea)]
1389 @ newFp->localRefCookie=top
1390 ldrh lr, [rSELF, #offThread_subMode]
1391 SAVEAREA_FROM_FP(r10, r1) @ r10<- new stack save area
@ Marshal the JNI bridge arguments: (newFP, &retval, methodToCall, self).
1393 mov r2, r0 @ r2<- methodToCall
1394 mov r0, r1 @ r0<- newFP
1395 add r1, rSELF, #offThread_retval @ r1<- &retval
1396 mov r3, rSELF @ arg3<- self
@ Two-operand UAL form: lr &= kSubModeMethodTrace, setting the flags, to
@ decide at runtime whether method tracing is active.
1397 ands lr, #kSubModeMethodTrace
1398 beq 121f @ hop if not profiling
1399 @ r2: methodToCall, r6: rSELF
1404 ldr ip, .LdvmFastMethodTraceEnter
1408 ldr ip, [r2, #offMethod_nativeFunc]
1412 ldr ip, .LdvmFastNativeMethodTraceExit
@ Non-profiling path: call the native function directly via ip.
1416 ldr ip, [r2, #offMethod_nativeFunc]
1420 @ native return; r10=newSaveArea
1421 @ equivalent to dvmPopJniLocals
1422 ldr r2, [r10, #offStackSaveArea_returnAddr] @ r2 = chaining cell ret
1423 ldr r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved->top
1424 ldr r1, [rSELF, #offThread_exception] @ check for exception
1425 str rFP, [rSELF, #offThread_curFrame] @ curFrame = fp
1427 str r0, [rSELF, #offThread_jniLocal_topCookie] @ new top <- old top
1428 ldr r0, [r10, #offStackSaveArea_savedPc] @ reload rPC
1430 @ r0 = dalvikCallsitePC
1431 bne .LhandleException @ no, handle exception
1433 str r2, [rSELF, #offThread_inJitCodeCache] @ set the new mode
1434 cmp r2, #0 @ return chaining cell still exists?
1435 bxne r2 @ yes - go ahead
1437 @ continue executing the next instruction through the interpreter
1438 ldr r1, .LdvmJitToInterpTraceSelectNoChain @ defined in footer.S
1439 add rPC, r0, #6 @ reconstruct new rPC (advance 6 bytes)
1440 #if defined(WITH_JIT_TUNING)
1441 mov r0, #kCallsiteInterpreted
@ ---------------------------------------------------------------------------
@ Common exception-throw path (.LhandleException): r0 holds the faulting
@ Dalvik PC on entry; control transfers to dvmMterpCommonExceptionThrown.
@ ---------------------------------------------------------------------------
1447 * r0 Faulting Dalvik PC
1450 #if defined(WITH_SELF_VERIFICATION)
1451 ldr pc, .LdeadFood @ should not see this under self-verification mode
1456 str r2, [rSELF, #offThread_inJitCodeCache] @ in interpreter land
1457 ldr r1, .LdvmMterpCommonExceptionThrown @ PIC way of getting &func
1458 ldr rIBASE, .LdvmAsmInstructionStart @ same as above
1459 mov rPC, r0 @ reload the faulting Dalvik address
1460 bx r1 @ branch to dvmMterpCommonExceptionThrown
@ ---------------------------------------------------------------------------
@ Literal pool: .word slots holding external entry-point addresses, loaded
@ PC-relative ("ldr rX, .Lname") so the templates stay position-independent.
@ ---------------------------------------------------------------------------
1463 .LdvmAsmInstructionStart:
1464 .word dvmAsmInstructionStart
1465 .LdvmJitToInterpNoChainNoProfile:
1466 .word dvmJitToInterpNoChainNoProfile
1467 .LdvmJitToInterpTraceSelectNoChain:
1468 .word dvmJitToInterpTraceSelectNoChain
1469 .LdvmJitToInterpNoChain:
1470 .word dvmJitToInterpNoChain
@ slot for .LdvmMterpStdBail (label line not visible in this excerpt)
1472 .word dvmMterpStdBail
1473 .LdvmMterpCommonExceptionThrown:
1474 .word dvmMterpCommonExceptionThrown
1477 .LdvmJitTraceProfilingOff:
1478 .word dvmJitTraceProfilingOff
1479 #if defined(WITH_JIT_TUNING)
1481 .word gDvmICHitCount
1483 #if defined(WITH_SELF_VERIFICATION)
1484 .LdvmSelfVerificationMemOpDecode:
1485 .word dvmSelfVerificationMemOpDecode
1487 .LdvmFastMethodTraceEnter:
1488 .word dvmFastMethodTraceEnter
1489 .LdvmFastNativeMethodTraceExit:
1490 .word dvmFastNativeMethodTraceExit
1491 .LdvmFastMethodTraceExit:
1492 .word dvmFastMethodTraceExit
1494 .word __aeabi_cdcmple
1496 .word __aeabi_cfcmple
@ NOTE(review): "dmv" (not "dvm") is the spelling of the shipped symbol;
@ it is an exported global, so renaming it would break external references.
1498 .global dmvCompilerTemplateEnd
1499 dmvCompilerTemplateEnd:
1501 #endif /* WITH_JIT */