dvmInitBreakpoints();
gDvm.debuggerActive = true;
dvmUpdateAllInterpBreak(kInterpDebugBreak, kSubModeDebuggerActive, true);
+#if defined(WITH_JIT)
+ dvmCompilerUpdateGlobalState();
+#endif
}
/*
gDvm.debuggerActive = false;
dvmUpdateAllInterpBreak(kInterpDebugBreak, kSubModeDebuggerActive, false);
+#if defined(WITH_JIT)
+ dvmCompilerUpdateGlobalState();
+#endif
dvmHashTableLock(gDvm.dbgRegistry);
gDvm.debuggerConnected = false;
// Tell the threads
dvmUpdateAllInterpBreak(newBreak, newMode, enable);
+#if defined(WITH_JIT)
+ dvmCompilerUpdateGlobalState();
+#endif
+
LOGD("+++ active profiler count now %d\n", newValue);
}
/*
* Register the METHOD_TRACE_EXIT action for the fast interpreter and
* JIT'ed code for JNI methods. The about-to-return JNI callee method is passed
- * in explicitly.
+ * in explicitly. Also used for inline-execute.
*/
void dvmFastNativeMethodTraceExit(const Method* method, Thread* self)
{
dvmCompilerDumpStats();
/* Stress-test unchain all */
dvmJitUnchainAll();
- LOGD("Send %d more signals to rest the code cache",
+ LOGD("Send %d more signals to reset the code cache",
codeCacheResetCount & 7);
}
+ dvmCheckInterpStateConsistency();
}
#endif
newThread->prev = gDvm.threadList;
gDvm.threadList->next = newThread;
+ /* Add any existing global modes to the interpBreak control */
+ dvmInitializeInterpBreak(newThread);
+
if (!dvmGetFieldBoolean(threadObj, gDvm.offJavaLangThread_daemon))
gDvm.nonDaemonThreadCount++; // guarded by thread list lock
gDvmJit.pProfTable = dvmDebuggerOrProfilerActive() ? NULL : pJitProfTable;
gDvmJit.pProfTableCopy = pJitProfTable;
gDvmJit.pJitTraceProfCounters = pJitTraceProfCounters;
- dvmJitUpdateState();
+ dvmJitUpdateThreadStateAll();
dvmUnlockMutex(&gDvmJit.tableLock);
/* Signal running threads to refresh their cached pJitTable pointers */
/* Disable new translation requests */
gDvmJit.pProfTable = NULL;
gDvmJit.pProfTableCopy = NULL;
- dvmJitUpdateState();
+ dvmJitUpdateThreadStateAll();
if (gDvm.verboseShutdown ||
gDvmJit.profileMode == kTraceProfilingContinuous) {
*/
}
-void dvmCompilerStateRefresh()
+void dvmCompilerUpdateGlobalState()
{
bool jitActive;
bool jitActivate;
if (needUnchain)
dvmJitUnchainAll();
// Make sure all threads have current values
- dvmJitUpdateState();
+ dvmJitUpdateThreadStateAll();
}
bool isIterative);
void dvmCompilerMethodSSATransformation(struct CompilationUnit *cUnit);
bool dvmCompilerBuildLoop(struct CompilationUnit *cUnit);
-void dvmCompilerStateRefresh(void);
+void dvmCompilerUpdateGlobalState(void);
JitTraceDescription *dvmCopyTraceDescriptor(const u2 *pc,
const struct JitEntry *desc);
void *dvmCompilerGetInterpretTemplate();
gDvmJit.disableOpt |= (1 << kMethodJit);
// Make sure all threads have current values
- dvmJitUpdateState();
+ dvmJitUpdateThreadStateAll();
return true;
}
gDvmJit.disableOpt |= (1 << kMethodJit);
// Make sure all threads have current values
- dvmJitUpdateState();
+ dvmJitUpdateThreadStateAll();
return true;
}
gDvmJit.disableOpt |= (1 << kMethodJit);
// Make sure all threads have current values
- dvmJitUpdateState();
+ dvmJitUpdateThreadStateAll();
return true;
}
gDvmJit.disableOpt |= (1 << kMethodJit);
// Make sure all threads have current values
- dvmJitUpdateState();
+ dvmJitUpdateThreadStateAll();
return true;
}
sizeof(struct JitToInterpEntries)) <= 128);
// Make sure all threads have current values
- dvmJitUpdateState();
+ dvmJitUpdateThreadStateAll();
return true;
}
mov r3, #0 @ Record that we're not returning
str r3, [r0, #offThread_inJitCodeCache]
blx r2 @ dvmLockObject(self, obj)
- @ refresh Jit's on/off status & test for exception
+ @ test for exception
ldr r1, [rSELF, #offThread_exception]
cmp r1, #0
beq 1f
mov r3, #0 @ Record that we're not returning
str r3, [r0, #offThread_inJitCodeCache]
blx r2 @ dvmLockObject(self, obj)
- @ refresh Jit's on/off status & test for exception
+ @ test for exception
ldr r1, [rSELF, #offThread_exception]
cmp r1, #0
beq 1f
mov r3, #0 @ Record that we're not returning
str r3, [r0, #offThread_inJitCodeCache]
blx r2 @ dvmLockObject(self, obj)
- @ refresh Jit's on/off status & test for exception
+ @ test for exception
ldr r1, [rSELF, #offThread_exception]
cmp r1, #0
beq 1f
mov r3, #0 @ Record that we're not returning
str r3, [r0, #offThread_inJitCodeCache]
blx r2 @ dvmLockObject(self, obj)
- @ refresh Jit's on/off status & test for exception
+ @ test for exception
ldr r1, [rSELF, #offThread_exception]
cmp r1, #0
beq 1f
mov r3, #0 @ Record that we're not returning
str r3, [r0, #offThread_inJitCodeCache]
blx r2 @ dvmLockObject(self, obj)
- @ refresh Jit's on/off status & test for exception
+ @ test for exception
ldr r1, [rSELF, #offThread_exception]
cmp r1, #0
beq 1f
/*
* The interpreter just threw. Handle any special subMode requirements.
+ * All interpSave state must be valid on entry.
*/
-void dvmReportExceptionThrow(Thread* self, const Method* curMethod,
- const u2* pc, void* fp)
+void dvmReportExceptionThrow(Thread* self, Object* exception)
{
+ const Method* curMethod = self->interpSave.method;
#if defined(WITH_JIT)
if (self->interpBreak.ctl.subMode & kSubModeJitTraceBuild) {
- dvmJitEndTraceSelect(self, pc);
+ dvmJitEndTraceSelect(self, self->interpSave.pc);
}
if (self->interpBreak.ctl.breakFlags & kInterpSingleStep) {
/* Discard any single-step native returns to translation */
#endif
if (self->interpBreak.ctl.subMode & kSubModeDebuggerActive) {
void *catchFrame;
- int offset = pc - curMethod->insns;
- int catchRelPc = dvmFindCatchBlock(self, offset, self->exception,
+ int offset = self->interpSave.pc - curMethod->insns;
+ int catchRelPc = dvmFindCatchBlock(self, offset, exception,
true, &catchFrame);
- dvmDbgPostException(fp, offset, catchFrame, catchRelPc,
- self->exception);
+ dvmDbgPostException(self->interpSave.fp, offset, catchFrame,
+ catchRelPc, exception);
}
}
/*
* The interpreter is preparing to do an invoke (both native & normal).
- * Handle any special subMode requirements.
+ * Handle any special subMode requirements. All interpSave state
+ * must be valid on entry.
*/
void dvmReportInvoke(Thread* self, const Method* methodToCall)
{
* The interpreter is preparing to do a native invoke. Handle any
* special subMode requirements. NOTE: for a native invoke,
* dvmReportInvoke() and dvmReportPreNativeInvoke() will both
- * be called prior to the invoke.
+ * be called prior to the invoke. All interpSave state must
+ * be valid on entry.
*/
-void dvmReportPreNativeInvoke(const u2* pc, Thread* self,
- const Method* methodToCall)
+void dvmReportPreNativeInvoke(const Method* methodToCall, Thread* self)
{
#if defined(WITH_JIT)
/*
* builder can't follow into or through a native method.
*/
if (self->interpBreak.ctl.subMode & kSubModeJitTraceBuild) {
- dvmCheckJit(pc, self);
+ dvmCheckJit(self->interpSave.pc, self);
}
#endif
if (self->interpBreak.ctl.subMode & kSubModeDebuggerActive) {
/*
* The interpreter has returned from a native invoke. Handle any
- * special subMode requirements.
+ * special subMode requirements. All interpSave state must be
+ * valid on entry.
*/
-void dvmReportPostNativeInvoke(const u2* pc, Thread* self,
- const Method* methodToCall)
+void dvmReportPostNativeInvoke(const Method* methodToCall, Thread* self)
{
if (self->interpBreak.ctl.subMode & kSubModeDebuggerActive) {
Object* thisPtr = dvmGetThisPtr(self->interpSave.method,
/*
* The interpreter has returned from a normal method. Handle any special
- * subMode requirements.
+ * subMode requirements. All interpSave state must be valid on entry.
*/
-void dvmReportReturn(Thread* self, const u2* pc, const u4* prevFP)
+void dvmReportReturn(Thread* self)
{
TRACE_METHOD_EXIT(self, self->interpSave.method);
#if defined(WITH_JIT)
- if (dvmIsBreakFrame(prevFP) &&
+ if (dvmIsBreakFrame(self->interpSave.fp) &&
(self->interpBreak.ctl.subMode & kSubModeJitTraceBuild)) {
- dvmCheckJit(pc, self);
+ dvmCheckJit(self->interpSave.pc, self);
}
#endif
}
* breakpoints. We may be able to speed things up a bit if we don't query
* the event list unless we know there's at least one lurking within.
*/
-void dvmUpdateDebugger(const Method* method, const u2* pc, const u4* fp,
- bool methodEntry, Thread* self)
+static void updateDebugger(const Method* method, const u2* pc, const u4* fp,
+ bool methodEntry, Thread* self)
{
int eventFlags = 0;
}
/*
+ * Do a sanity check on interpreter state saved to Thread.
+ * A failure here doesn't necessarily mean that something is wrong,
+ * so this code should only be used during development to suggest
+ * a possible problem.
+ */
+void dvmCheckInterpStateConsistency()
+{
+ Thread* self = dvmThreadSelf();
+ Thread* thread;
+ uint8_t breakFlags;
+ uint8_t subMode;
+ void* handlerTable;
+
+ /* Snapshot the calling thread's interpBreak state as the reference copy */
+ dvmLockThreadList(self);
+ breakFlags = self->interpBreak.ctl.breakFlags;
+ subMode = self->interpBreak.ctl.subMode;
+ handlerTable = self->interpBreak.ctl.curHandlerTable;
+ /* Walk every thread; log (but don't abort on) divergence from the reference */
+ for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
+ if (subMode != thread->interpBreak.ctl.subMode) {
+ LOGD("Warning: subMode mismatch - 0x%x:0x%x, tid[%d]",
+ subMode,thread->interpBreak.ctl.subMode,thread->threadId);
+ }
+ if (breakFlags != thread->interpBreak.ctl.breakFlags) {
+ LOGD("Warning: breakFlags mismatch - 0x%x:0x%x, tid[%d]",
+ breakFlags,thread->interpBreak.ctl.breakFlags,thread->threadId);
+ }
+ if (handlerTable != thread->interpBreak.ctl.curHandlerTable) {
+ LOGD("Warning: curHandlerTable mismatch - 0x%x:0x%x, tid[%d]",
+ (int)handlerTable,(int)thread->interpBreak.ctl.curHandlerTable,
+ thread->threadId);
+ }
+#if defined(WITH_JIT)
+ /* Threads cache JIT globals locally for fast access; verify the caches */
+ if (thread->pJitProfTable != gDvmJit.pProfTable) {
+ LOGD("Warning: pJitProfTable mismatch - 0x%x:0x%x, tid[%d]",
+ (int)thread->pJitProfTable,(int)gDvmJit.pProfTable,
+ thread->threadId);
+ }
+ if (thread->jitThreshold != gDvmJit.threshold) {
+ LOGD("Warning: jitThreshold mismatch - 0x%x:0x%x, tid[%d]",
+ (int)thread->jitThreshold,(int)gDvmJit.threshold,
+ thread->threadId);
+ }
+#endif
+ }
+ dvmUnlockThreadList();
+}
+
+/*
* Arm a safepoint callback for a thread. If funct is null,
* clear any pending callback.
* TODO: only gc is currently using this feature, and will have
self->icRechainCount = PREDICTED_CHAIN_COUNTER_RECHAIN;
self->pProfileCountdown = &gDvmJit.profileCountdown;
// Jit state that can change
- dvmJitUpdateState();
+ dvmJitUpdateThreadStateSingle(self);
#endif
+}
+/*
+ * For a newly-created thread, we need to start off with interpBreak
+ * set to any existing global modes. The caller must hold the
+ * thread list lock.
+ */
+void dvmInitializeInterpBreak(Thread* thread)
+{
+ u1 flags = 0;
+ u1 subModes = 0;
+
+ /* VM-wide instruction counting is in effect */
+ if (gDvm.instructionCountEnableCount > 0) {
+ flags |= kInterpInstCountBreak;
+ subModes |= kSubModeInstCounting;
+ }
+ /* Method tracing is in effect */
+ if (dvmIsMethodTraceActive()) {
+ subModes |= kSubModeMethodTrace;
+ }
+ /* A debugger is attached and active */
+ if (gDvm.debuggerActive) {
+ flags |= kInterpDebugBreak;
+ subModes |= kSubModeDebuggerActive;
+ }
+ /* Apply all accumulated break flags and subModes to the new thread at once */
+ dvmUpdateInterpBreak(thread, flags, subModes, true);
}
/*
}
if (self->interpBreak.ctl.subMode & kSubModeDebuggerActive) {
- dvmUpdateDebugger(method, pc, fp,
- self->debugIsMethodEntry, self);
+ updateDebugger(method, pc, fp,
+ self->debugIsMethodEntry, self);
}
if (gDvm.instructionCountEnableCount != 0) {
/*
/*
* Debugger support
*/
-void dvmUpdateDebugger(const Method* method, const u2* pc, const u4* fp,
- bool methodEntry, Thread* self);
void dvmCheckBefore(const u2 *dPC, u4 *fp, Thread* self);
-void dvmReportExceptionThrow(Thread* self, const Method* curMethod,
- const u2* pc, void* fp);
-void dvmReportPreNativeInvoke(const u2* pc, Thread* self,
- const Method* methodToCall);
-void dvmReportPostNativeInvoke(const u2* pc, Thread* self,
- const Method* methodToCall);
+void dvmReportExceptionThrow(Thread* self, Object* exception);
+void dvmReportPreNativeInvoke(const Method* methodToCall, Thread* self);
+void dvmReportPostNativeInvoke(const Method* methodToCall, Thread* self);
void dvmReportInvoke(Thread* self, const Method* methodToCall);
-void dvmReportReturn(Thread* self, const u2* pc, const u4* prevFP);
+void dvmReportReturn(Thread* self);
/*
* Update interpBreak
void dvmUpdateInterpBreak(Thread* thread, int newBreak, int newMode,
bool enable);
void dvmAddToSuspendCounts(Thread* thread, int delta, int dbgDelta);
+void dvmCheckInterpStateConsistency();
+void dvmInitializeInterpBreak(Thread* thread);
/*
* Update interpBreak for all threads
* free it because some thread may be holding a reference.
*/
gDvmJit.pProfTable = NULL;
- dvmJitUpdateState();
+ dvmJitUpdateThreadStateAll();
}
#if defined(WITH_JIT_TUNING)
}
/*
+ * Update JIT-specific info in Thread structure for a single thread,
+ * refreshing the thread-local copies of the JIT profiling table pointer
+ * and compilation threshold from the gDvmJit globals.
+ *
+ * NOTE(review): when the target is not the calling thread, the thread
+ * list lock should presumably be held by the caller - confirm at call
+ * sites (dvmJitUpdateThreadStateAll holds it).
+ */
+void dvmJitUpdateThreadStateSingle(Thread* thread)
+{
+ thread->pJitProfTable = gDvmJit.pProfTable;
+ thread->jitThreshold = gDvmJit.threshold;
+}
+
+/*
* Walk through the thread list and refresh all local copies of
* JIT global state (which was placed there for fast access).
*/
-void dvmJitUpdateState()
+void dvmJitUpdateThreadStateAll()
{
Thread* self = dvmThreadSelf();
Thread* thread;
dvmLockThreadList(self);
for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
- thread->pJitProfTable = gDvmJit.pProfTable;
- thread->jitThreshold = gDvmJit.threshold;
+ dvmJitUpdateThreadStateSingle(thread);
}
dvmUnlockThreadList();
void dvmJitTraceProfilingOn(void);
void dvmJitChangeProfileMode(TraceProfilingModes newState);
void dvmJitDumpTraceDesc(JitTraceDescription *trace);
-void dvmJitUpdateState(void);
+void dvmJitUpdateThreadStateSingle(Thread* thread);
+void dvmJitUpdateThreadStateAll(void);
void dvmJitResumeTranslation(Thread* self, const u2* pc, const u4* fp);
#endif /*_DALVIK_INTERP_JIT*/
*/
.L${opcode}_continue:
rsb r0, r0, #4 @ r0<- 4-r0
- FETCH(r9, 2) @ r9<- FEDC
+ FETCH(rINST, 2) @ rINST<- FEDC
add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each
bl common_abort @ (skipped due to ARM prefetch)
-4: and ip, r9, #0xf000 @ isolate F
+4: and ip, rINST, #0xf000 @ isolate F
ldr r3, [rFP, ip, lsr #10] @ r3<- vF (shift right 12, left 2)
-3: and ip, r9, #0x0f00 @ isolate E
+3: and ip, rINST, #0x0f00 @ isolate E
ldr r2, [rFP, ip, lsr #6] @ r2<- vE
-2: and ip, r9, #0x00f0 @ isolate D
+2: and ip, rINST, #0x00f0 @ isolate D
ldr r1, [rFP, ip, lsr #2] @ r1<- vD
-1: and ip, r9, #0x000f @ isolate C
+1: and ip, rINST, #0x000f @ isolate C
ldr r0, [rFP, ip, lsl #2] @ r0<- vC
0:
- ldr r9, .L${opcode}_table @ table of InlineOperation
- ldr pc, [r9, r10, lsl #4] @ sizeof=16, "func" is first entry
+ ldr rINST, .L${opcode}_table @ table of InlineOperation
+ ldr pc, [rINST, r10, lsl #4] @ sizeof=16, "func" is first entry
@ (not reached)
/*
bl dvmResolveInlineNative
cmp r0, #0 @ did it resolve?
beq .L${opcode}_resume @ no, just move on
+ mov r9, r0 @ remember method
mov r1, rSELF
bl dvmFastMethodTraceEnter @ (method, self)
add r1, rSELF, #offThread_retval@ r1<- &self->retval
mov r0, rINST, lsr #12 @ r0<- B
str r1, [sp] @ push &self->retval
bl .L${opcode}_continue @ make call; will return after
+ mov rINST, r0 @ save result of inline
add sp, sp, #8 @ pop stack
- cmp r0, #0 @ test boolean result of inline
- beq common_exceptionThrown @ returned false, handle exception
- mov r0, r10
- bl dvmResolveInlineNative @ reload method
+ mov r0, r9 @ r0<- method
mov r1, rSELF
- bl dvmFastMethodTraceExit @ (method, self)
+ bl dvmFastNativeMethodTraceExit @ (method, self)
+ cmp rINST, #0 @ test boolean result of inline
+ beq common_exceptionThrown @ returned false, handle exception
FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
GET_INST_OPCODE(ip) @ extract opcode from rINST
GOTO_OPCODE(ip) @ jump to next instruction
bl dvmResolveInlineNative
cmp r0, #0 @ did it resolve?
beq .L${opcode}_resume @ no, just move on
+ mov r9, r0 @ remember method
mov r1, rSELF
bl dvmFastMethodTraceEnter @ (method, self)
add r1, rSELF, #offThread_retval@ r1<- &self->retval
sub sp, sp, #8 @ make room for arg, +64 bit align
- mov r0, rINST, lsr #12 @ r0<- B
+ mov r0, rINST, lsr #8 @ r0<- B
+ mov rINST, r9 @ rINST<- method
str r1, [sp] @ push &self->retval
bl .L${opcode}_continue @ make call; will return after
+ mov r9, r0 @ save result of inline
add sp, sp, #8 @ pop stack
- cmp r0, #0 @ test boolean result of inline
- beq common_exceptionThrown @ returned false, handle exception
- mov r0, r10
- bl dvmResolveInlineNative @ reload method
+ mov r0, rINST @ r0<- method
mov r1, rSELF
- bl dvmFastMethodTraceExit @ (method, self)
+ bl dvmFastNativeMethodTraceExit @ (method, self)
+ cmp r9, #0 @ test boolean result of inline
+ beq common_exceptionThrown @ returned false, handle exception
FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
GET_INST_OPCODE(ip) @ extract opcode from rINST
GOTO_OPCODE(ip) @ jump to next instruction
2:
@ Profiling - record method entry. r0: methodToCall
stmfd sp!, {r0-r3} @ preserve r0-r3
+ str rPC, [rSELF, #offThread_pc] @ update interpSave.pc
+ str rFP, [rSELF, #offThread_fp] @ update interpSave.fp
mov r1, r0
mov r0, rSELF
bl dvmReportInvoke @ (self, method)
11:
@ r0=newFp, r1=&retval, r2=methodToCall, r3=self, lr=subModes
stmfd sp!, {r0-r3} @ save all but subModes
- mov r0, rPC
+ str rPC, [rSELF, #offThread_pc] @ update interpSave.pc
+ str rFP, [rSELF, #offThread_fp] @ update interpSave.fp
+ mov r0, r2 @ r0<- methodToCall
mov r1, rSELF
- bl dvmReportPreNativeInvoke @ (pc, self, methodToCall)
+ bl dvmReportPreNativeInvoke @ (methodToCall, self)
ldmfd sp, {r0-r3} @ refresh. NOTE: no sp autoincrement
@ Call the native method
ldmfd sp!, {r0-r3} @ r2<- methodToCall (others unneeded)
@ Finish up any post-invoke subMode requirements
- mov r0, rPC
+ @ interpSave already up-to-date
+ mov r0, r2 @ r0<- methodToCall
mov r1, rSELF
- bl dvmReportPostNativeInvoke @ (pc, self, methodToCall)
+ bl dvmReportPostNativeInvoke @ (methodToCall, self)
b 7b @ resume
.LstackOverflow: @ r0=methodToCall
19:
@ Handle special actions
@ On entry, r0: StackSaveArea
- ldr r2, [r0, #offStackSaveArea_prevFrame] @ r2<- prevFP
- mov r1, rPC
+ ldr r1, [r0, #offStackSaveArea_prevFrame] @ r1<- prevFP
+ str rPC, [rSELF, #offThread_pc] @ update interpSave.pc
+ str r1, [rSELF, #offThread_fp] @ update interpSave.fp
mov r0, rSELF
- bl dvmReportReturn @ (self, pc, prevFP)
+ bl dvmReportReturn @ (self)
SAVEAREA_FROM_FP(r0, rFP) @ restore StackSaveArea
b 14b @ continue
@ Manage debugger bookkeeping
7:
+ str rPC, [rSELF, #offThread_pc] @ update interpSave.pc
+ str rFP, [rSELF, #offThread_fp] @ update interpSave.fp
mov r0, rSELF @ arg0<- self
- ldr r1, [rSELF, #offThread_method] @ arg1<- curMethod
- mov r2, rPC @ arg2<- pc
- mov r3, rFP @ arg3<- fp
- bl dvmReportExceptionThrow @ (self, method, pc, fp)
+ mov r1, r9 @ arg1<- exception
+ bl dvmReportExceptionThrow @ (self, exception)
b 8b @ resume with normal handling
.LnotCaughtLocally: @ r9=exception
* for the sake of anything that needs to do disambiguation in a
* common handler with INST_INST.
*
- * The breakpoint itself is handled over in dvmUpdateDebugger(),
+ * The breakpoint itself is handled over in updateDebugger(),
* because we need to detect other events (method entry, single
* step) and report them in the same event packet, and we're not
* yet handling those through breakpoint instructions. By the
/* Handle any special subMode requirements */
if (self->interpBreak.ctl.subMode != 0) {
- dvmReportReturn(self, pc, fp);
+ PC_FP_TO_SELF();
+ dvmReportReturn(self);
}
if (dvmIsBreakFrame(fp)) {
/* update thread FP, and reset local variables */
self->curFrame = fp;
curMethod = SAVEAREA_FROM_FP(fp)->method;
+ self->interpSave.method = curMethod;
//methodClass = curMethod->clazz;
methodClassDex = curMethod->clazz->pDvmDex;
pc = saveArea->savedPc;
* debugger.
*/
if (self->interpBreak.ctl.subMode != 0) {
- dvmReportExceptionThrow(self, curMethod, pc, fp);
+ PC_FP_TO_SELF();
+ dvmReportExceptionThrow(self, exception);
}
/*
*/
//fp = (u4*) self->curFrame;
curMethod = SAVEAREA_FROM_FP(fp)->method;
+ self->interpSave.method = curMethod;
//methodClass = curMethod->clazz;
methodClassDex = curMethod->clazz->pDvmDex;
pc = curMethod->insns + catchRelPc;
* calls. For native calls, we'll mark EXIT on return.
* For non-native calls, EXIT is marked in the RETURN op.
*/
+ PC_FP_TO_SELF();
dvmReportInvoke(self, methodToCall);
}
* frame pointer and other local state, and continue.
*/
curMethod = methodToCall;
+ self->interpSave.method = curMethod;
methodClassDex = curMethod->clazz->pDvmDex;
pc = methodToCall->insns;
self->curFrame = fp = newFp;
DUMP_REGS(methodToCall, newFp, true); // show input args
if (self->interpBreak.ctl.subMode != 0) {
- dvmReportPreNativeInvoke(pc, self, methodToCall);
+ PC_FP_TO_SELF();
+ dvmReportPreNativeInvoke(methodToCall, self);
}
ILOGD("> native <-- %s.%s %s", methodToCall->clazz->descriptor,
(*methodToCall->nativeFunc)(newFp, &retval, methodToCall, self);
if (self->interpBreak.ctl.subMode != 0) {
- dvmReportPostNativeInvoke(pc, self, methodToCall);
+ PC_FP_TO_SELF();
+ dvmReportPostNativeInvoke(methodToCall, self);
}
/* pop frame off */
#define JIT_STUB_HACK(x)
#endif
+/*
+ * InterpSave's pc and fp must be valid when breaking out to a
+ * "Reportxxx" routine. Because the portable interpreter uses local
+ * variables for these, we must flush prior. Stubs, however, use
+ * the interpSave vars directly, so this is a nop for stubs.
+ */
+#define PC_FP_TO_SELF()
/*
* Opcode handler framing macros. Here, each opcode is a separate function
*/
.LOP_EXECUTE_INLINE_continue:
rsb r0, r0, #4 @ r0<- 4-r0
- FETCH(r9, 2) @ r9<- FEDC
+ FETCH(rINST, 2) @ rINST<- FEDC
add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each
bl common_abort @ (skipped due to ARM prefetch)
-4: and ip, r9, #0xf000 @ isolate F
+4: and ip, rINST, #0xf000 @ isolate F
ldr r3, [rFP, ip, lsr #10] @ r3<- vF (shift right 12, left 2)
-3: and ip, r9, #0x0f00 @ isolate E
+3: and ip, rINST, #0x0f00 @ isolate E
ldr r2, [rFP, ip, lsr #6] @ r2<- vE
-2: and ip, r9, #0x00f0 @ isolate D
+2: and ip, rINST, #0x00f0 @ isolate D
ldr r1, [rFP, ip, lsr #2] @ r1<- vD
-1: and ip, r9, #0x000f @ isolate C
+1: and ip, rINST, #0x000f @ isolate C
ldr r0, [rFP, ip, lsl #2] @ r0<- vC
0:
- ldr r9, .LOP_EXECUTE_INLINE_table @ table of InlineOperation
- ldr pc, [r9, r10, lsl #4] @ sizeof=16, "func" is first entry
+ ldr rINST, .LOP_EXECUTE_INLINE_table @ table of InlineOperation
+ ldr pc, [rINST, r10, lsl #4] @ sizeof=16, "func" is first entry
@ (not reached)
/*
bl dvmResolveInlineNative
cmp r0, #0 @ did it resolve?
beq .LOP_EXECUTE_INLINE_resume @ no, just move on
+ mov r9, r0 @ remember method
mov r1, rSELF
bl dvmFastMethodTraceEnter @ (method, self)
add r1, rSELF, #offThread_retval@ r1<- &self->retval
mov r0, rINST, lsr #12 @ r0<- B
str r1, [sp] @ push &self->retval
bl .LOP_EXECUTE_INLINE_continue @ make call; will return after
+ mov rINST, r0 @ save result of inline
add sp, sp, #8 @ pop stack
- cmp r0, #0 @ test boolean result of inline
- beq common_exceptionThrown @ returned false, handle exception
- mov r0, r10
- bl dvmResolveInlineNative @ reload method
+ mov r0, r9 @ r0<- method
mov r1, rSELF
- bl dvmFastMethodTraceExit @ (method, self)
+ bl dvmFastNativeMethodTraceExit @ (method, self)
+ cmp rINST, #0 @ test boolean result of inline
+ beq common_exceptionThrown @ returned false, handle exception
FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
GET_INST_OPCODE(ip) @ extract opcode from rINST
GOTO_OPCODE(ip) @ jump to next instruction
bl dvmResolveInlineNative
cmp r0, #0 @ did it resolve?
beq .LOP_EXECUTE_INLINE_RANGE_resume @ no, just move on
+ mov r9, r0 @ remember method
mov r1, rSELF
bl dvmFastMethodTraceEnter @ (method, self)
add r1, rSELF, #offThread_retval@ r1<- &self->retval
sub sp, sp, #8 @ make room for arg, +64 bit align
- mov r0, rINST, lsr #12 @ r0<- B
+ mov r0, rINST, lsr #8 @ r0<- B
+ mov rINST, r9 @ rINST<- method
str r1, [sp] @ push &self->retval
bl .LOP_EXECUTE_INLINE_RANGE_continue @ make call; will return after
+ mov r9, r0 @ save result of inline
add sp, sp, #8 @ pop stack
- cmp r0, #0 @ test boolean result of inline
- beq common_exceptionThrown @ returned false, handle exception
- mov r0, r10
- bl dvmResolveInlineNative @ reload method
+ mov r0, rINST @ r0<- method
mov r1, rSELF
- bl dvmFastMethodTraceExit @ (method, self)
+ bl dvmFastNativeMethodTraceExit @ (method, self)
+ cmp r9, #0 @ test boolean result of inline
+ beq common_exceptionThrown @ returned false, handle exception
FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
GET_INST_OPCODE(ip) @ extract opcode from rINST
GOTO_OPCODE(ip) @ jump to next instruction
2:
@ Profiling - record method entry. r0: methodToCall
stmfd sp!, {r0-r3} @ preserve r0-r3
+ str rPC, [rSELF, #offThread_pc] @ update interpSave.pc
+ str rFP, [rSELF, #offThread_fp] @ update interpSave.fp
mov r1, r0
mov r0, rSELF
bl dvmReportInvoke @ (self, method)
11:
@ r0=newFp, r1=&retval, r2=methodToCall, r3=self, lr=subModes
stmfd sp!, {r0-r3} @ save all but subModes
- mov r0, rPC
+ str rPC, [rSELF, #offThread_pc] @ update interpSave.pc
+ str rFP, [rSELF, #offThread_fp] @ update interpSave.fp
+ mov r0, r2 @ r0<- methodToCall
mov r1, rSELF
- bl dvmReportPreNativeInvoke @ (pc, self, methodToCall)
+ bl dvmReportPreNativeInvoke @ (methodToCall, self)
ldmfd sp, {r0-r3} @ refresh. NOTE: no sp autoincrement
@ Call the native method
ldmfd sp!, {r0-r3} @ r2<- methodToCall (others unneeded)
@ Finish up any post-invoke subMode requirements
- mov r0, rPC
+ @ interpSave already up-to-date
+ mov r0, r2 @ r0<- methodToCall
mov r1, rSELF
- bl dvmReportPostNativeInvoke @ (pc, self, methodToCall)
+ bl dvmReportPostNativeInvoke @ (methodToCall, self)
b 7b @ resume
.LstackOverflow: @ r0=methodToCall
19:
@ Handle special actions
@ On entry, r0: StackSaveArea
- ldr r2, [r0, #offStackSaveArea_prevFrame] @ r2<- prevFP
- mov r1, rPC
+ ldr r1, [r0, #offStackSaveArea_prevFrame] @ r1<- prevFP
+ str rPC, [rSELF, #offThread_pc] @ update interpSave.pc
+ str r1, [rSELF, #offThread_fp] @ update interpSave.fp
mov r0, rSELF
- bl dvmReportReturn @ (self, pc, prevFP)
+ bl dvmReportReturn @ (self)
SAVEAREA_FROM_FP(r0, rFP) @ restore StackSaveArea
b 14b @ continue
@ Manage debugger bookkeeping
7:
+ str rPC, [rSELF, #offThread_pc] @ update interpSave.pc
+ str rFP, [rSELF, #offThread_fp] @ update interpSave.fp
mov r0, rSELF @ arg0<- self
- ldr r1, [rSELF, #offThread_method] @ arg1<- curMethod
- mov r2, rPC @ arg2<- pc
- mov r3, rFP @ arg3<- fp
- bl dvmReportExceptionThrow @ (self, method, pc, fp)
+ mov r1, r9 @ arg1<- exception
+ bl dvmReportExceptionThrow @ (self, exception)
b 8b @ resume with normal handling
.LnotCaughtLocally: @ r9=exception
*/
.LOP_EXECUTE_INLINE_continue:
rsb r0, r0, #4 @ r0<- 4-r0
- FETCH(r9, 2) @ r9<- FEDC
+ FETCH(rINST, 2) @ rINST<- FEDC
add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each
bl common_abort @ (skipped due to ARM prefetch)
-4: and ip, r9, #0xf000 @ isolate F
+4: and ip, rINST, #0xf000 @ isolate F
ldr r3, [rFP, ip, lsr #10] @ r3<- vF (shift right 12, left 2)
-3: and ip, r9, #0x0f00 @ isolate E
+3: and ip, rINST, #0x0f00 @ isolate E
ldr r2, [rFP, ip, lsr #6] @ r2<- vE
-2: and ip, r9, #0x00f0 @ isolate D
+2: and ip, rINST, #0x00f0 @ isolate D
ldr r1, [rFP, ip, lsr #2] @ r1<- vD
-1: and ip, r9, #0x000f @ isolate C
+1: and ip, rINST, #0x000f @ isolate C
ldr r0, [rFP, ip, lsl #2] @ r0<- vC
0:
- ldr r9, .LOP_EXECUTE_INLINE_table @ table of InlineOperation
- ldr pc, [r9, r10, lsl #4] @ sizeof=16, "func" is first entry
+ ldr rINST, .LOP_EXECUTE_INLINE_table @ table of InlineOperation
+ ldr pc, [rINST, r10, lsl #4] @ sizeof=16, "func" is first entry
@ (not reached)
/*
bl dvmResolveInlineNative
cmp r0, #0 @ did it resolve?
beq .LOP_EXECUTE_INLINE_resume @ no, just move on
+ mov r9, r0 @ remember method
mov r1, rSELF
bl dvmFastMethodTraceEnter @ (method, self)
add r1, rSELF, #offThread_retval@ r1<- &self->retval
mov r0, rINST, lsr #12 @ r0<- B
str r1, [sp] @ push &self->retval
bl .LOP_EXECUTE_INLINE_continue @ make call; will return after
+ mov rINST, r0 @ save result of inline
add sp, sp, #8 @ pop stack
- cmp r0, #0 @ test boolean result of inline
- beq common_exceptionThrown @ returned false, handle exception
- mov r0, r10
- bl dvmResolveInlineNative @ reload method
+ mov r0, r9 @ r0<- method
mov r1, rSELF
- bl dvmFastMethodTraceExit @ (method, self)
+ bl dvmFastNativeMethodTraceExit @ (method, self)
+ cmp rINST, #0 @ test boolean result of inline
+ beq common_exceptionThrown @ returned false, handle exception
FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
GET_INST_OPCODE(ip) @ extract opcode from rINST
GOTO_OPCODE(ip) @ jump to next instruction
bl dvmResolveInlineNative
cmp r0, #0 @ did it resolve?
beq .LOP_EXECUTE_INLINE_RANGE_resume @ no, just move on
+ mov r9, r0 @ remember method
mov r1, rSELF
bl dvmFastMethodTraceEnter @ (method, self)
add r1, rSELF, #offThread_retval@ r1<- &self->retval
sub sp, sp, #8 @ make room for arg, +64 bit align
- mov r0, rINST, lsr #12 @ r0<- B
+ mov r0, rINST, lsr #8 @ r0<- B
+ mov rINST, r9 @ rINST<- method
str r1, [sp] @ push &self->retval
bl .LOP_EXECUTE_INLINE_RANGE_continue @ make call; will return after
+ mov r9, r0 @ save result of inline
add sp, sp, #8 @ pop stack
- cmp r0, #0 @ test boolean result of inline
- beq common_exceptionThrown @ returned false, handle exception
- mov r0, r10
- bl dvmResolveInlineNative @ reload method
+ mov r0, rINST @ r0<- method
mov r1, rSELF
- bl dvmFastMethodTraceExit @ (method, self)
+ bl dvmFastNativeMethodTraceExit @ (method, self)
+ cmp r9, #0 @ test boolean result of inline
+ beq common_exceptionThrown @ returned false, handle exception
FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
GET_INST_OPCODE(ip) @ extract opcode from rINST
GOTO_OPCODE(ip) @ jump to next instruction
2:
@ Profiling - record method entry. r0: methodToCall
stmfd sp!, {r0-r3} @ preserve r0-r3
+ str rPC, [rSELF, #offThread_pc] @ update interpSave.pc
+ str rFP, [rSELF, #offThread_fp] @ update interpSave.fp
mov r1, r0
mov r0, rSELF
bl dvmReportInvoke @ (self, method)
11:
@ r0=newFp, r1=&retval, r2=methodToCall, r3=self, lr=subModes
stmfd sp!, {r0-r3} @ save all but subModes
- mov r0, rPC
+ str rPC, [rSELF, #offThread_pc] @ update interpSave.pc
+ str rFP, [rSELF, #offThread_fp] @ update interpSave.fp
+ mov r0, r2 @ r0<- methodToCall
mov r1, rSELF
- bl dvmReportPreNativeInvoke @ (pc, self, methodToCall)
+ bl dvmReportPreNativeInvoke @ (methodToCall, self)
ldmfd sp, {r0-r3} @ refresh. NOTE: no sp autoincrement
@ Call the native method
ldmfd sp!, {r0-r3} @ r2<- methodToCall (others unneeded)
@ Finish up any post-invoke subMode requirements
- mov r0, rPC
+ @ interpSave already up-to-date
+ mov r0, r2 @ r0<- methodToCall
mov r1, rSELF
- bl dvmReportPostNativeInvoke @ (pc, self, methodToCall)
+ bl dvmReportPostNativeInvoke @ (methodToCall, self)
b 7b @ resume
.LstackOverflow: @ r0=methodToCall
19:
@ Handle special actions
@ On entry, r0: StackSaveArea
- ldr r2, [r0, #offStackSaveArea_prevFrame] @ r2<- prevFP
- mov r1, rPC
+ ldr r1, [r0, #offStackSaveArea_prevFrame] @ r1<- prevFP
+ str rPC, [rSELF, #offThread_pc] @ update interpSave.pc
+ str r1, [rSELF, #offThread_fp] @ update interpSave.fp
mov r0, rSELF
- bl dvmReportReturn @ (self, pc, prevFP)
+ bl dvmReportReturn @ (self)
SAVEAREA_FROM_FP(r0, rFP) @ restore StackSaveArea
b 14b @ continue
@ Manage debugger bookkeeping
7:
+ str rPC, [rSELF, #offThread_pc] @ update interpSave.pc
+ str rFP, [rSELF, #offThread_fp] @ update interpSave.fp
mov r0, rSELF @ arg0<- self
- ldr r1, [rSELF, #offThread_method] @ arg1<- curMethod
- mov r2, rPC @ arg2<- pc
- mov r3, rFP @ arg3<- fp
- bl dvmReportExceptionThrow @ (self, method, pc, fp)
+ mov r1, r9 @ arg1<- exception
+ bl dvmReportExceptionThrow @ (self, exception)
b 8b @ resume with normal handling
.LnotCaughtLocally: @ r9=exception
*/
.LOP_EXECUTE_INLINE_continue:
rsb r0, r0, #4 @ r0<- 4-r0
- FETCH(r9, 2) @ r9<- FEDC
+ FETCH(rINST, 2) @ rINST<- FEDC
add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each
bl common_abort @ (skipped due to ARM prefetch)
-4: and ip, r9, #0xf000 @ isolate F
+4: and ip, rINST, #0xf000 @ isolate F
ldr r3, [rFP, ip, lsr #10] @ r3<- vF (shift right 12, left 2)
-3: and ip, r9, #0x0f00 @ isolate E
+3: and ip, rINST, #0x0f00 @ isolate E
ldr r2, [rFP, ip, lsr #6] @ r2<- vE
-2: and ip, r9, #0x00f0 @ isolate D
+2: and ip, rINST, #0x00f0 @ isolate D
ldr r1, [rFP, ip, lsr #2] @ r1<- vD
-1: and ip, r9, #0x000f @ isolate C
+1: and ip, rINST, #0x000f @ isolate C
ldr r0, [rFP, ip, lsl #2] @ r0<- vC
0:
- ldr r9, .LOP_EXECUTE_INLINE_table @ table of InlineOperation
- ldr pc, [r9, r10, lsl #4] @ sizeof=16, "func" is first entry
+ ldr rINST, .LOP_EXECUTE_INLINE_table @ table of InlineOperation
+ ldr pc, [rINST, r10, lsl #4] @ sizeof=16, "func" is first entry
@ (not reached)
/*
bl dvmResolveInlineNative
cmp r0, #0 @ did it resolve?
beq .LOP_EXECUTE_INLINE_resume @ no, just move on
+ mov r9, r0 @ remember method
mov r1, rSELF
bl dvmFastMethodTraceEnter @ (method, self)
add r1, rSELF, #offThread_retval@ r1<- &self->retval
mov r0, rINST, lsr #12 @ r0<- B
str r1, [sp] @ push &self->retval
bl .LOP_EXECUTE_INLINE_continue @ make call; will return after
+ mov rINST, r0 @ save result of inline
add sp, sp, #8 @ pop stack
- cmp r0, #0 @ test boolean result of inline
- beq common_exceptionThrown @ returned false, handle exception
- mov r0, r10
- bl dvmResolveInlineNative @ reload method
+ mov r0, r9 @ r0<- method
mov r1, rSELF
- bl dvmFastMethodTraceExit @ (method, self)
+ bl dvmFastNativeMethodTraceExit @ (method, self)
+ cmp rINST, #0 @ test boolean result of inline
+ beq common_exceptionThrown @ returned false, handle exception
FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
GET_INST_OPCODE(ip) @ extract opcode from rINST
GOTO_OPCODE(ip) @ jump to next instruction
bl dvmResolveInlineNative
cmp r0, #0 @ did it resolve?
beq .LOP_EXECUTE_INLINE_RANGE_resume @ no, just move on
+ mov r9, r0 @ remember method
mov r1, rSELF
bl dvmFastMethodTraceEnter @ (method, self)
add r1, rSELF, #offThread_retval@ r1<- &self->retval
sub sp, sp, #8 @ make room for arg, +64 bit align
- mov r0, rINST, lsr #12 @ r0<- B
+ mov r0, rINST, lsr #8 @ r0<- AA (arg count)
+ mov rINST, r9 @ rINST<- method
str r1, [sp] @ push &self->retval
bl .LOP_EXECUTE_INLINE_RANGE_continue @ make call; will return after
+ mov r9, r0 @ save result of inline
add sp, sp, #8 @ pop stack
- cmp r0, #0 @ test boolean result of inline
- beq common_exceptionThrown @ returned false, handle exception
- mov r0, r10
- bl dvmResolveInlineNative @ reload method
+ mov r0, rINST @ r0<- method
mov r1, rSELF
- bl dvmFastMethodTraceExit @ (method, self)
+ bl dvmFastNativeMethodTraceExit @ (method, self)
+ cmp r9, #0 @ test boolean result of inline
+ beq common_exceptionThrown @ returned false, handle exception
FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
GET_INST_OPCODE(ip) @ extract opcode from rINST
GOTO_OPCODE(ip) @ jump to next instruction
2:
@ Profiling - record method entry. r0: methodToCall
stmfd sp!, {r0-r3} @ preserve r0-r3
+ str rPC, [rSELF, #offThread_pc] @ update interpSave.pc
+ str rFP, [rSELF, #offThread_fp] @ update interpSave.fp
mov r1, r0
mov r0, rSELF
bl dvmReportInvoke @ (self, method)
11:
@ r0=newFp, r1=&retval, r2=methodToCall, r3=self, lr=subModes
stmfd sp!, {r0-r3} @ save all but subModes
- mov r0, rPC
+ str rPC, [rSELF, #offThread_pc] @ update interpSave.pc
+ str rFP, [rSELF, #offThread_fp] @ update interpSave.fp
+ mov r0, r2 @ r0<- methodToCall
mov r1, rSELF
- bl dvmReportPreNativeInvoke @ (pc, self, methodToCall)
+ bl dvmReportPreNativeInvoke @ (methodToCall, self)
ldmfd sp, {r0-r3} @ refresh. NOTE: no sp autoincrement
@ Call the native method
ldmfd sp!, {r0-r3} @ r2<- methodToCall (others unneeded)
@ Finish up any post-invoke subMode requirements
- mov r0, rPC
+ @ interpSave already up-to-date
+ mov r0, r2 @ r0<- methodToCall
mov r1, rSELF
- bl dvmReportPostNativeInvoke @ (pc, self, methodToCall)
+ bl dvmReportPostNativeInvoke @ (methodToCall, self)
b 7b @ resume
.LstackOverflow: @ r0=methodToCall
19:
@ Handle special actions
@ On entry, r0: StackSaveArea
- ldr r2, [r0, #offStackSaveArea_prevFrame] @ r2<- prevFP
- mov r1, rPC
+ ldr r1, [r0, #offStackSaveArea_prevFrame] @ r1<- prevFP
+ str rPC, [rSELF, #offThread_pc] @ update interpSave.pc
+ str r1, [rSELF, #offThread_fp] @ update interpSave.fp
mov r0, rSELF
- bl dvmReportReturn @ (self, pc, prevFP)
+ bl dvmReportReturn @ (self)
SAVEAREA_FROM_FP(r0, rFP) @ restore StackSaveArea
b 14b @ continue
@ Manage debugger bookkeeping
7:
+ str rPC, [rSELF, #offThread_pc] @ update interpSave.pc
+ str rFP, [rSELF, #offThread_fp] @ update interpSave.fp
mov r0, rSELF @ arg0<- self
- ldr r1, [rSELF, #offThread_method] @ arg1<- curMethod
- mov r2, rPC @ arg2<- pc
- mov r3, rFP @ arg3<- fp
- bl dvmReportExceptionThrow @ (self, method, pc, fp)
+ mov r1, r9 @ arg1<- exception
+ bl dvmReportExceptionThrow @ (self, exception)
b 8b @ resume with normal handling
.LnotCaughtLocally: @ r9=exception
*/
.LOP_EXECUTE_INLINE_continue:
rsb r0, r0, #4 @ r0<- 4-r0
- FETCH(r9, 2) @ r9<- FEDC
+ FETCH(rINST, 2) @ rINST<- FEDC
add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each
bl common_abort @ (skipped due to ARM prefetch)
-4: and ip, r9, #0xf000 @ isolate F
+4: and ip, rINST, #0xf000 @ isolate F
ldr r3, [rFP, ip, lsr #10] @ r3<- vF (shift right 12, left 2)
-3: and ip, r9, #0x0f00 @ isolate E
+3: and ip, rINST, #0x0f00 @ isolate E
ldr r2, [rFP, ip, lsr #6] @ r2<- vE
-2: and ip, r9, #0x00f0 @ isolate D
+2: and ip, rINST, #0x00f0 @ isolate D
ldr r1, [rFP, ip, lsr #2] @ r1<- vD
-1: and ip, r9, #0x000f @ isolate C
+1: and ip, rINST, #0x000f @ isolate C
ldr r0, [rFP, ip, lsl #2] @ r0<- vC
0:
- ldr r9, .LOP_EXECUTE_INLINE_table @ table of InlineOperation
- ldr pc, [r9, r10, lsl #4] @ sizeof=16, "func" is first entry
+ ldr rINST, .LOP_EXECUTE_INLINE_table @ table of InlineOperation
+ ldr pc, [rINST, r10, lsl #4] @ sizeof=16, "func" is first entry
@ (not reached)
/*
bl dvmResolveInlineNative
cmp r0, #0 @ did it resolve?
beq .LOP_EXECUTE_INLINE_resume @ no, just move on
+ mov r9, r0 @ remember method
mov r1, rSELF
bl dvmFastMethodTraceEnter @ (method, self)
add r1, rSELF, #offThread_retval@ r1<- &self->retval
mov r0, rINST, lsr #12 @ r0<- B
str r1, [sp] @ push &self->retval
bl .LOP_EXECUTE_INLINE_continue @ make call; will return after
+ mov rINST, r0 @ save result of inline
add sp, sp, #8 @ pop stack
- cmp r0, #0 @ test boolean result of inline
- beq common_exceptionThrown @ returned false, handle exception
- mov r0, r10
- bl dvmResolveInlineNative @ reload method
+ mov r0, r9 @ r0<- method
mov r1, rSELF
- bl dvmFastMethodTraceExit @ (method, self)
+ bl dvmFastNativeMethodTraceExit @ (method, self)
+ cmp rINST, #0 @ test boolean result of inline
+ beq common_exceptionThrown @ returned false, handle exception
FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
GET_INST_OPCODE(ip) @ extract opcode from rINST
GOTO_OPCODE(ip) @ jump to next instruction
bl dvmResolveInlineNative
cmp r0, #0 @ did it resolve?
beq .LOP_EXECUTE_INLINE_RANGE_resume @ no, just move on
+ mov r9, r0 @ remember method
mov r1, rSELF
bl dvmFastMethodTraceEnter @ (method, self)
add r1, rSELF, #offThread_retval@ r1<- &self->retval
sub sp, sp, #8 @ make room for arg, +64 bit align
- mov r0, rINST, lsr #12 @ r0<- B
+ mov r0, rINST, lsr #8 @ r0<- AA (arg count)
+ mov rINST, r9 @ rINST<- method
str r1, [sp] @ push &self->retval
bl .LOP_EXECUTE_INLINE_RANGE_continue @ make call; will return after
+ mov r9, r0 @ save result of inline
add sp, sp, #8 @ pop stack
- cmp r0, #0 @ test boolean result of inline
- beq common_exceptionThrown @ returned false, handle exception
- mov r0, r10
- bl dvmResolveInlineNative @ reload method
+ mov r0, rINST @ r0<- method
mov r1, rSELF
- bl dvmFastMethodTraceExit @ (method, self)
+ bl dvmFastNativeMethodTraceExit @ (method, self)
+ cmp r9, #0 @ test boolean result of inline
+ beq common_exceptionThrown @ returned false, handle exception
FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
GET_INST_OPCODE(ip) @ extract opcode from rINST
GOTO_OPCODE(ip) @ jump to next instruction
2:
@ Profiling - record method entry. r0: methodToCall
stmfd sp!, {r0-r3} @ preserve r0-r3
+ str rPC, [rSELF, #offThread_pc] @ update interpSave.pc
+ str rFP, [rSELF, #offThread_fp] @ update interpSave.fp
mov r1, r0
mov r0, rSELF
bl dvmReportInvoke @ (self, method)
11:
@ r0=newFp, r1=&retval, r2=methodToCall, r3=self, lr=subModes
stmfd sp!, {r0-r3} @ save all but subModes
- mov r0, rPC
+ str rPC, [rSELF, #offThread_pc] @ update interpSave.pc
+ str rFP, [rSELF, #offThread_fp] @ update interpSave.fp
+ mov r0, r2 @ r0<- methodToCall
mov r1, rSELF
- bl dvmReportPreNativeInvoke @ (pc, self, methodToCall)
+ bl dvmReportPreNativeInvoke @ (methodToCall, self)
ldmfd sp, {r0-r3} @ refresh. NOTE: no sp autoincrement
@ Call the native method
ldmfd sp!, {r0-r3} @ r2<- methodToCall (others unneeded)
@ Finish up any post-invoke subMode requirements
- mov r0, rPC
+ @ interpSave already up-to-date
+ mov r0, r2 @ r0<- methodToCall
mov r1, rSELF
- bl dvmReportPostNativeInvoke @ (pc, self, methodToCall)
+ bl dvmReportPostNativeInvoke @ (methodToCall, self)
b 7b @ resume
.LstackOverflow: @ r0=methodToCall
19:
@ Handle special actions
@ On entry, r0: StackSaveArea
- ldr r2, [r0, #offStackSaveArea_prevFrame] @ r2<- prevFP
- mov r1, rPC
+ ldr r1, [r0, #offStackSaveArea_prevFrame] @ r1<- prevFP
+ str rPC, [rSELF, #offThread_pc] @ update interpSave.pc
+ str r1, [rSELF, #offThread_fp] @ update interpSave.fp
mov r0, rSELF
- bl dvmReportReturn @ (self, pc, prevFP)
+ bl dvmReportReturn @ (self)
SAVEAREA_FROM_FP(r0, rFP) @ restore StackSaveArea
b 14b @ continue
@ Manage debugger bookkeeping
7:
+ str rPC, [rSELF, #offThread_pc] @ update interpSave.pc
+ str rFP, [rSELF, #offThread_fp] @ update interpSave.fp
mov r0, rSELF @ arg0<- self
- ldr r1, [rSELF, #offThread_method] @ arg1<- curMethod
- mov r2, rPC @ arg2<- pc
- mov r3, rFP @ arg3<- fp
- bl dvmReportExceptionThrow @ (self, method, pc, fp)
+ mov r1, r9 @ arg1<- exception
+ bl dvmReportExceptionThrow @ (self, exception)
b 8b @ resume with normal handling
.LnotCaughtLocally: @ r9=exception
GOTO_NEXT # jump to methodToCall->insns
2:
- /* Live: %eax, %ecx, %edx - preserve */
+ /*
+ * On entry, preserve all:
+ * %eax: method
+ * %ecx: self
+ * %edx: new save area
+ */
SPILL_TMP1(%eax) # preserve methodToCall
SPILL_TMP2(%edx) # preserve newSaveArea
+ movl rPC, offThread_pc(%ecx) # update interpSave.pc
+ movl rFP, offThread_fp(%ecx) # update interpSave.fp
movl %ecx, OUT_ARG0(%esp)
- movl %eax, OUT_ARG0(%esp)
+ movl %eax, OUT_ARG1(%esp)
call dvmReportInvoke # (self, method)
UNSPILL_TMP1(%eax)
UNSPILL_TMP2(%edx)
* %eax=methodToCall, rINST=newFP, %ecx=self
*/
SPILL_TMP1(%eax) # save methodTocall
- movl %eax, OUT_ARG2(%esp)
- movl %ecx, OUT_ARG1(%esp)
- movl rPC, OUT_ARG0(%esp)
- call dvmReportPreNativeInvoke # (pc, self, methodToCall)
+ movl rPC, offThread_pc(%ecx)
+ movl rFP, offThread_fp(%ecx)
+ movl %ecx, OUT_ARG0(%esp)
+ movl %eax, OUT_ARG1(%esp)
+ call dvmReportPreNativeInvoke # (self, methodToCall)
UNSPILL_TMP1(%eax) # restore methodToCall
movl rSELF,%ecx # restore self
UNSPILL_TMP1(%eax) # restore methodToCall
movl rSELF, %ecx
- movl %eax, OUT_ARG2(%esp)
- movl %ecx, OUT_ARG1(%esp)
- movl rPC, OUT_ARG0(%esp)
- call dvmReportPostNativeInvoke # (pc, self, methodToCall)
+ movl %ecx, OUT_ARG0(%esp)
+ movl %eax, OUT_ARG1(%esp)
+ call dvmReportPostNativeInvoke # (self, methodToCall)
jmp 7b # rejoin
.LstackOverflow: # eax=methodToCall
* Handle special subMode actions
* On entry, rFP: prevFP, %ecx: self, %eax: saveArea
*/
- movl rFP, OUT_ARG2(%esp) # parameter prevFP
- movl rPC, OUT_ARG1(%esp) # parameter pc
- movl %ecx, OUT_ARG1(%esp) # parameter self
- call dvmReportReturn # (self, pc, prevFP)
+ movl rFP, offThread_fp(%ecx) # update interpSave.fp
+ movl rPC, offThread_pc(%ecx) # update interpSave.pc
+ movl %ecx, OUT_ARG0(%esp) # parameter self
+ call dvmReportReturn # (self)
movl rSELF, %ecx # restore self
SAVEAREA_FROM_FP %eax # restore saveArea
jmp 14b
#define JIT_STUB_HACK(x)
#endif
+/*
+ * InterpSave's pc and fp must be valid when breaking out to a
+ * "Reportxxx" routine. Because the portable interpreter uses local
+ * variables for these, we must flush prior. Stubs, however, use
+ * the interpSave vars directly, so this is a nop for stubs.
+ */
+#define PC_FP_TO_SELF()
/*
* Opcode handler framing macros. Here, each opcode is a separate function
* for the sake of anything that needs to do disambiguation in a
* common handler with INST_INST.
*
- * The breakpoint itself is handled over in dvmUpdateDebugger(),
+ * The breakpoint itself is handled over in updateDebugger(),
* because we need to detect other events (method entry, single
* step) and report them in the same event packet, and we're not
* yet handling those through breakpoint instructions. By the
/* Handle any special subMode requirements */
if (self->interpBreak.ctl.subMode != 0) {
- dvmReportReturn(self, pc, fp);
+ PC_FP_TO_SELF();
+ dvmReportReturn(self);
}
if (dvmIsBreakFrame(fp)) {
/* update thread FP, and reset local variables */
self->curFrame = fp;
curMethod = SAVEAREA_FROM_FP(fp)->method;
+ self->interpSave.method = curMethod;
//methodClass = curMethod->clazz;
methodClassDex = curMethod->clazz->pDvmDex;
pc = saveArea->savedPc;
* debugger.
*/
if (self->interpBreak.ctl.subMode != 0) {
- dvmReportExceptionThrow(self, curMethod, pc, fp);
+ PC_FP_TO_SELF();
+ dvmReportExceptionThrow(self, exception);
}
/*
*/
//fp = (u4*) self->curFrame;
curMethod = SAVEAREA_FROM_FP(fp)->method;
+ self->interpSave.method = curMethod;
//methodClass = curMethod->clazz;
methodClassDex = curMethod->clazz->pDvmDex;
pc = curMethod->insns + catchRelPc;
* calls. For native calls, we'll mark EXIT on return.
* For non-native calls, EXIT is marked in the RETURN op.
*/
+ PC_FP_TO_SELF();
dvmReportInvoke(self, methodToCall);
}
* frame pointer and other local state, and continue.
*/
curMethod = methodToCall;
+ self->interpSave.method = curMethod;
methodClassDex = curMethod->clazz->pDvmDex;
pc = methodToCall->insns;
self->curFrame = fp = newFp;
DUMP_REGS(methodToCall, newFp, true); // show input args
if (self->interpBreak.ctl.subMode != 0) {
- dvmReportPreNativeInvoke(pc, self, methodToCall);
+ PC_FP_TO_SELF();
+ dvmReportPreNativeInvoke(methodToCall, self);
}
ILOGD("> native <-- %s.%s %s", methodToCall->clazz->descriptor,
(*methodToCall->nativeFunc)(newFp, &retval, methodToCall, self);
if (self->interpBreak.ctl.subMode != 0) {
- dvmReportPostNativeInvoke(pc, self, methodToCall);
+ PC_FP_TO_SELF();
+ dvmReportPostNativeInvoke(methodToCall, self);
}
/* pop frame off */
#define JIT_STUB_HACK(x)
#endif
+/*
+ * InterpSave's pc and fp must be valid when breaking out to a
+ * "Reportxxx" routine. Because the portable interpreter uses local
+ * variables for these, we must flush prior. Stubs, however, use
+ * the interpSave vars directly, so this is a nop for stubs.
+ */
+#define PC_FP_TO_SELF()
/*
* Opcode handler framing macros. Here, each opcode is a separate function
#define JIT_STUB_HACK(x)
#endif
+/*
+ * InterpSave's pc and fp must be valid when breaking out to a
+ * "Reportxxx" routine. Because the portable interpreter uses local
+ * variables for these, we must flush prior. Stubs, however, use
+ * the interpSave vars directly, so this is a nop for stubs.
+ */
+#define PC_FP_TO_SELF()
/*
* Opcode handler framing macros. Here, each opcode is a separate function
#define JIT_STUB_HACK(x)
#endif
+/*
+ * InterpSave's pc and fp must be valid when breaking out to a
+ * "Reportxxx" routine. Because the portable interpreter uses local
+ * variables for these, we must flush prior. Stubs, however, use
+ * the interpSave vars directly, so this is a nop for stubs.
+ */
+#define PC_FP_TO_SELF()
/*
* Opcode handler framing macros. Here, each opcode is a separate function
#define JIT_STUB_HACK(x)
#endif
+/*
+ * InterpSave's pc and fp must be valid when breaking out to a
+ * "Reportxxx" routine. Because the portable interpreter uses local
+ * variables for these, we must flush prior. Stubs, however, use
+ * the interpSave vars directly, so this is a nop for stubs.
+ */
+#define PC_FP_TO_SELF()
/*
* Opcode handler framing macros. Here, each opcode is a separate function
#define JIT_STUB_HACK(x)
/*
+ * InterpSave's pc and fp must be valid when breaking out to a
+ * "Reportxxx" routine. Because the portable interpreter uses local
+ * variables for these, we must flush prior. Stubs, however, use
+ * the interpSave vars directly, so this is a nop for stubs.
+ */
+#define PC_FP_TO_SELF() \
+ self->interpSave.pc = pc; \
+ self->interpSave.fp = fp;
+
+/*
* Instruction framing. For a switch-oriented implementation this is
* case/break, for a threaded implementation it's a goto label and an
* instruction fetch/computed goto.
* for the sake of anything that needs to do disambiguation in a
* common handler with INST_INST.
*
- * The breakpoint itself is handled over in dvmUpdateDebugger(),
+ * The breakpoint itself is handled over in updateDebugger(),
* because we need to detect other events (method entry, single
* step) and report them in the same event packet, and we're not
* yet handling those through breakpoint instructions. By the
/* Handle any special subMode requirements */
if (self->interpBreak.ctl.subMode != 0) {
- dvmReportReturn(self, pc, fp);
+ PC_FP_TO_SELF();
+ dvmReportReturn(self);
}
if (dvmIsBreakFrame(fp)) {
/* update thread FP, and reset local variables */
self->curFrame = fp;
curMethod = SAVEAREA_FROM_FP(fp)->method;
+ self->interpSave.method = curMethod;
//methodClass = curMethod->clazz;
methodClassDex = curMethod->clazz->pDvmDex;
pc = saveArea->savedPc;
* debugger.
*/
if (self->interpBreak.ctl.subMode != 0) {
- dvmReportExceptionThrow(self, curMethod, pc, fp);
+ PC_FP_TO_SELF();
+ dvmReportExceptionThrow(self, exception);
}
/*
*/
//fp = (u4*) self->curFrame;
curMethod = SAVEAREA_FROM_FP(fp)->method;
+ self->interpSave.method = curMethod;
//methodClass = curMethod->clazz;
methodClassDex = curMethod->clazz->pDvmDex;
pc = curMethod->insns + catchRelPc;
* calls. For native calls, we'll mark EXIT on return.
* For non-native calls, EXIT is marked in the RETURN op.
*/
+ PC_FP_TO_SELF();
dvmReportInvoke(self, methodToCall);
}
* frame pointer and other local state, and continue.
*/
curMethod = methodToCall;
+ self->interpSave.method = curMethod;
methodClassDex = curMethod->clazz->pDvmDex;
pc = methodToCall->insns;
self->curFrame = fp = newFp;
DUMP_REGS(methodToCall, newFp, true); // show input args
if (self->interpBreak.ctl.subMode != 0) {
- dvmReportPreNativeInvoke(pc, self, methodToCall);
+ PC_FP_TO_SELF();
+ dvmReportPreNativeInvoke(methodToCall, self);
}
ILOGD("> native <-- %s.%s %s", methodToCall->clazz->descriptor,
(*methodToCall->nativeFunc)(newFp, &retval, methodToCall, self);
if (self->interpBreak.ctl.subMode != 0) {
- dvmReportPostNativeInvoke(pc, self, methodToCall);
+ PC_FP_TO_SELF();
+ dvmReportPostNativeInvoke(methodToCall, self);
}
/* pop frame off */
#define JIT_STUB_HACK(x)
#endif
+/*
+ * InterpSave's pc and fp must be valid when breaking out to a
+ * "Reportxxx" routine. Because the portable interpreter uses local
+ * variables for these, we must flush prior. Stubs, however, use
+ * the interpSave vars directly, so this is a nop for stubs.
+ */
+#define PC_FP_TO_SELF()
/*
* Opcode handler framing macros. Here, each opcode is a separate function
* for the sake of anything that needs to do disambiguation in a
* common handler with INST_INST.
*
- * The breakpoint itself is handled over in dvmUpdateDebugger(),
+ * The breakpoint itself is handled over in updateDebugger(),
* because we need to detect other events (method entry, single
* step) and report them in the same event packet, and we're not
* yet handling those through breakpoint instructions. By the
/* Handle any special subMode requirements */
if (self->interpBreak.ctl.subMode != 0) {
- dvmReportReturn(self, pc, fp);
+ PC_FP_TO_SELF();
+ dvmReportReturn(self);
}
if (dvmIsBreakFrame(fp)) {
/* update thread FP, and reset local variables */
self->curFrame = fp;
curMethod = SAVEAREA_FROM_FP(fp)->method;
+ self->interpSave.method = curMethod;
//methodClass = curMethod->clazz;
methodClassDex = curMethod->clazz->pDvmDex;
pc = saveArea->savedPc;
* debugger.
*/
if (self->interpBreak.ctl.subMode != 0) {
- dvmReportExceptionThrow(self, curMethod, pc, fp);
+ PC_FP_TO_SELF();
+ dvmReportExceptionThrow(self, exception);
}
/*
*/
//fp = (u4*) self->curFrame;
curMethod = SAVEAREA_FROM_FP(fp)->method;
+ self->interpSave.method = curMethod;
//methodClass = curMethod->clazz;
methodClassDex = curMethod->clazz->pDvmDex;
pc = curMethod->insns + catchRelPc;
* calls. For native calls, we'll mark EXIT on return.
* For non-native calls, EXIT is marked in the RETURN op.
*/
+ PC_FP_TO_SELF();
dvmReportInvoke(self, methodToCall);
}
* frame pointer and other local state, and continue.
*/
curMethod = methodToCall;
+ self->interpSave.method = curMethod;
methodClassDex = curMethod->clazz->pDvmDex;
pc = methodToCall->insns;
self->curFrame = fp = newFp;
DUMP_REGS(methodToCall, newFp, true); // show input args
if (self->interpBreak.ctl.subMode != 0) {
- dvmReportPreNativeInvoke(pc, self, methodToCall);
+ PC_FP_TO_SELF();
+ dvmReportPreNativeInvoke(methodToCall, self);
}
ILOGD("> native <-- %s.%s %s", methodToCall->clazz->descriptor,
(*methodToCall->nativeFunc)(newFp, &retval, methodToCall, self);
if (self->interpBreak.ctl.subMode != 0) {
- dvmReportPostNativeInvoke(pc, self, methodToCall);
+ PC_FP_TO_SELF();
+ dvmReportPostNativeInvoke(methodToCall, self);
}
/* pop frame off */
#define JIT_STUB_HACK(x)
#endif
+/*
+ * InterpSave's pc and fp must be valid when breaking out to a
+ * "Reportxxx" routine. Because the portable interpreter uses local
+ * variables for these, we must flush prior. Stubs, however, use
+ * the interpSave vars directly, so this is a nop for stubs.
+ */
+#define PC_FP_TO_SELF()
/*
* Opcode handler framing macros. Here, each opcode is a separate function
/* Handle any special subMode requirements */
if (self->interpBreak.ctl.subMode != 0) {
- dvmReportReturn(self, pc, fp);
+ PC_FP_TO_SELF();
+ dvmReportReturn(self);
}
if (dvmIsBreakFrame(fp)) {
/* update thread FP, and reset local variables */
self->curFrame = fp;
curMethod = SAVEAREA_FROM_FP(fp)->method;
+ self->interpSave.method = curMethod;
//methodClass = curMethod->clazz;
methodClassDex = curMethod->clazz->pDvmDex;
pc = saveArea->savedPc;
* debugger.
*/
if (self->interpBreak.ctl.subMode != 0) {
- dvmReportExceptionThrow(self, curMethod, pc, fp);
+ PC_FP_TO_SELF();
+ dvmReportExceptionThrow(self, exception);
}
/*
*/
//fp = (u4*) self->curFrame;
curMethod = SAVEAREA_FROM_FP(fp)->method;
+ self->interpSave.method = curMethod;
//methodClass = curMethod->clazz;
methodClassDex = curMethod->clazz->pDvmDex;
pc = curMethod->insns + catchRelPc;
* calls. For native calls, we'll mark EXIT on return.
* For non-native calls, EXIT is marked in the RETURN op.
*/
+ PC_FP_TO_SELF();
dvmReportInvoke(self, methodToCall);
}
* frame pointer and other local state, and continue.
*/
curMethod = methodToCall;
+ self->interpSave.method = curMethod;
methodClassDex = curMethod->clazz->pDvmDex;
pc = methodToCall->insns;
self->curFrame = fp = newFp;
DUMP_REGS(methodToCall, newFp, true); // show input args
if (self->interpBreak.ctl.subMode != 0) {
- dvmReportPreNativeInvoke(pc, self, methodToCall);
+ PC_FP_TO_SELF();
+ dvmReportPreNativeInvoke(methodToCall, self);
}
ILOGD("> native <-- %s.%s %s", methodToCall->clazz->descriptor,
(*methodToCall->nativeFunc)(newFp, &retval, methodToCall, self);
if (self->interpBreak.ctl.subMode != 0) {
- dvmReportPostNativeInvoke(pc, self, methodToCall);
+ PC_FP_TO_SELF();
+ dvmReportPostNativeInvoke(methodToCall, self);
}
/* pop frame off */
#define JIT_STUB_HACK(x)
/*
+ * InterpSave's pc and fp must be valid when breaking out to a
+ * "Reportxxx" routine. Because the portable interpreter uses local
+ * variables for these, we must flush prior. Stubs, however, use
+ * the interpSave vars directly, so this is a nop for stubs.
+ */
+#define PC_FP_TO_SELF() \
+ self->interpSave.pc = pc; \
+ self->interpSave.fp = fp;
+
+/*
* Instruction framing. For a switch-oriented implementation this is
* case/break, for a threaded implementation it's a goto label and an
* instruction fetch/computed goto.
GOTO_NEXT # jump to methodToCall->insns
2:
- /* Live: %eax, %ecx, %edx - preserve */
+ /*
+ * On entry, preserve all:
+ * %eax: method
+ * %ecx: self
+ * %edx: new save area
+ */
SPILL_TMP1(%eax) # preserve methodToCall
SPILL_TMP2(%edx) # preserve newSaveArea
+ movl rPC, offThread_pc(%ecx) # update interpSave.pc
+ movl rFP, offThread_fp(%ecx) # update interpSave.fp
movl %ecx, OUT_ARG0(%esp)
- movl %eax, OUT_ARG0(%esp)
+ movl %eax, OUT_ARG1(%esp)
call dvmReportInvoke # (self, method)
UNSPILL_TMP1(%eax)
UNSPILL_TMP2(%edx)
* %eax=methodToCall, rINST=newFP, %ecx=self
*/
SPILL_TMP1(%eax) # save methodTocall
- movl %eax, OUT_ARG2(%esp)
- movl %ecx, OUT_ARG1(%esp)
- movl rPC, OUT_ARG0(%esp)
- call dvmReportPreNativeInvoke # (pc, self, methodToCall)
+ movl rPC, offThread_pc(%ecx)
+ movl rFP, offThread_fp(%ecx)
+ movl %ecx, OUT_ARG0(%esp)
+ movl %eax, OUT_ARG1(%esp)
+ call dvmReportPreNativeInvoke # (self, methodToCall)
UNSPILL_TMP1(%eax) # restore methodToCall
movl rSELF,%ecx # restore self
UNSPILL_TMP1(%eax) # restore methodToCall
movl rSELF, %ecx
- movl %eax, OUT_ARG2(%esp)
- movl %ecx, OUT_ARG1(%esp)
- movl rPC, OUT_ARG0(%esp)
- call dvmReportPostNativeInvoke # (pc, self, methodToCall)
+ movl %ecx, OUT_ARG0(%esp)
+ movl %eax, OUT_ARG1(%esp)
+ call dvmReportPostNativeInvoke # (self, methodToCall)
jmp 7b # rejoin
.LstackOverflow: # eax=methodToCall
* Handle special subMode actions
* On entry, rFP: prevFP, %ecx: self, %eax: saveArea
*/
- movl rFP, OUT_ARG2(%esp) # parameter prevFP
- movl rPC, OUT_ARG1(%esp) # parameter pc
- movl %ecx, OUT_ARG1(%esp) # parameter self
- call dvmReportReturn # (self, pc, prevFP)
+ movl rFP, offThread_fp(%ecx) # update interpSave.fp
+ movl rPC, offThread_pc(%ecx) # update interpSave.pc
+ movl %ecx, OUT_ARG0(%esp) # parameter self
+ call dvmReportReturn # (self)
movl rSELF, %ecx # restore self
SAVEAREA_FROM_FP %eax # restore saveArea
jmp 14b