From: buzbee Date: Tue, 29 Mar 2011 17:26:07 +0000 (-0700) Subject: Fix interpreter debug attach X-Git-Tag: android-x86-4.0-r1~156^2~9^2 X-Git-Url: http://git.osdn.net/view?a=commitdiff_plain;h=99e3e6e72e3471eb85fc2e405866392b01c080fe;p=android-x86%2Fdalvik.git Fix interpreter debug attach Fix a few miscellaneous bugs from the interpreter restructuring that were causing a segfault on debugger attach. Added a sanity checking routine for debugging. Fixed a problem in which the JIT's threshold and on/off switch wouldn't get initialized properly on thread creation. Renamed dvmCompilerStateRefresh() to dvmCompilerUpdateGlobalState() to better reflect its function. Change-Id: I5b8af1ce2175e3c6f53cda19dd8e052a5f355587 --- diff --git a/vm/Debugger.c b/vm/Debugger.c index 2ebc300f7..f7e35d9c9 100644 --- a/vm/Debugger.c +++ b/vm/Debugger.c @@ -399,6 +399,9 @@ void dvmDbgActive(void) dvmInitBreakpoints(); gDvm.debuggerActive = true; dvmUpdateAllInterpBreak(kInterpDebugBreak, kSubModeDebuggerActive, true); +#if defined(WITH_JIT) + dvmCompilerUpdateGlobalState(); +#endif } /* @@ -415,6 +418,9 @@ void dvmDbgDisconnected(void) gDvm.debuggerActive = false; dvmUpdateAllInterpBreak(kInterpDebugBreak, kSubModeDebuggerActive, false); +#if defined(WITH_JIT) + dvmCompilerUpdateGlobalState(); +#endif dvmHashTableLock(gDvm.dbgRegistry); gDvm.debuggerConnected = false; diff --git a/vm/Profile.c b/vm/Profile.c index 1635d7999..6faf72686 100644 --- a/vm/Profile.c +++ b/vm/Profile.c @@ -220,6 +220,10 @@ static void updateActiveProfilers(InterpBreakFlags newBreak, // Tell the threads dvmUpdateAllInterpBreak(newBreak, newMode, enable); +#if defined(WITH_JIT) + dvmCompilerUpdateGlobalState(); +#endif + LOGD("+++ active profiler count now %d\n", newValue); } @@ -748,7 +752,7 @@ void dvmFastMethodTraceExit(Thread* self) /* * Register the METHOD_TRACE_EXIT action for the fast interpreter and * JIT'ed code for JNI methods. The about-to-return JNI callee method is passed - * in explicitly. 
+ * in explicitly. Also used for inline-execute. */ void dvmFastNativeMethodTraceExit(const Method* method, Thread* self) { diff --git a/vm/SignalCatcher.c b/vm/SignalCatcher.c index 5a5e037d5..c8f9833cb 100644 --- a/vm/SignalCatcher.c +++ b/vm/SignalCatcher.c @@ -239,9 +239,10 @@ static void handleSigUsr2(void) dvmCompilerDumpStats(); /* Stress-test unchain all */ dvmJitUnchainAll(); - LOGD("Send %d more signals to rest the code cache", + LOGD("Send %d more signals to reset the code cache", codeCacheResetCount & 7); } + dvmCheckInterpStateConsistency(); } #endif diff --git a/vm/Thread.c b/vm/Thread.c index 3d9663d63..cdf49bc2d 100644 --- a/vm/Thread.c +++ b/vm/Thread.c @@ -1412,6 +1412,9 @@ bool dvmCreateInterpThread(Object* threadObj, int reqStackSize) newThread->prev = gDvm.threadList; gDvm.threadList->next = newThread; + /* Add any existing global modes to the interpBreak control */ + dvmInitializeInterpBreak(newThread); + if (!dvmGetFieldBoolean(threadObj, gDvm.offJavaLangThread_daemon)) gDvm.nonDaemonThreadCount++; // guarded by thread list lock diff --git a/vm/compiler/Compiler.c b/vm/compiler/Compiler.c index a485ca546..dfa6589c0 100644 --- a/vm/compiler/Compiler.c +++ b/vm/compiler/Compiler.c @@ -458,7 +458,7 @@ static bool compilerThreadStartup(void) gDvmJit.pProfTable = dvmDebuggerOrProfilerActive() ? 
NULL : pJitProfTable; gDvmJit.pProfTableCopy = pJitProfTable; gDvmJit.pJitTraceProfCounters = pJitTraceProfCounters; - dvmJitUpdateState(); + dvmJitUpdateThreadStateAll(); dvmUnlockMutex(&gDvmJit.tableLock); /* Signal running threads to refresh their cached pJitTable pointers */ @@ -739,7 +739,7 @@ void dvmCompilerShutdown(void) /* Disable new translation requests */ gDvmJit.pProfTable = NULL; gDvmJit.pProfTableCopy = NULL; - dvmJitUpdateState(); + dvmJitUpdateThreadStateAll(); if (gDvm.verboseShutdown || gDvmJit.profileMode == kTraceProfilingContinuous) { @@ -774,7 +774,7 @@ void dvmCompilerShutdown(void) */ } -void dvmCompilerStateRefresh() +void dvmCompilerUpdateGlobalState() { bool jitActive; bool jitActivate; @@ -831,5 +831,5 @@ void dvmCompilerStateRefresh() if (needUnchain) dvmJitUnchainAll(); // Make sure all threads have current values - dvmJitUpdateState(); + dvmJitUpdateThreadStateAll(); } diff --git a/vm/compiler/Compiler.h b/vm/compiler/Compiler.h index 5d5036fbd..12add7539 100644 --- a/vm/compiler/Compiler.h +++ b/vm/compiler/Compiler.h @@ -233,7 +233,7 @@ void dvmCompilerDataFlowAnalysisDispatcher(struct CompilationUnit *cUnit, bool isIterative); void dvmCompilerMethodSSATransformation(struct CompilationUnit *cUnit); bool dvmCompilerBuildLoop(struct CompilationUnit *cUnit); -void dvmCompilerStateRefresh(void); +void dvmCompilerUpdateGlobalState(void); JitTraceDescription *dvmCopyTraceDescriptor(const u2 *pc, const struct JitEntry *desc); void *dvmCompilerGetInterpretTemplate(); diff --git a/vm/compiler/codegen/arm/armv5te-vfp/ArchVariant.c b/vm/compiler/codegen/arm/armv5te-vfp/ArchVariant.c index c1792eda1..3d8505221 100644 --- a/vm/compiler/codegen/arm/armv5te-vfp/ArchVariant.c +++ b/vm/compiler/codegen/arm/armv5te-vfp/ArchVariant.c @@ -84,7 +84,7 @@ bool dvmCompilerArchVariantInit(void) gDvmJit.disableOpt |= (1 << kMethodJit); // Make sure all threads have current values - dvmJitUpdateState(); + dvmJitUpdateThreadStateAll(); return true; } diff 
--git a/vm/compiler/codegen/arm/armv5te/ArchVariant.c b/vm/compiler/codegen/arm/armv5te/ArchVariant.c index 817b68a30..57a8c8a1d 100644 --- a/vm/compiler/codegen/arm/armv5te/ArchVariant.c +++ b/vm/compiler/codegen/arm/armv5te/ArchVariant.c @@ -84,7 +84,7 @@ bool dvmCompilerArchVariantInit(void) gDvmJit.disableOpt |= (1 << kMethodJit); // Make sure all threads have current values - dvmJitUpdateState(); + dvmJitUpdateThreadStateAll(); return true; } diff --git a/vm/compiler/codegen/arm/armv7-a-neon/ArchVariant.c b/vm/compiler/codegen/arm/armv7-a-neon/ArchVariant.c index ff8066277..59d7c95cd 100644 --- a/vm/compiler/codegen/arm/armv7-a-neon/ArchVariant.c +++ b/vm/compiler/codegen/arm/armv7-a-neon/ArchVariant.c @@ -79,7 +79,7 @@ bool dvmCompilerArchVariantInit(void) gDvmJit.disableOpt |= (1 << kMethodJit); // Make sure all threads have current values - dvmJitUpdateState(); + dvmJitUpdateThreadStateAll(); return true; } diff --git a/vm/compiler/codegen/arm/armv7-a/ArchVariant.c b/vm/compiler/codegen/arm/armv7-a/ArchVariant.c index ff8066277..59d7c95cd 100644 --- a/vm/compiler/codegen/arm/armv7-a/ArchVariant.c +++ b/vm/compiler/codegen/arm/armv7-a/ArchVariant.c @@ -79,7 +79,7 @@ bool dvmCompilerArchVariantInit(void) gDvmJit.disableOpt |= (1 << kMethodJit); // Make sure all threads have current values - dvmJitUpdateState(); + dvmJitUpdateThreadStateAll(); return true; } diff --git a/vm/compiler/codegen/x86/ia32/ArchVariant.c b/vm/compiler/codegen/x86/ia32/ArchVariant.c index 2abac8823..90f14a33a 100644 --- a/vm/compiler/codegen/x86/ia32/ArchVariant.c +++ b/vm/compiler/codegen/x86/ia32/ArchVariant.c @@ -78,7 +78,7 @@ bool dvmCompilerArchVariantInit(void) sizeof(struct JitToInterpEntries)) <= 128); // Make sure all threads have current values - dvmJitUpdateState(); + dvmJitUpdateThreadStateAll(); return true; } diff --git a/vm/compiler/template/armv5te/TEMPLATE_MONITOR_ENTER_DEBUG.S b/vm/compiler/template/armv5te/TEMPLATE_MONITOR_ENTER_DEBUG.S index 56027fd91..26954838b 
100644 --- a/vm/compiler/template/armv5te/TEMPLATE_MONITOR_ENTER_DEBUG.S +++ b/vm/compiler/template/armv5te/TEMPLATE_MONITOR_ENTER_DEBUG.S @@ -13,7 +13,7 @@ mov r3, #0 @ Record that we're not returning str r3, [r0, #offThread_inJitCodeCache] blx r2 @ dvmLockObject(self, obj) - @ refresh Jit's on/off status & test for exception + @ test for exception ldr r1, [rSELF, #offThread_exception] cmp r1, #0 beq 1f diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S b/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S index 44accab58..58c3ebf92 100644 --- a/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S +++ b/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S @@ -1442,7 +1442,7 @@ dvmCompiler_TEMPLATE_MONITOR_ENTER_DEBUG: mov r3, #0 @ Record that we're not returning str r3, [r0, #offThread_inJitCodeCache] blx r2 @ dvmLockObject(self, obj) - @ refresh Jit's on/off status & test for exception + @ test for exception ldr r1, [rSELF, #offThread_exception] cmp r1, #0 beq 1f diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S b/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S index 3b5c85783..102148159 100644 --- a/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S +++ b/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S @@ -1173,7 +1173,7 @@ dvmCompiler_TEMPLATE_MONITOR_ENTER_DEBUG: mov r3, #0 @ Record that we're not returning str r3, [r0, #offThread_inJitCodeCache] blx r2 @ dvmLockObject(self, obj) - @ refresh Jit's on/off status & test for exception + @ test for exception ldr r1, [rSELF, #offThread_exception] cmp r1, #0 beq 1f diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S index 3905ec8d6..804b471c7 100644 --- a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S +++ b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S @@ -1442,7 +1442,7 @@ dvmCompiler_TEMPLATE_MONITOR_ENTER_DEBUG: mov r3, #0 @ 
Record that we're not returning str r3, [r0, #offThread_inJitCodeCache] blx r2 @ dvmLockObject(self, obj) - @ refresh Jit's on/off status & test for exception + @ test for exception ldr r1, [rSELF, #offThread_exception] cmp r1, #0 beq 1f diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S index b09bc302d..d8713d9ba 100644 --- a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S +++ b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S @@ -1442,7 +1442,7 @@ dvmCompiler_TEMPLATE_MONITOR_ENTER_DEBUG: mov r3, #0 @ Record that we're not returning str r3, [r0, #offThread_inJitCodeCache] blx r2 @ dvmLockObject(self, obj) - @ refresh Jit's on/off status & test for exception + @ test for exception ldr r1, [rSELF, #offThread_exception] cmp r1, #0 beq 1f diff --git a/vm/interp/Interp.c b/vm/interp/Interp.c index 98f478ee8..b4a5ec964 100644 --- a/vm/interp/Interp.c +++ b/vm/interp/Interp.c @@ -596,13 +596,14 @@ void dvmClearSingleStep(Thread* thread) /* * The interpreter just threw. Handle any special subMode requirements. + * All interpSave state must be valid on entry. 
*/ -void dvmReportExceptionThrow(Thread* self, const Method* curMethod, - const u2* pc, void* fp) +void dvmReportExceptionThrow(Thread* self, Object* exception) { + const Method* curMethod = self->interpSave.method; #if defined(WITH_JIT) if (self->interpBreak.ctl.subMode & kSubModeJitTraceBuild) { - dvmJitEndTraceSelect(self, pc); + dvmJitEndTraceSelect(self, self->interpSave.pc); } if (self->interpBreak.ctl.breakFlags & kInterpSingleStep) { /* Discard any single-step native returns to translation */ @@ -611,17 +612,18 @@ void dvmReportExceptionThrow(Thread* self, const Method* curMethod, #endif if (self->interpBreak.ctl.subMode & kSubModeDebuggerActive) { void *catchFrame; - int offset = pc - curMethod->insns; - int catchRelPc = dvmFindCatchBlock(self, offset, self->exception, + int offset = self->interpSave.pc - curMethod->insns; + int catchRelPc = dvmFindCatchBlock(self, offset, exception, true, &catchFrame); - dvmDbgPostException(fp, offset, catchFrame, catchRelPc, - self->exception); + dvmDbgPostException(self->interpSave.fp, offset, catchFrame, + catchRelPc, exception); } } /* * The interpreter is preparing to do an invoke (both native & normal). - * Handle any special subMode requirements. + * Handle any special subMode requirements. All interpSave state + * must be valid on entry. */ void dvmReportInvoke(Thread* self, const Method* methodToCall) { @@ -632,10 +634,10 @@ void dvmReportInvoke(Thread* self, const Method* methodToCall) * The interpreter is preparing to do a native invoke. Handle any * special subMode requirements. NOTE: for a native invoke, * dvmReportInvoke() and dvmReportPreNativeInvoke() will both - * be called prior to the invoke. + * be called prior to the invoke. All interpSave state must + * be valid on entry. 
*/ -void dvmReportPreNativeInvoke(const u2* pc, Thread* self, - const Method* methodToCall) +void dvmReportPreNativeInvoke(const Method* methodToCall, Thread* self) { #if defined(WITH_JIT) /* @@ -643,7 +645,7 @@ void dvmReportPreNativeInvoke(const u2* pc, Thread* self, * builder can't follow into or through a native method. */ if (self->interpBreak.ctl.subMode & kSubModeJitTraceBuild) { - dvmCheckJit(pc, self); + dvmCheckJit(self->interpSave.pc, self); } #endif if (self->interpBreak.ctl.subMode & kSubModeDebuggerActive) { @@ -656,10 +658,10 @@ void dvmReportPreNativeInvoke(const u2* pc, Thread* self, /* * The interpreter has returned from a native invoke. Handle any - * special subMode requirements. + * special subMode requirements. All interpSave state must be + * valid on entry. */ -void dvmReportPostNativeInvoke(const u2* pc, Thread* self, - const Method* methodToCall) +void dvmReportPostNativeInvoke(const Method* methodToCall, Thread* self) { if (self->interpBreak.ctl.subMode & kSubModeDebuggerActive) { Object* thisPtr = dvmGetThisPtr(self->interpSave.method, @@ -674,15 +676,15 @@ void dvmReportPostNativeInvoke(const u2* pc, Thread* self, /* * The interpreter has returned from a normal method. Handle any special - * subMode requirements. + * subMode requirements. All interpSave state must be valid on entry. */ -void dvmReportReturn(Thread* self, const u2* pc, const u4* prevFP) +void dvmReportReturn(Thread* self) { TRACE_METHOD_EXIT(self, self->interpSave.method); #if defined(WITH_JIT) - if (dvmIsBreakFrame(prevFP) && + if (dvmIsBreakFrame(self->interpSave.fp) && (self->interpBreak.ctl.subMode & kSubModeJitTraceBuild)) { - dvmCheckJit(pc, self); + dvmCheckJit(self->interpSave.pc, self); } #endif } @@ -711,8 +713,8 @@ void dvmReportReturn(Thread* self, const u2* pc, const u4* prevFP) * breakpoints. We may be able to speed things up a bit if we don't query * the event list unless we know there's at least one lurking within. 
*/ -void dvmUpdateDebugger(const Method* method, const u2* pc, const u4* fp, - bool methodEntry, Thread* self) +static void updateDebugger(const Method* method, const u2* pc, const u4* fp, + bool methodEntry, Thread* self) { int eventFlags = 0; @@ -1552,6 +1554,54 @@ void dvmUpdateAllInterpBreak(int newBreak, int newMode, bool enable) } /* + * Do a sanity check on interpreter state saved to Thread. + * A failure here doesn't necessarily mean that something is wrong, + * so this code should only be used during development to suggest + * a possible problem. + */ +void dvmCheckInterpStateConsistency() +{ + Thread* self = dvmThreadSelf(); + Thread* thread; + uint8_t breakFlags; + uint8_t subMode; + void* handlerTable; + + dvmLockThreadList(self); + breakFlags = self->interpBreak.ctl.breakFlags; + subMode = self->interpBreak.ctl.subMode; + handlerTable = self->interpBreak.ctl.curHandlerTable; + for (thread = gDvm.threadList; thread != NULL; thread = thread->next) { + if (subMode != thread->interpBreak.ctl.subMode) { + LOGD("Warning: subMode mismatch - 0x%x:0x%x, tid[%d]", + subMode,thread->interpBreak.ctl.subMode,thread->threadId); + } + if (breakFlags != thread->interpBreak.ctl.breakFlags) { + LOGD("Warning: breakFlags mismatch - 0x%x:0x%x, tid[%d]", + breakFlags,thread->interpBreak.ctl.breakFlags,thread->threadId); + } + if (handlerTable != thread->interpBreak.ctl.curHandlerTable) { + LOGD("Warning: curHandlerTable mismatch - 0x%x:0x%x, tid[%d]", + (int)handlerTable,(int)thread->interpBreak.ctl.curHandlerTable, + thread->threadId); + } +#if defined(WITH_JIT) + if (thread->pJitProfTable != gDvmJit.pProfTable) { + LOGD("Warning: pJitProfTable mismatch - 0x%x:0x%x, tid[%d]", + (int)thread->pJitProfTable,(int)gDvmJit.pProfTable, + thread->threadId); + } + if (thread->jitThreshold != gDvmJit.threshold) { + LOGD("Warning: jitThreshold mismatch - 0x%x:0x%x, tid[%d]", + (int)thread->jitThreshold,(int)gDvmJit.threshold, + thread->threadId); + } +#endif + } + 
dvmUnlockThreadList(); +} + +/* * Arm a safepoint callback for a thread. If funct is null, * clear any pending callback. * TODO: only gc is currently using this feature, and will have @@ -1622,9 +1672,32 @@ void dvmInitInterpreterState(Thread* self) self->icRechainCount = PREDICTED_CHAIN_COUNTER_RECHAIN; self->pProfileCountdown = &gDvmJit.profileCountdown; // Jit state that can change - dvmJitUpdateState(); + dvmJitUpdateThreadStateSingle(self); #endif +} +/* + * For a newly-created thread, we need to start off with interpBreak + * set to any existing global modes. The caller must hold the + * thread list lock. + */ +void dvmInitializeInterpBreak(Thread* thread) +{ + u1 flags = 0; + u1 subModes = 0; + + if (gDvm.instructionCountEnableCount > 0) { + flags |= kInterpInstCountBreak; + subModes |= kSubModeInstCounting; + } + if (dvmIsMethodTraceActive()) { + subModes |= kSubModeMethodTrace; + } + if (gDvm.debuggerActive) { + flags |= kInterpDebugBreak; + subModes |= kSubModeDebuggerActive; + } + dvmUpdateInterpBreak(thread, flags, subModes, true); } /* @@ -1702,8 +1775,8 @@ void dvmCheckBefore(const u2 *pc, u4 *fp, Thread* self) } if (self->interpBreak.ctl.subMode & kSubModeDebuggerActive) { - dvmUpdateDebugger(method, pc, fp, - self->debugIsMethodEntry, self); + updateDebugger(method, pc, fp, + self->debugIsMethodEntry, self); } if (gDvm.instructionCountEnableCount != 0) { /* diff --git a/vm/interp/Interp.h b/vm/interp/Interp.h index b6919af15..91e226234 100644 --- a/vm/interp/Interp.h +++ b/vm/interp/Interp.h @@ -77,17 +77,12 @@ void dvmFlushBreakpoints(ClassObject* clazz); /* * Debugger support */ -void dvmUpdateDebugger(const Method* method, const u2* pc, const u4* fp, - bool methodEntry, Thread* self); void dvmCheckBefore(const u2 *dPC, u4 *fp, Thread* self); -void dvmReportExceptionThrow(Thread* self, const Method* curMethod, - const u2* pc, void* fp); -void dvmReportPreNativeInvoke(const u2* pc, Thread* self, - const Method* methodToCall); -void 
dvmReportPostNativeInvoke(const u2* pc, Thread* self, - const Method* methodToCall); +void dvmReportExceptionThrow(Thread* self, Object* exception); +void dvmReportPreNativeInvoke(const Method* methodToCall, Thread* self); +void dvmReportPostNativeInvoke(const Method* methodToCall, Thread* self); void dvmReportInvoke(Thread* self, const Method* methodToCall); -void dvmReportReturn(Thread* self, const u2* pc, const u4* prevFP); +void dvmReportReturn(Thread* self); /* * Update interpBreak @@ -95,6 +90,8 @@ void dvmReportReturn(Thread* self, const u2* pc, const u4* prevFP); void dvmUpdateInterpBreak(Thread* thread, int newBreak, int newMode, bool enable); void dvmAddToSuspendCounts(Thread* thread, int delta, int dbgDelta); +void dvmCheckInterpStateConsistency(); +void dvmInitializeInterpBreak(Thread* thread); /* * Update interpBreak for all threads diff --git a/vm/interp/Jit.c b/vm/interp/Jit.c index 0274a24ed..936bbc8fb 100644 --- a/vm/interp/Jit.c +++ b/vm/interp/Jit.c @@ -440,7 +440,7 @@ void dvmJitStopTranslationRequests() * free it because some thread may be holding a reference. */ gDvmJit.pProfTable = NULL; - dvmJitUpdateState(); + dvmJitUpdateThreadStateAll(); } #if defined(WITH_JIT_TUNING) @@ -1479,18 +1479,26 @@ void dvmJitTraceProfilingOff() } /* + * Update JIT-specific info in Thread structure for a single thread + */ +void dvmJitUpdateThreadStateSingle(Thread* thread) +{ + thread->pJitProfTable = gDvmJit.pProfTable; + thread->jitThreshold = gDvmJit.threshold; +} + +/* * Walk through the thread list and refresh all local copies of * JIT global state (which was placed there for fast access). 
*/ -void dvmJitUpdateState() +void dvmJitUpdateThreadStateAll() { Thread* self = dvmThreadSelf(); Thread* thread; dvmLockThreadList(self); for (thread = gDvm.threadList; thread != NULL; thread = thread->next) { - thread->pJitProfTable = gDvmJit.pProfTable; - thread->jitThreshold = gDvmJit.threshold; + dvmJitUpdateThreadStateSingle(thread); } dvmUnlockThreadList(); diff --git a/vm/interp/Jit.h b/vm/interp/Jit.h index b7e0f4aa1..7dbe9ac78 100644 --- a/vm/interp/Jit.h +++ b/vm/interp/Jit.h @@ -162,7 +162,8 @@ void dvmJitTraceProfilingOff(void); void dvmJitTraceProfilingOn(void); void dvmJitChangeProfileMode(TraceProfilingModes newState); void dvmJitDumpTraceDesc(JitTraceDescription *trace); -void dvmJitUpdateState(void); +void dvmJitUpdateThreadStateSingle(Thread* thread); +void dvmJitUpdateThreadStateAll(void); void dvmJitResumeTranslation(Thread* self, const u2* pc, const u4* fp); #endif /*_DALVIK_INTERP_JIT*/ diff --git a/vm/mterp/armv5te/OP_EXECUTE_INLINE.S b/vm/mterp/armv5te/OP_EXECUTE_INLINE.S index 8cd5f788d..a77ce12c5 100644 --- a/vm/mterp/armv5te/OP_EXECUTE_INLINE.S +++ b/vm/mterp/armv5te/OP_EXECUTE_INLINE.S @@ -47,20 +47,20 @@ */ .L${opcode}_continue: rsb r0, r0, #4 @ r0<- 4-r0 - FETCH(r9, 2) @ r9<- FEDC + FETCH(rINST, 2) @ rINST<- FEDC add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each bl common_abort @ (skipped due to ARM prefetch) -4: and ip, r9, #0xf000 @ isolate F +4: and ip, rINST, #0xf000 @ isolate F ldr r3, [rFP, ip, lsr #10] @ r3<- vF (shift right 12, left 2) -3: and ip, r9, #0x0f00 @ isolate E +3: and ip, rINST, #0x0f00 @ isolate E ldr r2, [rFP, ip, lsr #6] @ r2<- vE -2: and ip, r9, #0x00f0 @ isolate D +2: and ip, rINST, #0x00f0 @ isolate D ldr r1, [rFP, ip, lsr #2] @ r1<- vD -1: and ip, r9, #0x000f @ isolate C +1: and ip, rINST, #0x000f @ isolate C ldr r0, [rFP, ip, lsl #2] @ r0<- vC 0: - ldr r9, .L${opcode}_table @ table of InlineOperation - ldr pc, [r9, r10, lsl #4] @ sizeof=16, "func" is first entry + ldr rINST, .L${opcode}_table @ table of 
InlineOperation + ldr pc, [rINST, r10, lsl #4] @ sizeof=16, "func" is first entry @ (not reached) /* @@ -72,6 +72,7 @@ bl dvmResolveInlineNative cmp r0, #0 @ did it resolve? beq .L${opcode}_resume @ no, just move on + mov r9, r0 @ remember method mov r1, rSELF bl dvmFastMethodTraceEnter @ (method, self) add r1, rSELF, #offThread_retval@ r1<- &self->retval @@ -79,13 +80,13 @@ mov r0, rINST, lsr #12 @ r0<- B str r1, [sp] @ push &self->retval bl .L${opcode}_continue @ make call; will return after + mov rINST, r0 @ save result of inline add sp, sp, #8 @ pop stack - cmp r0, #0 @ test boolean result of inline - beq common_exceptionThrown @ returned false, handle exception - mov r0, r10 - bl dvmResolveInlineNative @ reload method + mov r0, r9 @ r0<- method mov r1, rSELF - bl dvmFastMethodTraceExit @ (method, self) + bl dvmFastNativeMethodTraceExit @ (method, self) + cmp rINST, #0 @ test boolean result of inline + beq common_exceptionThrown @ returned false, handle exception FETCH_ADVANCE_INST(3) @ advance rPC, load rINST GET_INST_OPCODE(ip) @ extract opcode from rINST GOTO_OPCODE(ip) @ jump to next instruction diff --git a/vm/mterp/armv5te/OP_EXECUTE_INLINE_RANGE.S b/vm/mterp/armv5te/OP_EXECUTE_INLINE_RANGE.S index ba19ca505..b5b9c3287 100644 --- a/vm/mterp/armv5te/OP_EXECUTE_INLINE_RANGE.S +++ b/vm/mterp/armv5te/OP_EXECUTE_INLINE_RANGE.S @@ -65,20 +65,22 @@ bl dvmResolveInlineNative cmp r0, #0 @ did it resolve? 
beq .L${opcode}_resume @ no, just move on + mov r9, r0 @ remember method mov r1, rSELF bl dvmFastMethodTraceEnter @ (method, self) add r1, rSELF, #offThread_retval@ r1<- &self->retval sub sp, sp, #8 @ make room for arg, +64 bit align - mov r0, rINST, lsr #12 @ r0<- B + mov r0, rINST, lsr #8 @ r0<- B + mov rINST, r9 @ rINST<- method str r1, [sp] @ push &self->retval bl .L${opcode}_continue @ make call; will return after + mov r9, r0 @ save result of inline add sp, sp, #8 @ pop stack - cmp r0, #0 @ test boolean result of inline - beq common_exceptionThrown @ returned false, handle exception - mov r0, r10 - bl dvmResolveInlineNative @ reload method + mov r0, rINST @ r0<- method mov r1, rSELF - bl dvmFastMethodTraceExit @ (method, self) + bl dvmFastNativeMethodTraceExit @ (method, self) + cmp r9, #0 @ test boolean result of inline + beq common_exceptionThrown @ returned false, handle exception FETCH_ADVANCE_INST(3) @ advance rPC, load rINST GET_INST_OPCODE(ip) @ extract opcode from rINST GOTO_OPCODE(ip) @ jump to next instruction diff --git a/vm/mterp/armv5te/footer.S b/vm/mterp/armv5te/footer.S index 5d35fdefe..b262b1e4f 100644 --- a/vm/mterp/armv5te/footer.S +++ b/vm/mterp/armv5te/footer.S @@ -656,6 +656,8 @@ common_invokeMethodNoRange: 2: @ Profiling - record method entry. r0: methodToCall stmfd sp!, {r0-r3} @ preserve r0-r3 + str rPC, [rSELF, #offThread_pc] @ update interpSave.pc + str rFP, [rSELF, #offThread_fp] @ update interpSave.fp mov r1, r0 mov r0, rSELF bl dvmReportInvoke @ (self, method) @@ -707,9 +709,11 @@ dalvik_mterp: 11: @ r0=newFp, r1=&retval, r2=methodToCall, r3=self, lr=subModes stmfd sp!, {r0-r3} @ save all but subModes - mov r0, rPC + str rPC, [rSELF, #offThread_pc] @ update interpSave.pc + str rFP, [rSELF, #offThread_fp] @ update interpSave.fp + mov r0, r2 @ r0<- methodToCall mov r1, rSELF - bl dvmReportPreNativeInvoke @ (pc, self, methodToCall) + bl dvmReportPreNativeInvoke @ (methodToCall, self) ldmfd sp, {r0-r3} @ refresh. 
NOTE: no sp autoincrement @ Call the native method @@ -720,9 +724,10 @@ dalvik_mterp: ldmfd sp!, {r0-r3} @ r2<- methodToCall (others unneeded) @ Finish up any post-invoke subMode requirements - mov r0, rPC + @ interpSave already up-to-date + mov r0, r2 @ r0<- methodToCall mov r1, rSELF - bl dvmReportPostNativeInvoke @ (pc, self, methodToCall) + bl dvmReportPostNativeInvoke @ (methodToCall, self) b 7b @ resume .LstackOverflow: @ r0=methodToCall @@ -815,10 +820,11 @@ common_returnFromMethod: 19: @ Handle special actions @ On entry, r0: StackSaveArea - ldr r2, [r0, #offStackSaveArea_prevFrame] @ r2<- prevFP - mov r1, rPC + ldr r1, [r0, #offStackSaveArea_prevFrame] @ r2<- prevFP + str rPC, [rSELF, #offThread_pc] @ update interpSave.pc + str r1, [rSELF, #offThread_fp] @ update interpSave.fp mov r0, rSELF - bl dvmReportReturn @ (self, pc, prevFP) + bl dvmReportReturn @ (self) SAVEAREA_FROM_FP(r0, rFP) @ restore StackSaveArea b 14b @ continue @@ -924,11 +930,11 @@ common_exceptionThrown: @ Manage debugger bookkeeping 7: + str rPC, [rSELF, #offThread_pc] @ update interpSave.pc + str rFP, [rSELF, #offThread_fp] @ update interpSave.fp mov r0, rSELF @ arg0<- self - ldr r1, [rSELF, #offThread_method] @ arg1<- curMethod - mov r2, rPC @ arg2<- pc - mov r3, rFP @ arg3<- fp - bl dvmReportExceptionThrow @ (self, method, pc, fp) + mov r1, r9 @ arg1<- exception + bl dvmReportExceptionThrow @ (self, exception) b 8b @ resume with normal handling .LnotCaughtLocally: @ r9=exception diff --git a/vm/mterp/c/OP_BREAKPOINT.c b/vm/mterp/c/OP_BREAKPOINT.c index 0c0cbc8bd..fa0711e9d 100644 --- a/vm/mterp/c/OP_BREAKPOINT.c +++ b/vm/mterp/c/OP_BREAKPOINT.c @@ -8,7 +8,7 @@ HANDLE_OPCODE(OP_BREAKPOINT) * for the sake of anything that needs to do disambiguation in a * common handler with INST_INST. 
* - * The breakpoint itself is handled over in dvmUpdateDebugger(), + * The breakpoint itself is handled over in updateDebugger(), * because we need to detect other events (method entry, single * step) and report them in the same event packet, and we're not * yet handling those through breakpoint instructions. By the diff --git a/vm/mterp/c/gotoTargets.c b/vm/mterp/c/gotoTargets.c index d3190c1b6..50ff67768 100644 --- a/vm/mterp/c/gotoTargets.c +++ b/vm/mterp/c/gotoTargets.c @@ -618,7 +618,8 @@ GOTO_TARGET(returnFromMethod) /* Handle any special subMode requirements */ if (self->interpBreak.ctl.subMode != 0) { - dvmReportReturn(self, pc, fp); + PC_FP_TO_SELF(); + dvmReportReturn(self); } if (dvmIsBreakFrame(fp)) { @@ -630,6 +631,7 @@ GOTO_TARGET(returnFromMethod) /* update thread FP, and reset local variables */ self->curFrame = fp; curMethod = SAVEAREA_FROM_FP(fp)->method; + self->interpSave.method = curMethod; //methodClass = curMethod->clazz; methodClassDex = curMethod->clazz->pDvmDex; pc = saveArea->savedPc; @@ -694,7 +696,8 @@ GOTO_TARGET(exceptionThrown) * debugger. */ if (self->interpBreak.ctl.subMode != 0) { - dvmReportExceptionThrow(self, curMethod, pc, fp); + PC_FP_TO_SELF(); + dvmReportExceptionThrow(self, exception); } /* @@ -768,6 +771,7 @@ GOTO_TARGET(exceptionThrown) */ //fp = (u4*) self->curFrame; curMethod = SAVEAREA_FROM_FP(fp)->method; + self->interpSave.method = curMethod; //methodClass = curMethod->clazz; methodClassDex = curMethod->clazz->pDvmDex; pc = curMethod->insns + catchRelPc; @@ -944,6 +948,7 @@ GOTO_TARGET(invokeMethod, bool methodCallRange, const Method* _methodToCall, * calls. For native calls, we'll mark EXIT on return. * For non-native calls, EXIT is marked in the RETURN op. */ + PC_FP_TO_SELF(); dvmReportInvoke(self, methodToCall); } @@ -953,6 +958,7 @@ GOTO_TARGET(invokeMethod, bool methodCallRange, const Method* _methodToCall, * frame pointer and other local state, and continue. 
*/ curMethod = methodToCall; + self->interpSave.method = curMethod; methodClassDex = curMethod->clazz->pDvmDex; pc = methodToCall->insns; self->curFrame = fp = newFp; @@ -973,7 +979,8 @@ GOTO_TARGET(invokeMethod, bool methodCallRange, const Method* _methodToCall, DUMP_REGS(methodToCall, newFp, true); // show input args if (self->interpBreak.ctl.subMode != 0) { - dvmReportPreNativeInvoke(pc, self, methodToCall); + PC_FP_TO_SELF(); + dvmReportPreNativeInvoke(methodToCall, self); } ILOGD("> native <-- %s.%s %s", methodToCall->clazz->descriptor, @@ -987,7 +994,8 @@ GOTO_TARGET(invokeMethod, bool methodCallRange, const Method* _methodToCall, (*methodToCall->nativeFunc)(newFp, &retval, methodToCall, self); if (self->interpBreak.ctl.subMode != 0) { - dvmReportPostNativeInvoke(pc, self, methodToCall); + PC_FP_TO_SELF(); + dvmReportPostNativeInvoke(methodToCall, self); } /* pop frame off */ diff --git a/vm/mterp/cstubs/stubdefs.c b/vm/mterp/cstubs/stubdefs.c index 00fb8b661..a894f9e8f 100644 --- a/vm/mterp/cstubs/stubdefs.c +++ b/vm/mterp/cstubs/stubdefs.c @@ -37,6 +37,13 @@ #define JIT_STUB_HACK(x) #endif +/* + * InterpSave's pc and fp must be valid when breaking out to a + * "Reportxxx" routine. Because the portable interpreter uses local + * variables for these, we must flush prior. Stubs, however, use + * the interpSave vars directly, so this is a nop for stubs. + */ +#define PC_FP_TO_SELF() /* * Opcode handler framing macros. 
Here, each opcode is a separate function diff --git a/vm/mterp/out/InterpAsm-armv5te-vfp.S b/vm/mterp/out/InterpAsm-armv5te-vfp.S index 1824d0748..77be63855 100644 --- a/vm/mterp/out/InterpAsm-armv5te-vfp.S +++ b/vm/mterp/out/InterpAsm-armv5te-vfp.S @@ -12611,20 +12611,20 @@ d2l_doconv: */ .LOP_EXECUTE_INLINE_continue: rsb r0, r0, #4 @ r0<- 4-r0 - FETCH(r9, 2) @ r9<- FEDC + FETCH(rINST, 2) @ rINST<- FEDC add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each bl common_abort @ (skipped due to ARM prefetch) -4: and ip, r9, #0xf000 @ isolate F +4: and ip, rINST, #0xf000 @ isolate F ldr r3, [rFP, ip, lsr #10] @ r3<- vF (shift right 12, left 2) -3: and ip, r9, #0x0f00 @ isolate E +3: and ip, rINST, #0x0f00 @ isolate E ldr r2, [rFP, ip, lsr #6] @ r2<- vE -2: and ip, r9, #0x00f0 @ isolate D +2: and ip, rINST, #0x00f0 @ isolate D ldr r1, [rFP, ip, lsr #2] @ r1<- vD -1: and ip, r9, #0x000f @ isolate C +1: and ip, rINST, #0x000f @ isolate C ldr r0, [rFP, ip, lsl #2] @ r0<- vC 0: - ldr r9, .LOP_EXECUTE_INLINE_table @ table of InlineOperation - ldr pc, [r9, r10, lsl #4] @ sizeof=16, "func" is first entry + ldr rINST, .LOP_EXECUTE_INLINE_table @ table of InlineOperation + ldr pc, [rINST, r10, lsl #4] @ sizeof=16, "func" is first entry @ (not reached) /* @@ -12636,6 +12636,7 @@ d2l_doconv: bl dvmResolveInlineNative cmp r0, #0 @ did it resolve? 
beq .LOP_EXECUTE_INLINE_resume @ no, just move on + mov r9, r0 @ remember method mov r1, rSELF bl dvmFastMethodTraceEnter @ (method, self) add r1, rSELF, #offThread_retval@ r1<- &self->retval @@ -12643,13 +12644,13 @@ d2l_doconv: mov r0, rINST, lsr #12 @ r0<- B str r1, [sp] @ push &self->retval bl .LOP_EXECUTE_INLINE_continue @ make call; will return after + mov rINST, r0 @ save result of inline add sp, sp, #8 @ pop stack - cmp r0, #0 @ test boolean result of inline - beq common_exceptionThrown @ returned false, handle exception - mov r0, r10 - bl dvmResolveInlineNative @ reload method + mov r0, r9 @ r0<- method mov r1, rSELF - bl dvmFastMethodTraceExit @ (method, self) + bl dvmFastNativeMethodTraceExit @ (method, self) + cmp rINST, #0 @ test boolean result of inline + beq common_exceptionThrown @ returned false, handle exception FETCH_ADVANCE_INST(3) @ advance rPC, load rINST GET_INST_OPCODE(ip) @ extract opcode from rINST GOTO_OPCODE(ip) @ jump to next instruction @@ -12696,20 +12697,22 @@ d2l_doconv: bl dvmResolveInlineNative cmp r0, #0 @ did it resolve? 
beq .LOP_EXECUTE_INLINE_RANGE_resume @ no, just move on + mov r9, r0 @ remember method mov r1, rSELF bl dvmFastMethodTraceEnter @ (method, self) add r1, rSELF, #offThread_retval@ r1<- &self->retval sub sp, sp, #8 @ make room for arg, +64 bit align - mov r0, rINST, lsr #12 @ r0<- B + mov r0, rINST, lsr #8 @ r0<- B + mov rINST, r9 @ rINST<- method str r1, [sp] @ push &self->retval bl .LOP_EXECUTE_INLINE_RANGE_continue @ make call; will return after + mov r9, r0 @ save result of inline add sp, sp, #8 @ pop stack - cmp r0, #0 @ test boolean result of inline - beq common_exceptionThrown @ returned false, handle exception - mov r0, r10 - bl dvmResolveInlineNative @ reload method + mov r0, rINST @ r0<- method mov r1, rSELF - bl dvmFastMethodTraceExit @ (method, self) + bl dvmFastNativeMethodTraceExit @ (method, self) + cmp r9, #0 @ test boolean result of inline + beq common_exceptionThrown @ returned false, handle exception FETCH_ADVANCE_INST(3) @ advance rPC, load rINST GET_INST_OPCODE(ip) @ extract opcode from rINST GOTO_OPCODE(ip) @ jump to next instruction @@ -26857,6 +26860,8 @@ common_invokeMethodNoRange: 2: @ Profiling - record method entry. r0: methodToCall stmfd sp!, {r0-r3} @ preserve r0-r3 + str rPC, [rSELF, #offThread_pc] @ update interpSave.pc + str rFP, [rSELF, #offThread_fp] @ update interpSave.fp mov r1, r0 mov r0, rSELF bl dvmReportInvoke @ (self, method) @@ -26908,9 +26913,11 @@ dalvik_mterp: 11: @ r0=newFp, r1=&retval, r2=methodToCall, r3=self, lr=subModes stmfd sp!, {r0-r3} @ save all but subModes - mov r0, rPC + str rPC, [rSELF, #offThread_pc] @ update interpSave.pc + str rFP, [rSELF, #offThread_fp] @ update interpSave.fp + mov r0, r2 @ r0<- methodToCall mov r1, rSELF - bl dvmReportPreNativeInvoke @ (pc, self, methodToCall) + bl dvmReportPreNativeInvoke @ (methodToCall, self) ldmfd sp, {r0-r3} @ refresh. 
NOTE: no sp autoincrement @ Call the native method @@ -26921,9 +26928,10 @@ dalvik_mterp: ldmfd sp!, {r0-r3} @ r2<- methodToCall (others unneeded) @ Finish up any post-invoke subMode requirements - mov r0, rPC + @ interpSave already up-to-date + mov r0, r2 @ r0<- methodToCall mov r1, rSELF - bl dvmReportPostNativeInvoke @ (pc, self, methodToCall) + bl dvmReportPostNativeInvoke @ (methodToCall, self) b 7b @ resume .LstackOverflow: @ r0=methodToCall @@ -27016,10 +27024,11 @@ common_returnFromMethod: 19: @ Handle special actions @ On entry, r0: StackSaveArea - ldr r2, [r0, #offStackSaveArea_prevFrame] @ r2<- prevFP - mov r1, rPC + ldr r1, [r0, #offStackSaveArea_prevFrame] @ r2<- prevFP + str rPC, [rSELF, #offThread_pc] @ update interpSave.pc + str r1, [rSELF, #offThread_fp] @ update interpSave.fp mov r0, rSELF - bl dvmReportReturn @ (self, pc, prevFP) + bl dvmReportReturn @ (self) SAVEAREA_FROM_FP(r0, rFP) @ restore StackSaveArea b 14b @ continue @@ -27125,11 +27134,11 @@ common_exceptionThrown: @ Manage debugger bookkeeping 7: + str rPC, [rSELF, #offThread_pc] @ update interpSave.pc + str rFP, [rSELF, #offThread_fp] @ update interpSave.fp mov r0, rSELF @ arg0<- self - ldr r1, [rSELF, #offThread_method] @ arg1<- curMethod - mov r2, rPC @ arg2<- pc - mov r3, rFP @ arg3<- fp - bl dvmReportExceptionThrow @ (self, method, pc, fp) + mov r1, r9 @ arg1<- exception + bl dvmReportExceptionThrow @ (self, exception) b 8b @ resume with normal handling .LnotCaughtLocally: @ r9=exception diff --git a/vm/mterp/out/InterpAsm-armv5te.S b/vm/mterp/out/InterpAsm-armv5te.S index fc4b63817..b46c3ee5e 100644 --- a/vm/mterp/out/InterpAsm-armv5te.S +++ b/vm/mterp/out/InterpAsm-armv5te.S @@ -13069,20 +13069,20 @@ d2l_doconv: */ .LOP_EXECUTE_INLINE_continue: rsb r0, r0, #4 @ r0<- 4-r0 - FETCH(r9, 2) @ r9<- FEDC + FETCH(rINST, 2) @ rINST<- FEDC add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each bl common_abort @ (skipped due to ARM prefetch) -4: and ip, r9, #0xf000 @ isolate F +4: and ip, 
rINST, #0xf000 @ isolate F ldr r3, [rFP, ip, lsr #10] @ r3<- vF (shift right 12, left 2) -3: and ip, r9, #0x0f00 @ isolate E +3: and ip, rINST, #0x0f00 @ isolate E ldr r2, [rFP, ip, lsr #6] @ r2<- vE -2: and ip, r9, #0x00f0 @ isolate D +2: and ip, rINST, #0x00f0 @ isolate D ldr r1, [rFP, ip, lsr #2] @ r1<- vD -1: and ip, r9, #0x000f @ isolate C +1: and ip, rINST, #0x000f @ isolate C ldr r0, [rFP, ip, lsl #2] @ r0<- vC 0: - ldr r9, .LOP_EXECUTE_INLINE_table @ table of InlineOperation - ldr pc, [r9, r10, lsl #4] @ sizeof=16, "func" is first entry + ldr rINST, .LOP_EXECUTE_INLINE_table @ table of InlineOperation + ldr pc, [rINST, r10, lsl #4] @ sizeof=16, "func" is first entry @ (not reached) /* @@ -13094,6 +13094,7 @@ d2l_doconv: bl dvmResolveInlineNative cmp r0, #0 @ did it resolve? beq .LOP_EXECUTE_INLINE_resume @ no, just move on + mov r9, r0 @ remember method mov r1, rSELF bl dvmFastMethodTraceEnter @ (method, self) add r1, rSELF, #offThread_retval@ r1<- &self->retval @@ -13101,13 +13102,13 @@ d2l_doconv: mov r0, rINST, lsr #12 @ r0<- B str r1, [sp] @ push &self->retval bl .LOP_EXECUTE_INLINE_continue @ make call; will return after + mov rINST, r0 @ save result of inline add sp, sp, #8 @ pop stack - cmp r0, #0 @ test boolean result of inline - beq common_exceptionThrown @ returned false, handle exception - mov r0, r10 - bl dvmResolveInlineNative @ reload method + mov r0, r9 @ r0<- method mov r1, rSELF - bl dvmFastMethodTraceExit @ (method, self) + bl dvmFastNativeMethodTraceExit @ (method, self) + cmp rINST, #0 @ test boolean result of inline + beq common_exceptionThrown @ returned false, handle exception FETCH_ADVANCE_INST(3) @ advance rPC, load rINST GET_INST_OPCODE(ip) @ extract opcode from rINST GOTO_OPCODE(ip) @ jump to next instruction @@ -13154,20 +13155,22 @@ d2l_doconv: bl dvmResolveInlineNative cmp r0, #0 @ did it resolve? 
beq .LOP_EXECUTE_INLINE_RANGE_resume @ no, just move on + mov r9, r0 @ remember method mov r1, rSELF bl dvmFastMethodTraceEnter @ (method, self) add r1, rSELF, #offThread_retval@ r1<- &self->retval sub sp, sp, #8 @ make room for arg, +64 bit align - mov r0, rINST, lsr #12 @ r0<- B + mov r0, rINST, lsr #8 @ r0<- B + mov rINST, r9 @ rINST<- method str r1, [sp] @ push &self->retval bl .LOP_EXECUTE_INLINE_RANGE_continue @ make call; will return after + mov r9, r0 @ save result of inline add sp, sp, #8 @ pop stack - cmp r0, #0 @ test boolean result of inline - beq common_exceptionThrown @ returned false, handle exception - mov r0, r10 - bl dvmResolveInlineNative @ reload method + mov r0, rINST @ r0<- method mov r1, rSELF - bl dvmFastMethodTraceExit @ (method, self) + bl dvmFastNativeMethodTraceExit @ (method, self) + cmp r9, #0 @ test boolean result of inline + beq common_exceptionThrown @ returned false, handle exception FETCH_ADVANCE_INST(3) @ advance rPC, load rINST GET_INST_OPCODE(ip) @ extract opcode from rINST GOTO_OPCODE(ip) @ jump to next instruction @@ -27315,6 +27318,8 @@ common_invokeMethodNoRange: 2: @ Profiling - record method entry. r0: methodToCall stmfd sp!, {r0-r3} @ preserve r0-r3 + str rPC, [rSELF, #offThread_pc] @ update interpSave.pc + str rFP, [rSELF, #offThread_fp] @ update interpSave.fp mov r1, r0 mov r0, rSELF bl dvmReportInvoke @ (self, method) @@ -27366,9 +27371,11 @@ dalvik_mterp: 11: @ r0=newFp, r1=&retval, r2=methodToCall, r3=self, lr=subModes stmfd sp!, {r0-r3} @ save all but subModes - mov r0, rPC + str rPC, [rSELF, #offThread_pc] @ update interpSave.pc + str rFP, [rSELF, #offThread_fp] @ update interpSave.fp + mov r0, r2 @ r0<- methodToCall mov r1, rSELF - bl dvmReportPreNativeInvoke @ (pc, self, methodToCall) + bl dvmReportPreNativeInvoke @ (methodToCall, self) ldmfd sp, {r0-r3} @ refresh. 
NOTE: no sp autoincrement @ Call the native method @@ -27379,9 +27386,10 @@ dalvik_mterp: ldmfd sp!, {r0-r3} @ r2<- methodToCall (others unneeded) @ Finish up any post-invoke subMode requirements - mov r0, rPC + @ interpSave already up-to-date + mov r0, r2 @ r0<- methodToCall mov r1, rSELF - bl dvmReportPostNativeInvoke @ (pc, self, methodToCall) + bl dvmReportPostNativeInvoke @ (methodToCall, self) b 7b @ resume .LstackOverflow: @ r0=methodToCall @@ -27474,10 +27482,11 @@ common_returnFromMethod: 19: @ Handle special actions @ On entry, r0: StackSaveArea - ldr r2, [r0, #offStackSaveArea_prevFrame] @ r2<- prevFP - mov r1, rPC + ldr r1, [r0, #offStackSaveArea_prevFrame] @ r2<- prevFP + str rPC, [rSELF, #offThread_pc] @ update interpSave.pc + str r1, [rSELF, #offThread_fp] @ update interpSave.fp mov r0, rSELF - bl dvmReportReturn @ (self, pc, prevFP) + bl dvmReportReturn @ (self) SAVEAREA_FROM_FP(r0, rFP) @ restore StackSaveArea b 14b @ continue @@ -27583,11 +27592,11 @@ common_exceptionThrown: @ Manage debugger bookkeeping 7: + str rPC, [rSELF, #offThread_pc] @ update interpSave.pc + str rFP, [rSELF, #offThread_fp] @ update interpSave.fp mov r0, rSELF @ arg0<- self - ldr r1, [rSELF, #offThread_method] @ arg1<- curMethod - mov r2, rPC @ arg2<- pc - mov r3, rFP @ arg3<- fp - bl dvmReportExceptionThrow @ (self, method, pc, fp) + mov r1, r9 @ arg1<- exception + bl dvmReportExceptionThrow @ (self, exception) b 8b @ resume with normal handling .LnotCaughtLocally: @ r9=exception diff --git a/vm/mterp/out/InterpAsm-armv7-a-neon.S b/vm/mterp/out/InterpAsm-armv7-a-neon.S index 7132faf3c..4694cf701 100644 --- a/vm/mterp/out/InterpAsm-armv7-a-neon.S +++ b/vm/mterp/out/InterpAsm-armv7-a-neon.S @@ -12549,20 +12549,20 @@ d2l_doconv: */ .LOP_EXECUTE_INLINE_continue: rsb r0, r0, #4 @ r0<- 4-r0 - FETCH(r9, 2) @ r9<- FEDC + FETCH(rINST, 2) @ rINST<- FEDC add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each bl common_abort @ (skipped due to ARM prefetch) -4: and ip, r9, #0xf000 @ 
isolate F +4: and ip, rINST, #0xf000 @ isolate F ldr r3, [rFP, ip, lsr #10] @ r3<- vF (shift right 12, left 2) -3: and ip, r9, #0x0f00 @ isolate E +3: and ip, rINST, #0x0f00 @ isolate E ldr r2, [rFP, ip, lsr #6] @ r2<- vE -2: and ip, r9, #0x00f0 @ isolate D +2: and ip, rINST, #0x00f0 @ isolate D ldr r1, [rFP, ip, lsr #2] @ r1<- vD -1: and ip, r9, #0x000f @ isolate C +1: and ip, rINST, #0x000f @ isolate C ldr r0, [rFP, ip, lsl #2] @ r0<- vC 0: - ldr r9, .LOP_EXECUTE_INLINE_table @ table of InlineOperation - ldr pc, [r9, r10, lsl #4] @ sizeof=16, "func" is first entry + ldr rINST, .LOP_EXECUTE_INLINE_table @ table of InlineOperation + ldr pc, [rINST, r10, lsl #4] @ sizeof=16, "func" is first entry @ (not reached) /* @@ -12574,6 +12574,7 @@ d2l_doconv: bl dvmResolveInlineNative cmp r0, #0 @ did it resolve? beq .LOP_EXECUTE_INLINE_resume @ no, just move on + mov r9, r0 @ remember method mov r1, rSELF bl dvmFastMethodTraceEnter @ (method, self) add r1, rSELF, #offThread_retval@ r1<- &self->retval @@ -12581,13 +12582,13 @@ d2l_doconv: mov r0, rINST, lsr #12 @ r0<- B str r1, [sp] @ push &self->retval bl .LOP_EXECUTE_INLINE_continue @ make call; will return after + mov rINST, r0 @ save result of inline add sp, sp, #8 @ pop stack - cmp r0, #0 @ test boolean result of inline - beq common_exceptionThrown @ returned false, handle exception - mov r0, r10 - bl dvmResolveInlineNative @ reload method + mov r0, r9 @ r0<- method mov r1, rSELF - bl dvmFastMethodTraceExit @ (method, self) + bl dvmFastNativeMethodTraceExit @ (method, self) + cmp rINST, #0 @ test boolean result of inline + beq common_exceptionThrown @ returned false, handle exception FETCH_ADVANCE_INST(3) @ advance rPC, load rINST GET_INST_OPCODE(ip) @ extract opcode from rINST GOTO_OPCODE(ip) @ jump to next instruction @@ -12634,20 +12635,22 @@ d2l_doconv: bl dvmResolveInlineNative cmp r0, #0 @ did it resolve? 
beq .LOP_EXECUTE_INLINE_RANGE_resume @ no, just move on + mov r9, r0 @ remember method mov r1, rSELF bl dvmFastMethodTraceEnter @ (method, self) add r1, rSELF, #offThread_retval@ r1<- &self->retval sub sp, sp, #8 @ make room for arg, +64 bit align - mov r0, rINST, lsr #12 @ r0<- B + mov r0, rINST, lsr #8 @ r0<- B + mov rINST, r9 @ rINST<- method str r1, [sp] @ push &self->retval bl .LOP_EXECUTE_INLINE_RANGE_continue @ make call; will return after + mov r9, r0 @ save result of inline add sp, sp, #8 @ pop stack - cmp r0, #0 @ test boolean result of inline - beq common_exceptionThrown @ returned false, handle exception - mov r0, r10 - bl dvmResolveInlineNative @ reload method + mov r0, rINST @ r0<- method mov r1, rSELF - bl dvmFastMethodTraceExit @ (method, self) + bl dvmFastNativeMethodTraceExit @ (method, self) + cmp r9, #0 @ test boolean result of inline + beq common_exceptionThrown @ returned false, handle exception FETCH_ADVANCE_INST(3) @ advance rPC, load rINST GET_INST_OPCODE(ip) @ extract opcode from rINST GOTO_OPCODE(ip) @ jump to next instruction @@ -26795,6 +26798,8 @@ common_invokeMethodNoRange: 2: @ Profiling - record method entry. r0: methodToCall stmfd sp!, {r0-r3} @ preserve r0-r3 + str rPC, [rSELF, #offThread_pc] @ update interpSave.pc + str rFP, [rSELF, #offThread_fp] @ update interpSave.fp mov r1, r0 mov r0, rSELF bl dvmReportInvoke @ (self, method) @@ -26846,9 +26851,11 @@ dalvik_mterp: 11: @ r0=newFp, r1=&retval, r2=methodToCall, r3=self, lr=subModes stmfd sp!, {r0-r3} @ save all but subModes - mov r0, rPC + str rPC, [rSELF, #offThread_pc] @ update interpSave.pc + str rFP, [rSELF, #offThread_fp] @ update interpSave.fp + mov r0, r2 @ r0<- methodToCall mov r1, rSELF - bl dvmReportPreNativeInvoke @ (pc, self, methodToCall) + bl dvmReportPreNativeInvoke @ (methodToCall, self) ldmfd sp, {r0-r3} @ refresh. 
NOTE: no sp autoincrement @ Call the native method @@ -26859,9 +26866,10 @@ dalvik_mterp: ldmfd sp!, {r0-r3} @ r2<- methodToCall (others unneeded) @ Finish up any post-invoke subMode requirements - mov r0, rPC + @ interpSave already up-to-date + mov r0, r2 @ r0<- methodToCall mov r1, rSELF - bl dvmReportPostNativeInvoke @ (pc, self, methodToCall) + bl dvmReportPostNativeInvoke @ (methodToCall, self) b 7b @ resume .LstackOverflow: @ r0=methodToCall @@ -26954,10 +26962,11 @@ common_returnFromMethod: 19: @ Handle special actions @ On entry, r0: StackSaveArea - ldr r2, [r0, #offStackSaveArea_prevFrame] @ r2<- prevFP - mov r1, rPC + ldr r1, [r0, #offStackSaveArea_prevFrame] @ r2<- prevFP + str rPC, [rSELF, #offThread_pc] @ update interpSave.pc + str r1, [rSELF, #offThread_fp] @ update interpSave.fp mov r0, rSELF - bl dvmReportReturn @ (self, pc, prevFP) + bl dvmReportReturn @ (self) SAVEAREA_FROM_FP(r0, rFP) @ restore StackSaveArea b 14b @ continue @@ -27063,11 +27072,11 @@ common_exceptionThrown: @ Manage debugger bookkeeping 7: + str rPC, [rSELF, #offThread_pc] @ update interpSave.pc + str rFP, [rSELF, #offThread_fp] @ update interpSave.fp mov r0, rSELF @ arg0<- self - ldr r1, [rSELF, #offThread_method] @ arg1<- curMethod - mov r2, rPC @ arg2<- pc - mov r3, rFP @ arg3<- fp - bl dvmReportExceptionThrow @ (self, method, pc, fp) + mov r1, r9 @ arg1<- exception + bl dvmReportExceptionThrow @ (self, exception) b 8b @ resume with normal handling .LnotCaughtLocally: @ r9=exception diff --git a/vm/mterp/out/InterpAsm-armv7-a.S b/vm/mterp/out/InterpAsm-armv7-a.S index 450c5379d..5a291f090 100644 --- a/vm/mterp/out/InterpAsm-armv7-a.S +++ b/vm/mterp/out/InterpAsm-armv7-a.S @@ -12549,20 +12549,20 @@ d2l_doconv: */ .LOP_EXECUTE_INLINE_continue: rsb r0, r0, #4 @ r0<- 4-r0 - FETCH(r9, 2) @ r9<- FEDC + FETCH(rINST, 2) @ rINST<- FEDC add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each bl common_abort @ (skipped due to ARM prefetch) -4: and ip, r9, #0xf000 @ isolate F +4: and ip, 
rINST, #0xf000 @ isolate F ldr r3, [rFP, ip, lsr #10] @ r3<- vF (shift right 12, left 2) -3: and ip, r9, #0x0f00 @ isolate E +3: and ip, rINST, #0x0f00 @ isolate E ldr r2, [rFP, ip, lsr #6] @ r2<- vE -2: and ip, r9, #0x00f0 @ isolate D +2: and ip, rINST, #0x00f0 @ isolate D ldr r1, [rFP, ip, lsr #2] @ r1<- vD -1: and ip, r9, #0x000f @ isolate C +1: and ip, rINST, #0x000f @ isolate C ldr r0, [rFP, ip, lsl #2] @ r0<- vC 0: - ldr r9, .LOP_EXECUTE_INLINE_table @ table of InlineOperation - ldr pc, [r9, r10, lsl #4] @ sizeof=16, "func" is first entry + ldr rINST, .LOP_EXECUTE_INLINE_table @ table of InlineOperation + ldr pc, [rINST, r10, lsl #4] @ sizeof=16, "func" is first entry @ (not reached) /* @@ -12574,6 +12574,7 @@ d2l_doconv: bl dvmResolveInlineNative cmp r0, #0 @ did it resolve? beq .LOP_EXECUTE_INLINE_resume @ no, just move on + mov r9, r0 @ remember method mov r1, rSELF bl dvmFastMethodTraceEnter @ (method, self) add r1, rSELF, #offThread_retval@ r1<- &self->retval @@ -12581,13 +12582,13 @@ d2l_doconv: mov r0, rINST, lsr #12 @ r0<- B str r1, [sp] @ push &self->retval bl .LOP_EXECUTE_INLINE_continue @ make call; will return after + mov rINST, r0 @ save result of inline add sp, sp, #8 @ pop stack - cmp r0, #0 @ test boolean result of inline - beq common_exceptionThrown @ returned false, handle exception - mov r0, r10 - bl dvmResolveInlineNative @ reload method + mov r0, r9 @ r0<- method mov r1, rSELF - bl dvmFastMethodTraceExit @ (method, self) + bl dvmFastNativeMethodTraceExit @ (method, self) + cmp rINST, #0 @ test boolean result of inline + beq common_exceptionThrown @ returned false, handle exception FETCH_ADVANCE_INST(3) @ advance rPC, load rINST GET_INST_OPCODE(ip) @ extract opcode from rINST GOTO_OPCODE(ip) @ jump to next instruction @@ -12634,20 +12635,22 @@ d2l_doconv: bl dvmResolveInlineNative cmp r0, #0 @ did it resolve? 
beq .LOP_EXECUTE_INLINE_RANGE_resume @ no, just move on + mov r9, r0 @ remember method mov r1, rSELF bl dvmFastMethodTraceEnter @ (method, self) add r1, rSELF, #offThread_retval@ r1<- &self->retval sub sp, sp, #8 @ make room for arg, +64 bit align - mov r0, rINST, lsr #12 @ r0<- B + mov r0, rINST, lsr #8 @ r0<- B + mov rINST, r9 @ rINST<- method str r1, [sp] @ push &self->retval bl .LOP_EXECUTE_INLINE_RANGE_continue @ make call; will return after + mov r9, r0 @ save result of inline add sp, sp, #8 @ pop stack - cmp r0, #0 @ test boolean result of inline - beq common_exceptionThrown @ returned false, handle exception - mov r0, r10 - bl dvmResolveInlineNative @ reload method + mov r0, rINST @ r0<- method mov r1, rSELF - bl dvmFastMethodTraceExit @ (method, self) + bl dvmFastNativeMethodTraceExit @ (method, self) + cmp r9, #0 @ test boolean result of inline + beq common_exceptionThrown @ returned false, handle exception FETCH_ADVANCE_INST(3) @ advance rPC, load rINST GET_INST_OPCODE(ip) @ extract opcode from rINST GOTO_OPCODE(ip) @ jump to next instruction @@ -26795,6 +26798,8 @@ common_invokeMethodNoRange: 2: @ Profiling - record method entry. r0: methodToCall stmfd sp!, {r0-r3} @ preserve r0-r3 + str rPC, [rSELF, #offThread_pc] @ update interpSave.pc + str rFP, [rSELF, #offThread_fp] @ update interpSave.fp mov r1, r0 mov r0, rSELF bl dvmReportInvoke @ (self, method) @@ -26846,9 +26851,11 @@ dalvik_mterp: 11: @ r0=newFp, r1=&retval, r2=methodToCall, r3=self, lr=subModes stmfd sp!, {r0-r3} @ save all but subModes - mov r0, rPC + str rPC, [rSELF, #offThread_pc] @ update interpSave.pc + str rFP, [rSELF, #offThread_fp] @ update interpSave.fp + mov r0, r2 @ r0<- methodToCall mov r1, rSELF - bl dvmReportPreNativeInvoke @ (pc, self, methodToCall) + bl dvmReportPreNativeInvoke @ (methodToCall, self) ldmfd sp, {r0-r3} @ refresh. 
NOTE: no sp autoincrement @ Call the native method @@ -26859,9 +26866,10 @@ dalvik_mterp: ldmfd sp!, {r0-r3} @ r2<- methodToCall (others unneeded) @ Finish up any post-invoke subMode requirements - mov r0, rPC + @ interpSave already up-to-date + mov r0, r2 @ r0<- methodToCall mov r1, rSELF - bl dvmReportPostNativeInvoke @ (pc, self, methodToCall) + bl dvmReportPostNativeInvoke @ (methodToCall, self) b 7b @ resume .LstackOverflow: @ r0=methodToCall @@ -26954,10 +26962,11 @@ common_returnFromMethod: 19: @ Handle special actions @ On entry, r0: StackSaveArea - ldr r2, [r0, #offStackSaveArea_prevFrame] @ r2<- prevFP - mov r1, rPC + ldr r1, [r0, #offStackSaveArea_prevFrame] @ r2<- prevFP + str rPC, [rSELF, #offThread_pc] @ update interpSave.pc + str r1, [rSELF, #offThread_fp] @ update interpSave.fp mov r0, rSELF - bl dvmReportReturn @ (self, pc, prevFP) + bl dvmReportReturn @ (self) SAVEAREA_FROM_FP(r0, rFP) @ restore StackSaveArea b 14b @ continue @@ -27063,11 +27072,11 @@ common_exceptionThrown: @ Manage debugger bookkeeping 7: + str rPC, [rSELF, #offThread_pc] @ update interpSave.pc + str rFP, [rSELF, #offThread_fp] @ update interpSave.fp mov r0, rSELF @ arg0<- self - ldr r1, [rSELF, #offThread_method] @ arg1<- curMethod - mov r2, rPC @ arg2<- pc - mov r3, rFP @ arg3<- fp - bl dvmReportExceptionThrow @ (self, method, pc, fp) + mov r1, r9 @ arg1<- exception + bl dvmReportExceptionThrow @ (self, exception) b 8b @ resume with normal handling .LnotCaughtLocally: @ r9=exception diff --git a/vm/mterp/out/InterpAsm-x86.S b/vm/mterp/out/InterpAsm-x86.S index cafb89710..ed9b4e99e 100644 --- a/vm/mterp/out/InterpAsm-x86.S +++ b/vm/mterp/out/InterpAsm-x86.S @@ -26330,11 +26330,18 @@ common_invokeMethodNoRange: GOTO_NEXT # jump to methodToCall->insns 2: - /* Live: %eax, %ecx, %edx - preserve */ + /* + * On entry, preserve all: + * %eax: method + * %ecx: self + * %edx: new save area + */ SPILL_TMP1(%eax) # preserve methodToCall SPILL_TMP2(%edx) # preserve newSaveArea + movl rPC, 
offThread_pc(%ecx) # update interpSave.pc + movl rFP, offThread_fp(%ecx) # update interpSave.fp movl %ecx, OUT_ARG0(%esp) - movl %eax, OUT_ARG0(%esp) + movl %eax, OUT_ARG1(%esp) call dvmReportInvoke # (self, method) UNSPILL_TMP1(%eax) UNSPILL_TMP2(%edx) @@ -26379,10 +26386,11 @@ common_invokeMethodNoRange: * %eax=methodToCall, rINST=newFP, %ecx=self */ SPILL_TMP1(%eax) # save methodTocall - movl %eax, OUT_ARG2(%esp) - movl %ecx, OUT_ARG1(%esp) - movl rPC, OUT_ARG0(%esp) - call dvmReportPreNativeInvoke # (pc, self, methodToCall) + movl rPC, offThread_pc(%ecx) + movl rFP, offThread_fp(%ecx) + movl %ecx, OUT_ARG0(%esp) + movl %eax, OUT_ARG1(%esp) + call dvmReportPreNativeInvoke # (self, methodToCall) UNSPILL_TMP1(%eax) # restore methodToCall movl rSELF,%ecx # restore self @@ -26396,10 +26404,9 @@ common_invokeMethodNoRange: UNSPILL_TMP1(%eax) # restore methodToCall movl rSELF, %ecx - movl %eax, OUT_ARG2(%esp) - movl %ecx, OUT_ARG1(%esp) - movl rPC, OUT_ARG0(%esp) - call dvmReportPostNativeInvoke # (pc, self, methodToCall) + movl %ecx, OUT_ARG0(%esp) + movl %eax, OUT_ARG1(%esp) + call dvmReportPostNativeInvoke # (self, methodToCall) jmp 7b # rejoin .LstackOverflow: # eax=methodToCall @@ -26440,10 +26447,10 @@ common_returnFromMethod: * Handle special subMode actions * On entry, rFP: prevFP, %ecx: self, %eax: saveArea */ - movl rFP, OUT_ARG2(%esp) # parameter prevFP - movl rPC, OUT_ARG1(%esp) # parameter pc - movl %ecx, OUT_ARG1(%esp) # parameter self - call dvmReportReturn # (self, pc, prevFP) + movl rFP, offThread_fp(%ecx) # update interpSave.fp + movl rPC, offThread_pc(%ecx) # update interpSave.pc + movl %ecx, OUT_ARG0(%esp) # parameter self + call dvmReportReturn # (self) movl rSELF, %ecx # restore self SAVEAREA_FROM_FP %eax # restore saveArea jmp 14b diff --git a/vm/mterp/out/InterpC-allstubs.c b/vm/mterp/out/InterpC-allstubs.c index c6784c544..8930c4452 100644 --- a/vm/mterp/out/InterpC-allstubs.c +++ b/vm/mterp/out/InterpC-allstubs.c @@ -419,6 +419,13 @@ static 
inline bool checkForNullExportPC(Object* obj, u4* fp, const u2* pc) #define JIT_STUB_HACK(x) #endif +/* + * InterpSave's pc and fp must be valid when breaking out to a + * "Reportxxx" routine. Because the portable interpreter uses local + * variables for these, we must flush prior. Stubs, however, use + * the interpSave vars directly, so this is a nop for stubs. + */ +#define PC_FP_TO_SELF() /* * Opcode handler framing macros. Here, each opcode is a separate function @@ -2884,7 +2891,7 @@ HANDLE_OPCODE(OP_BREAKPOINT) * for the sake of anything that needs to do disambiguation in a * common handler with INST_INST. * - * The breakpoint itself is handled over in dvmUpdateDebugger(), + * The breakpoint itself is handled over in updateDebugger(), * because we need to detect other events (method entry, single * step) and report them in the same event packet, and we're not * yet handling those through breakpoint instructions. By the @@ -5034,7 +5041,8 @@ GOTO_TARGET(returnFromMethod) /* Handle any special subMode requirements */ if (self->interpBreak.ctl.subMode != 0) { - dvmReportReturn(self, pc, fp); + PC_FP_TO_SELF(); + dvmReportReturn(self); } if (dvmIsBreakFrame(fp)) { @@ -5046,6 +5054,7 @@ GOTO_TARGET(returnFromMethod) /* update thread FP, and reset local variables */ self->curFrame = fp; curMethod = SAVEAREA_FROM_FP(fp)->method; + self->interpSave.method = curMethod; //methodClass = curMethod->clazz; methodClassDex = curMethod->clazz->pDvmDex; pc = saveArea->savedPc; @@ -5110,7 +5119,8 @@ GOTO_TARGET(exceptionThrown) * debugger. 
*/ if (self->interpBreak.ctl.subMode != 0) { - dvmReportExceptionThrow(self, curMethod, pc, fp); + PC_FP_TO_SELF(); + dvmReportExceptionThrow(self, exception); } /* @@ -5184,6 +5194,7 @@ GOTO_TARGET(exceptionThrown) */ //fp = (u4*) self->curFrame; curMethod = SAVEAREA_FROM_FP(fp)->method; + self->interpSave.method = curMethod; //methodClass = curMethod->clazz; methodClassDex = curMethod->clazz->pDvmDex; pc = curMethod->insns + catchRelPc; @@ -5360,6 +5371,7 @@ GOTO_TARGET(invokeMethod, bool methodCallRange, const Method* _methodToCall, * calls. For native calls, we'll mark EXIT on return. * For non-native calls, EXIT is marked in the RETURN op. */ + PC_FP_TO_SELF(); dvmReportInvoke(self, methodToCall); } @@ -5369,6 +5381,7 @@ GOTO_TARGET(invokeMethod, bool methodCallRange, const Method* _methodToCall, * frame pointer and other local state, and continue. */ curMethod = methodToCall; + self->interpSave.method = curMethod; methodClassDex = curMethod->clazz->pDvmDex; pc = methodToCall->insns; self->curFrame = fp = newFp; @@ -5389,7 +5402,8 @@ GOTO_TARGET(invokeMethod, bool methodCallRange, const Method* _methodToCall, DUMP_REGS(methodToCall, newFp, true); // show input args if (self->interpBreak.ctl.subMode != 0) { - dvmReportPreNativeInvoke(pc, self, methodToCall); + PC_FP_TO_SELF(); + dvmReportPreNativeInvoke(methodToCall, self); } ILOGD("> native <-- %s.%s %s", methodToCall->clazz->descriptor, @@ -5403,7 +5417,8 @@ GOTO_TARGET(invokeMethod, bool methodCallRange, const Method* _methodToCall, (*methodToCall->nativeFunc)(newFp, &retval, methodToCall, self); if (self->interpBreak.ctl.subMode != 0) { - dvmReportPostNativeInvoke(pc, self, methodToCall); + PC_FP_TO_SELF(); + dvmReportPostNativeInvoke(methodToCall, self); } /* pop frame off */ diff --git a/vm/mterp/out/InterpC-armv5te-vfp.c b/vm/mterp/out/InterpC-armv5te-vfp.c index f05c3ee67..bcf03c191 100644 --- a/vm/mterp/out/InterpC-armv5te-vfp.c +++ b/vm/mterp/out/InterpC-armv5te-vfp.c @@ -419,6 +419,13 @@ static 
inline bool checkForNullExportPC(Object* obj, u4* fp, const u2* pc) #define JIT_STUB_HACK(x) #endif +/* + * InterpSave's pc and fp must be valid when breaking out to a + * "Reportxxx" routine. Because the portable interpreter uses local + * variables for these, we must flush prior. Stubs, however, use + * the interpSave vars directly, so this is a nop for stubs. + */ +#define PC_FP_TO_SELF() /* * Opcode handler framing macros. Here, each opcode is a separate function diff --git a/vm/mterp/out/InterpC-armv5te.c b/vm/mterp/out/InterpC-armv5te.c index 3bf9fd489..9d6e48605 100644 --- a/vm/mterp/out/InterpC-armv5te.c +++ b/vm/mterp/out/InterpC-armv5te.c @@ -419,6 +419,13 @@ static inline bool checkForNullExportPC(Object* obj, u4* fp, const u2* pc) #define JIT_STUB_HACK(x) #endif +/* + * InterpSave's pc and fp must be valid when breaking out to a + * "Reportxxx" routine. Because the portable interpreter uses local + * variables for these, we must flush prior. Stubs, however, use + * the interpSave vars directly, so this is a nop for stubs. + */ +#define PC_FP_TO_SELF() /* * Opcode handler framing macros. Here, each opcode is a separate function diff --git a/vm/mterp/out/InterpC-armv7-a-neon.c b/vm/mterp/out/InterpC-armv7-a-neon.c index d233dbce0..c83d470c9 100644 --- a/vm/mterp/out/InterpC-armv7-a-neon.c +++ b/vm/mterp/out/InterpC-armv7-a-neon.c @@ -419,6 +419,13 @@ static inline bool checkForNullExportPC(Object* obj, u4* fp, const u2* pc) #define JIT_STUB_HACK(x) #endif +/* + * InterpSave's pc and fp must be valid when breaking out to a + * "Reportxxx" routine. Because the portable interpreter uses local + * variables for these, we must flush prior. Stubs, however, use + * the interpSave vars directly, so this is a nop for stubs. + */ +#define PC_FP_TO_SELF() /* * Opcode handler framing macros. 
Here, each opcode is a separate function diff --git a/vm/mterp/out/InterpC-armv7-a.c b/vm/mterp/out/InterpC-armv7-a.c index 94488e147..98b8a7dc6 100644 --- a/vm/mterp/out/InterpC-armv7-a.c +++ b/vm/mterp/out/InterpC-armv7-a.c @@ -419,6 +419,13 @@ static inline bool checkForNullExportPC(Object* obj, u4* fp, const u2* pc) #define JIT_STUB_HACK(x) #endif +/* + * InterpSave's pc and fp must be valid when breaking out to a + * "Reportxxx" routine. Because the portable interpreter uses local + * variables for these, we must flush prior. Stubs, however, use + * the interpSave vars directly, so this is a nop for stubs. + */ +#define PC_FP_TO_SELF() /* * Opcode handler framing macros. Here, each opcode is a separate function diff --git a/vm/mterp/out/InterpC-portable.c b/vm/mterp/out/InterpC-portable.c index 8d29d72bf..967cae5e9 100644 --- a/vm/mterp/out/InterpC-portable.c +++ b/vm/mterp/out/InterpC-portable.c @@ -396,6 +396,16 @@ static inline bool checkForNullExportPC(Object* obj, u4* fp, const u2* pc) #define JIT_STUB_HACK(x) /* + * InterpSave's pc and fp must be valid when breaking out to a + * "Reportxxx" routine. Because the portable interpreter uses local + * variables for these, we must flush prior. Stubs, however, use + * the interpSave vars directly, so this is a nop for stubs. + */ +#define PC_FP_TO_SELF() \ + self->interpSave.pc = pc; \ + self->interpSave.fp = fp; + +/* * Instruction framing. For a switch-oriented implementation this is * case/break, for a threaded implementation it's a goto label and an * instruction fetch/computed goto. @@ -2897,7 +2907,7 @@ HANDLE_OPCODE(OP_BREAKPOINT) * for the sake of anything that needs to do disambiguation in a * common handler with INST_INST. 
* - * The breakpoint itself is handled over in dvmUpdateDebugger(), + * The breakpoint itself is handled over in updateDebugger(), * because we need to detect other events (method entry, single * step) and report them in the same event packet, and we're not * yet handling those through breakpoint instructions. By the @@ -4984,7 +4994,8 @@ GOTO_TARGET(returnFromMethod) /* Handle any special subMode requirements */ if (self->interpBreak.ctl.subMode != 0) { - dvmReportReturn(self, pc, fp); + PC_FP_TO_SELF(); + dvmReportReturn(self); } if (dvmIsBreakFrame(fp)) { @@ -4996,6 +5007,7 @@ GOTO_TARGET(returnFromMethod) /* update thread FP, and reset local variables */ self->curFrame = fp; curMethod = SAVEAREA_FROM_FP(fp)->method; + self->interpSave.method = curMethod; //methodClass = curMethod->clazz; methodClassDex = curMethod->clazz->pDvmDex; pc = saveArea->savedPc; @@ -5060,7 +5072,8 @@ GOTO_TARGET(exceptionThrown) * debugger. */ if (self->interpBreak.ctl.subMode != 0) { - dvmReportExceptionThrow(self, curMethod, pc, fp); + PC_FP_TO_SELF(); + dvmReportExceptionThrow(self, exception); } /* @@ -5134,6 +5147,7 @@ GOTO_TARGET(exceptionThrown) */ //fp = (u4*) self->curFrame; curMethod = SAVEAREA_FROM_FP(fp)->method; + self->interpSave.method = curMethod; //methodClass = curMethod->clazz; methodClassDex = curMethod->clazz->pDvmDex; pc = curMethod->insns + catchRelPc; @@ -5310,6 +5324,7 @@ GOTO_TARGET(invokeMethod, bool methodCallRange, const Method* _methodToCall, * calls. For native calls, we'll mark EXIT on return. * For non-native calls, EXIT is marked in the RETURN op. */ + PC_FP_TO_SELF(); dvmReportInvoke(self, methodToCall); } @@ -5319,6 +5334,7 @@ GOTO_TARGET(invokeMethod, bool methodCallRange, const Method* _methodToCall, * frame pointer and other local state, and continue. 
*/ curMethod = methodToCall; + self->interpSave.method = curMethod; methodClassDex = curMethod->clazz->pDvmDex; pc = methodToCall->insns; self->curFrame = fp = newFp; @@ -5339,7 +5355,8 @@ GOTO_TARGET(invokeMethod, bool methodCallRange, const Method* _methodToCall, DUMP_REGS(methodToCall, newFp, true); // show input args if (self->interpBreak.ctl.subMode != 0) { - dvmReportPreNativeInvoke(pc, self, methodToCall); + PC_FP_TO_SELF(); + dvmReportPreNativeInvoke(methodToCall, self); } ILOGD("> native <-- %s.%s %s", methodToCall->clazz->descriptor, @@ -5353,7 +5370,8 @@ GOTO_TARGET(invokeMethod, bool methodCallRange, const Method* _methodToCall, (*methodToCall->nativeFunc)(newFp, &retval, methodToCall, self); if (self->interpBreak.ctl.subMode != 0) { - dvmReportPostNativeInvoke(pc, self, methodToCall); + PC_FP_TO_SELF(); + dvmReportPostNativeInvoke(methodToCall, self); } /* pop frame off */ diff --git a/vm/mterp/out/InterpC-x86-atom.c b/vm/mterp/out/InterpC-x86-atom.c index 1a3542c6c..85414bb55 100644 --- a/vm/mterp/out/InterpC-x86-atom.c +++ b/vm/mterp/out/InterpC-x86-atom.c @@ -419,6 +419,13 @@ static inline bool checkForNullExportPC(Object* obj, u4* fp, const u2* pc) #define JIT_STUB_HACK(x) #endif +/* + * InterpSave's pc and fp must be valid when breaking out to a + * "Reportxxx" routine. Because the portable interpreter uses local + * variables for these, we must flush prior. Stubs, however, use + * the interpSave vars directly, so this is a nop for stubs. + */ +#define PC_FP_TO_SELF() /* * Opcode handler framing macros. Here, each opcode is a separate function @@ -1310,7 +1317,7 @@ HANDLE_OPCODE(OP_BREAKPOINT) * for the sake of anything that needs to do disambiguation in a * common handler with INST_INST. 
* - * The breakpoint itself is handled over in dvmUpdateDebugger(), + * The breakpoint itself is handled over in updateDebugger(), * because we need to detect other events (method entry, single * step) and report them in the same event packet, and we're not * yet handling those through breakpoint instructions. By the @@ -2123,7 +2130,8 @@ GOTO_TARGET(returnFromMethod) /* Handle any special subMode requirements */ if (self->interpBreak.ctl.subMode != 0) { - dvmReportReturn(self, pc, fp); + PC_FP_TO_SELF(); + dvmReportReturn(self); } if (dvmIsBreakFrame(fp)) { @@ -2135,6 +2143,7 @@ GOTO_TARGET(returnFromMethod) /* update thread FP, and reset local variables */ self->curFrame = fp; curMethod = SAVEAREA_FROM_FP(fp)->method; + self->interpSave.method = curMethod; //methodClass = curMethod->clazz; methodClassDex = curMethod->clazz->pDvmDex; pc = saveArea->savedPc; @@ -2199,7 +2208,8 @@ GOTO_TARGET(exceptionThrown) * debugger. */ if (self->interpBreak.ctl.subMode != 0) { - dvmReportExceptionThrow(self, curMethod, pc, fp); + PC_FP_TO_SELF(); + dvmReportExceptionThrow(self, exception); } /* @@ -2273,6 +2283,7 @@ GOTO_TARGET(exceptionThrown) */ //fp = (u4*) self->curFrame; curMethod = SAVEAREA_FROM_FP(fp)->method; + self->interpSave.method = curMethod; //methodClass = curMethod->clazz; methodClassDex = curMethod->clazz->pDvmDex; pc = curMethod->insns + catchRelPc; @@ -2449,6 +2460,7 @@ GOTO_TARGET(invokeMethod, bool methodCallRange, const Method* _methodToCall, * calls. For native calls, we'll mark EXIT on return. * For non-native calls, EXIT is marked in the RETURN op. */ + PC_FP_TO_SELF(); dvmReportInvoke(self, methodToCall); } @@ -2458,6 +2470,7 @@ GOTO_TARGET(invokeMethod, bool methodCallRange, const Method* _methodToCall, * frame pointer and other local state, and continue. 
*/ curMethod = methodToCall; + self->interpSave.method = curMethod; methodClassDex = curMethod->clazz->pDvmDex; pc = methodToCall->insns; self->curFrame = fp = newFp; @@ -2478,7 +2491,8 @@ GOTO_TARGET(invokeMethod, bool methodCallRange, const Method* _methodToCall, DUMP_REGS(methodToCall, newFp, true); // show input args if (self->interpBreak.ctl.subMode != 0) { - dvmReportPreNativeInvoke(pc, self, methodToCall); + PC_FP_TO_SELF(); + dvmReportPreNativeInvoke(methodToCall, self); } ILOGD("> native <-- %s.%s %s", methodToCall->clazz->descriptor, @@ -2492,7 +2506,8 @@ GOTO_TARGET(invokeMethod, bool methodCallRange, const Method* _methodToCall, (*methodToCall->nativeFunc)(newFp, &retval, methodToCall, self); if (self->interpBreak.ctl.subMode != 0) { - dvmReportPostNativeInvoke(pc, self, methodToCall); + PC_FP_TO_SELF(); + dvmReportPostNativeInvoke(methodToCall, self); } /* pop frame off */ diff --git a/vm/mterp/out/InterpC-x86.c b/vm/mterp/out/InterpC-x86.c index eb54a0f61..dd25604dc 100644 --- a/vm/mterp/out/InterpC-x86.c +++ b/vm/mterp/out/InterpC-x86.c @@ -419,6 +419,13 @@ static inline bool checkForNullExportPC(Object* obj, u4* fp, const u2* pc) #define JIT_STUB_HACK(x) #endif +/* + * InterpSave's pc and fp must be valid when breaking out to a + * "Reportxxx" routine. Because the portable interpreter uses local + * variables for these, we must flush prior. Stubs, however, use + * the interpSave vars directly, so this is a nop for stubs. + */ +#define PC_FP_TO_SELF() /* * Opcode handler framing macros. 
Here, each opcode is a separate function @@ -2065,7 +2072,8 @@ GOTO_TARGET(returnFromMethod) /* Handle any special subMode requirements */ if (self->interpBreak.ctl.subMode != 0) { - dvmReportReturn(self, pc, fp); + PC_FP_TO_SELF(); + dvmReportReturn(self); } if (dvmIsBreakFrame(fp)) { @@ -2077,6 +2085,7 @@ GOTO_TARGET(returnFromMethod) /* update thread FP, and reset local variables */ self->curFrame = fp; curMethod = SAVEAREA_FROM_FP(fp)->method; + self->interpSave.method = curMethod; //methodClass = curMethod->clazz; methodClassDex = curMethod->clazz->pDvmDex; pc = saveArea->savedPc; @@ -2141,7 +2150,8 @@ GOTO_TARGET(exceptionThrown) * debugger. */ if (self->interpBreak.ctl.subMode != 0) { - dvmReportExceptionThrow(self, curMethod, pc, fp); + PC_FP_TO_SELF(); + dvmReportExceptionThrow(self, exception); } /* @@ -2215,6 +2225,7 @@ GOTO_TARGET(exceptionThrown) */ //fp = (u4*) self->curFrame; curMethod = SAVEAREA_FROM_FP(fp)->method; + self->interpSave.method = curMethod; //methodClass = curMethod->clazz; methodClassDex = curMethod->clazz->pDvmDex; pc = curMethod->insns + catchRelPc; @@ -2391,6 +2402,7 @@ GOTO_TARGET(invokeMethod, bool methodCallRange, const Method* _methodToCall, * calls. For native calls, we'll mark EXIT on return. * For non-native calls, EXIT is marked in the RETURN op. */ + PC_FP_TO_SELF(); dvmReportInvoke(self, methodToCall); } @@ -2400,6 +2412,7 @@ GOTO_TARGET(invokeMethod, bool methodCallRange, const Method* _methodToCall, * frame pointer and other local state, and continue. 
*/ curMethod = methodToCall; + self->interpSave.method = curMethod; methodClassDex = curMethod->clazz->pDvmDex; pc = methodToCall->insns; self->curFrame = fp = newFp; @@ -2420,7 +2433,8 @@ GOTO_TARGET(invokeMethod, bool methodCallRange, const Method* _methodToCall, DUMP_REGS(methodToCall, newFp, true); // show input args if (self->interpBreak.ctl.subMode != 0) { - dvmReportPreNativeInvoke(pc, self, methodToCall); + PC_FP_TO_SELF(); + dvmReportPreNativeInvoke(methodToCall, self); } ILOGD("> native <-- %s.%s %s", methodToCall->clazz->descriptor, @@ -2434,7 +2448,8 @@ GOTO_TARGET(invokeMethod, bool methodCallRange, const Method* _methodToCall, (*methodToCall->nativeFunc)(newFp, &retval, methodToCall, self); if (self->interpBreak.ctl.subMode != 0) { - dvmReportPostNativeInvoke(pc, self, methodToCall); + PC_FP_TO_SELF(); + dvmReportPostNativeInvoke(methodToCall, self); } /* pop frame off */ diff --git a/vm/mterp/portable/stubdefs.c b/vm/mterp/portable/stubdefs.c index e0523c858..de9a8e7ca 100644 --- a/vm/mterp/portable/stubdefs.c +++ b/vm/mterp/portable/stubdefs.c @@ -14,6 +14,16 @@ #define JIT_STUB_HACK(x) /* + * InterpSave's pc and fp must be valid when breaking out to a + * "Reportxxx" routine. Because the portable interpreter uses local + * variables for these, we must flush prior. Stubs, however, use + * the interpSave vars directly, so this is a nop for stubs. + */ +#define PC_FP_TO_SELF() \ + self->interpSave.pc = pc; \ + self->interpSave.fp = fp; + +/* * Instruction framing. For a switch-oriented implementation this is * case/break, for a threaded implementation it's a goto label and an * instruction fetch/computed goto. 
diff --git a/vm/mterp/x86/footer.S b/vm/mterp/x86/footer.S index 8aed1333b..b79d8cf30 100644 --- a/vm/mterp/x86/footer.S +++ b/vm/mterp/x86/footer.S @@ -399,11 +399,18 @@ common_invokeMethodNoRange: GOTO_NEXT # jump to methodToCall->insns 2: - /* Live: %eax, %ecx, %edx - preserve */ + /* + * On entry, preserve all: + * %eax: method + * %ecx: self + * %edx: new save area + */ SPILL_TMP1(%eax) # preserve methodToCall SPILL_TMP2(%edx) # preserve newSaveArea + movl rPC, offThread_pc(%ecx) # update interpSave.pc + movl rFP, offThread_fp(%ecx) # update interpSave.fp movl %ecx, OUT_ARG0(%esp) - movl %eax, OUT_ARG0(%esp) + movl %eax, OUT_ARG1(%esp) call dvmReportInvoke # (self, method) UNSPILL_TMP1(%eax) UNSPILL_TMP2(%edx) @@ -448,10 +455,11 @@ common_invokeMethodNoRange: * %eax=methodToCall, rINST=newFP, %ecx=self */ SPILL_TMP1(%eax) # save methodTocall - movl %eax, OUT_ARG2(%esp) - movl %ecx, OUT_ARG1(%esp) - movl rPC, OUT_ARG0(%esp) - call dvmReportPreNativeInvoke # (pc, self, methodToCall) + movl rPC, offThread_pc(%ecx) + movl rFP, offThread_fp(%ecx) + movl %ecx, OUT_ARG0(%esp) + movl %eax, OUT_ARG1(%esp) + call dvmReportPreNativeInvoke # (self, methodToCall) UNSPILL_TMP1(%eax) # restore methodToCall movl rSELF,%ecx # restore self @@ -465,10 +473,9 @@ common_invokeMethodNoRange: UNSPILL_TMP1(%eax) # restore methodToCall movl rSELF, %ecx - movl %eax, OUT_ARG2(%esp) - movl %ecx, OUT_ARG1(%esp) - movl rPC, OUT_ARG0(%esp) - call dvmReportPostNativeInvoke # (pc, self, methodToCall) + movl %ecx, OUT_ARG0(%esp) + movl %eax, OUT_ARG1(%esp) + call dvmReportPostNativeInvoke # (self, methodToCall) jmp 7b # rejoin .LstackOverflow: # eax=methodToCall @@ -509,10 +516,10 @@ common_returnFromMethod: * Handle special subMode actions * On entry, rFP: prevFP, %ecx: self, %eax: saveArea */ - movl rFP, OUT_ARG2(%esp) # parameter prevFP - movl rPC, OUT_ARG1(%esp) # parameter pc - movl %ecx, OUT_ARG1(%esp) # parameter self - call dvmReportReturn # (self, pc, prevFP) + movl rFP, 
offThread_fp(%ecx) # update interpSave.fp + movl rPC, offThread_pc(%ecx) # update interpSave.pc + movl %ecx, OUT_ARG0(%esp) # parameter self + call dvmReportReturn # (self) movl rSELF, %ecx # restore self SAVEAREA_FROM_FP %eax # restore saveArea jmp 14b