From: Ben Cheng
Date: Wed, 31 Mar 2010 18:59:18 +0000 (-0700)
Subject: Fix a race condition in JIT state refresh under debugging / misc code cleanup.
X-Git-Tag: android-x86-2.2~41
X-Git-Url: http://git.osdn.net/view?a=commitdiff_plain;h=a497359afa1abe4c5780c8799c6fe0edab551c2d;p=android-x86%2Fdalvik.git

Fix a race condition in JIT state refresh under debugging / misc code cleanup.

Bug: 2561283
Change-Id: I9fd94928f3e661de97098808340ea92b28cafa07
---

diff --git a/vm/compiler/Compiler.c b/vm/compiler/Compiler.c
index efbe0a5fd..6cb1c775d 100644
--- a/vm/compiler/Compiler.c
+++ b/vm/compiler/Compiler.c
@@ -385,7 +385,11 @@ bool compilerThreadStartup(void)
     gDvmJit.jitTableEntriesUsed = 0;
     gDvmJit.compilerHighWater =
         COMPILER_WORK_QUEUE_SIZE - (COMPILER_WORK_QUEUE_SIZE/4);
-    gDvmJit.pProfTable = pJitProfTable;
+    /*
+     * If the VM is launched with wait-on-the-debugger, we will need to hide
+     * the profile table here
+     */
+    gDvmJit.pProfTable = dvmDebuggerOrProfilerActive() ? NULL : pJitProfTable;
     gDvmJit.pProfTableCopy = pJitProfTable;
     dvmUnlockMutex(&gDvmJit.tableLock);
@@ -708,6 +712,16 @@ void dvmCompilerStateRefresh()
     bool jitActivate;
     bool needUnchain = false;
 
+    /*
+     * The tableLock might not be initialized yet by the compiler thread if
+     * debugger is attached from the very beginning of the VM launch. If
+     * pProfTableCopy is NULL, the lock is not initialized yet and we don't
+     * need to refresh anything either.
+     */
+    if (gDvmJit.pProfTableCopy == NULL) {
+        return;
+    }
+
     dvmLockMutex(&gDvmJit.tableLock);
     jitActive = gDvmJit.pProfTable != NULL;
     jitActivate = !(gDvm.debuggerActive || (gDvm.activeProfilers > 0));
diff --git a/vm/compiler/Compiler.h b/vm/compiler/Compiler.h
index 5b667355d..ba23d7df1 100644
--- a/vm/compiler/Compiler.h
+++ b/vm/compiler/Compiler.h
@@ -79,17 +79,20 @@ typedef struct ICPatchWorkOrder {
     PredictedChainingCell cellContent;      /* content of the new cell */
 } ICPatchWorkOrder;
 
+/* States of the dbg interpreter when serving a JIT-related request */
 typedef enum JitState {
-    kJitOff = 0,
-    kJitNormal = 1,            // Profiling in mterp or running native
-    kJitTSelectRequest = 2,    // Transition state - start trace selection
-    kJitTSelectRequestHot = 3, // Transition state - start hot trace selection
-    kJitTSelect = 4,           // Actively selecting trace in dbg interp
-    kJitTSelectAbort = 5,      // Something threw during selection - abort
-    kJitTSelectEnd = 6,        // Done with the trace - wrap it up
-    kJitSingleStep = 7,        // Single step interpretation
-    kJitSingleStepEnd = 8,     // Done with single step, return to mterp
-    kJitSelfVerification = 9,  // Self Verification Mode
+    /* Entering states in the debug interpreter */
+    kJitNot = 0,               // Non-JIT related reasons
+    kJitTSelectRequest = 1,    // Request a trace (subject to filtering)
+    kJitTSelectRequestHot = 2, // Request a hot trace (bypass the filter)
+    kJitSelfVerification = 3,  // Self Verification Mode
+
+    /* Operational states in the debug interpreter */
+    kJitTSelect = 4,           // Actively selecting a trace
+    kJitTSelectEnd = 5,        // Done with the trace - wrap it up
+    kJitSingleStep = 6,        // Single step interpretation
+    kJitSingleStepEnd = 7,     // Done with single step, ready to return to mterp
+    kJitDone = 8,              // Ready to leave the debug interpreter
 } JitState;
 
 #if defined(WITH_SELF_VERIFICATION)
diff --git a/vm/compiler/Utility.c b/vm/compiler/Utility.c
index 83caab745..b2654c407 100644
--- a/vm/compiler/Utility.c
+++ b/vm/compiler/Utility.c
@@ -76,7 +76,7 @@ retry:
         currentArena = newArena;
         numArenaBlocks++;
         if (numArenaBlocks > 10)
-            LOGD("Total arena pages for JIT: %d", numArenaBlocks);
for JIT: %d", numArenaBlocks); + LOGI("Total arena pages for JIT: %d", numArenaBlocks); goto retry; } return NULL; diff --git a/vm/compiler/codegen/arm/ArchUtility.c b/vm/compiler/codegen/arm/ArchUtility.c index 20b3f85a7..b0478f49c 100644 --- a/vm/compiler/codegen/arm/ArchUtility.c +++ b/vm/compiler/codegen/arm/ArchUtility.c @@ -257,41 +257,41 @@ void dvmDumpLIRInsn(LIR *arg, unsigned char *baseAddr) break; case kArmPseudoTargetLabel: break; - case ARM_PSEUDO_kChainingCellBackwardBranch: + case kArmPseudoChainingCellBackwardBranch: LOGD("-------- chaining cell (backward branch): 0x%04x\n", dest); break; - case ARM_PSEUDO_kChainingCellNormal: + case kArmPseudoChainingCellNormal: LOGD("-------- chaining cell (normal): 0x%04x\n", dest); break; - case ARM_PSEUDO_kChainingCellHot: + case kArmPseudoChainingCellHot: LOGD("-------- chaining cell (hot): 0x%04x\n", dest); break; - case ARM_PSEUDO_kChainingCellInvokePredicted: + case kArmPseudoChainingCellInvokePredicted: LOGD("-------- chaining cell (predicted)\n"); break; - case ARM_PSEUDO_kChainingCellInvokeSingleton: + case kArmPseudoChainingCellInvokeSingleton: LOGD("-------- chaining cell (invoke singleton): %s/%p\n", ((Method *)dest)->name, ((Method *)dest)->insns); break; - case ARM_PSEUDO_kEntryBlock: + case kArmPseudoEntryBlock: LOGD("-------- entry offset: 0x%04x\n", dest); break; - case ARM_PSEUDO_kDalvikByteCode_BOUNDARY: + case kArmPseudoDalvikByteCodeBoundary: LOGD("-------- dalvik offset: 0x%04x @ %s\n", dest, (char *) lir->operands[1]); break; - case ARM_PSEUDO_kExitBlock: + case kArmPseudoExitBlock: LOGD("-------- exit offset: 0x%04x\n", dest); break; case kArmPseudoPseudoAlign4: LOGD("%p (%04x): .align4\n", baseAddr + offset, offset); break; - case ARM_PSEUDO_kPCReconstruction_CELL: + case kArmPseudoPCReconstructionCell: LOGD("-------- reconstruct dalvik PC : 0x%04x @ +0x%04x\n", dest, lir->operands[1]); break; - case ARM_PSEUDO_kPCReconstruction_BLOCK_LABEL: + case kArmPseudoPCReconstructionBlockLabel: /* Do nothing */ break; case kArmPseudoEHBlockLabel: diff --git a/vm/compiler/codegen/arm/ArmLIR.h b/vm/compiler/codegen/arm/ArmLIR.h index e1073b604..f7704ade5 100644 --- a/vm/compiler/codegen/arm/ArmLIR.h +++ b/vm/compiler/codegen/arm/ArmLIR.h @@ -318,18 +318,18 @@ typedef enum ArmOpCode { kArmPseudoBarrier = -17, kArmPseudoExtended = -16, kArmPseudoSSARep = -15, - ARM_PSEUDO_kEntryBlock = -14, - ARM_PSEUDO_kExitBlock = -13, + kArmPseudoEntryBlock = -14, + kArmPseudoExitBlock = -13, kArmPseudoTargetLabel = -12, - ARM_PSEUDO_kChainingCellBackwardBranch = -11, - ARM_PSEUDO_kChainingCellHot = -10, - ARM_PSEUDO_kChainingCellInvokePredicted = -9, - ARM_PSEUDO_kChainingCellInvokeSingleton = -8, - ARM_PSEUDO_kChainingCellNormal = -7, - ARM_PSEUDO_kDalvikByteCode_BOUNDARY = -6, + kArmPseudoChainingCellBackwardBranch = -11, + kArmPseudoChainingCellHot = -10, + kArmPseudoChainingCellInvokePredicted = -9, + kArmPseudoChainingCellInvokeSingleton = -8, + kArmPseudoChainingCellNormal = -7, + kArmPseudoDalvikByteCodeBoundary = -6, kArmPseudoPseudoAlign4 = -5, - ARM_PSEUDO_kPCReconstruction_CELL = -4, - ARM_PSEUDO_kPCReconstruction_BLOCK_LABEL = -3, + kArmPseudoPCReconstructionCell = -4, + kArmPseudoPCReconstructionBlockLabel = -3, kArmPseudoEHBlockLabel = -2, kArmPseudoNormalBlockLabel = -1, /************************************************************************/ diff --git a/vm/compiler/codegen/arm/CodegenCommon.c b/vm/compiler/codegen/arm/CodegenCommon.c index 8f5c11da4..0cec99df8 100644 --- a/vm/compiler/codegen/arm/CodegenCommon.c +++ 
@@ -370,7 +370,7 @@ extern ArmLIR *genCheckCommon(CompilationUnit *cUnit, int dOffset,
     if (pcrLabel == NULL) {
         int dPC = (int) (cUnit->method->insns + dOffset);
         pcrLabel = dvmCompilerNew(sizeof(ArmLIR), true);
-        pcrLabel->opCode = ARM_PSEUDO_kPCReconstruction_CELL;
+        pcrLabel->opCode = kArmPseudoPCReconstructionCell;
         pcrLabel->operands[0] = dPC;
         pcrLabel->operands[1] = dOffset;
         /* Insert the place holder to the growable list */
diff --git a/vm/compiler/codegen/arm/CodegenDriver.c b/vm/compiler/codegen/arm/CodegenDriver.c
index e3ac5bb04..695f18c1a 100644
--- a/vm/compiler/codegen/arm/CodegenDriver.c
+++ b/vm/compiler/codegen/arm/CodegenDriver.c
@@ -911,7 +911,7 @@ static void genReturnCommon(CompilationUnit *cUnit, MIR *mir)
     ArmLIR *branch = genUnconditionalBranch(cUnit, NULL);
     /* Set up the place holder to reconstruct this Dalvik PC */
     ArmLIR *pcrLabel = dvmCompilerNew(sizeof(ArmLIR), true);
-    pcrLabel->opCode = ARM_PSEUDO_kPCReconstruction_CELL;
+    pcrLabel->opCode = kArmPseudoPCReconstructionCell;
     pcrLabel->operands[0] = dPC;
     pcrLabel->operands[1] = mir->offset;
     /* Insert the place holder to the growable list */
@@ -1148,7 +1148,7 @@ static void genInvokeVirtualCommon(CompilationUnit *cUnit, MIR *mir,
     if (pcrLabel == NULL) {
         int dPC = (int) (cUnit->method->insns + mir->offset);
         pcrLabel = dvmCompilerNew(sizeof(ArmLIR), true);
-        pcrLabel->opCode = ARM_PSEUDO_kPCReconstruction_CELL;
+        pcrLabel->opCode = kArmPseudoPCReconstructionCell;
         pcrLabel->operands[0] = dPC;
         pcrLabel->operands[1] = mir->offset;
         /* Insert the place holder to the growable list */
@@ -2811,7 +2811,7 @@ static bool handleFmt35c_3rc(CompilationUnit *cUnit, MIR *mir, BasicBlock *bb,
     if (pcrLabel == NULL) {
         int dPC = (int) (cUnit->method->insns + mir->offset);
         pcrLabel = dvmCompilerNew(sizeof(ArmLIR), true);
-        pcrLabel->opCode = ARM_PSEUDO_kPCReconstruction_CELL;
+        pcrLabel->opCode = kArmPseudoPCReconstructionCell;
         pcrLabel->operands[0] = dPC;
         pcrLabel->operands[1] = mir->offset;
         /* Insert the place holder to the growable list */
@@ -3487,7 +3487,7 @@ static void setupLoopEntryBlock(CompilationUnit *cUnit, BasicBlock *entry,
 {
     /* Set up the place holder to reconstruct this Dalvik PC */
     ArmLIR *pcrLabel = dvmCompilerNew(sizeof(ArmLIR), true);
-    pcrLabel->opCode = ARM_PSEUDO_kPCReconstruction_CELL;
+    pcrLabel->opCode = kArmPseudoPCReconstructionCell;
     pcrLabel->operands[0] =
         (int) (cUnit->method->insns + entry->startOffset);
     pcrLabel->operands[1] = entry->startOffset;
@@ -3598,7 +3598,7 @@ void dvmCompilerMIR2LIR(CompilationUnit *cUnit)
         }
 
         if (blockList[i]->blockType == kEntryBlock) {
-            labelList[i].opCode = ARM_PSEUDO_kEntryBlock;
+            labelList[i].opCode = kArmPseudoEntryBlock;
             if (blockList[i]->firstMIRInsn == NULL) {
                 continue;
             } else {
@@ -3606,7 +3606,7 @@ void dvmCompilerMIR2LIR(CompilationUnit *cUnit)
                     &labelList[blockList[i]->fallThrough->id]);
             }
         } else if (blockList[i]->blockType == kExitBlock) {
-            labelList[i].opCode = ARM_PSEUDO_kExitBlock;
+            labelList[i].opCode = kArmPseudoExitBlock;
             goto gen_fallthrough;
         } else if (blockList[i]->blockType == kDalvikByteCode) {
             labelList[i].opCode = kArmPseudoNormalBlockLabel;
@@ -3617,14 +3617,14 @@ void dvmCompilerMIR2LIR(CompilationUnit *cUnit)
         } else {
             switch (blockList[i]->blockType) {
                 case kChainingCellNormal:
-                    labelList[i].opCode = ARM_PSEUDO_kChainingCellNormal;
+                    labelList[i].opCode = kArmPseudoChainingCellNormal;
                     /* handle the codegen later */
                     dvmInsertGrowableList(
                         &chainingListByType[kChainingCellNormal], (void *) i);
                     break;
                 case kChainingCellInvokeSingleton:
                    labelList[i].opCode =
-                        ARM_PSEUDO_kChainingCellInvokeSingleton;
+                        kArmPseudoChainingCellInvokeSingleton;
                    labelList[i].operands[0] =
                        (int) blockList[i]->containingMethod;
                    /* handle the codegen later */
@@ -3634,7 +3634,7 @@ void dvmCompilerMIR2LIR(CompilationUnit *cUnit)
                     break;
                 case kChainingCellInvokePredicted:
                    labelList[i].opCode =
-                        ARM_PSEUDO_kChainingCellInvokePredicted;
+                        kArmPseudoChainingCellInvokePredicted;
                    /* handle the codegen later */
                    dvmInsertGrowableList(
                        &chainingListByType[kChainingCellInvokePredicted],
@@ -3642,7 +3642,7 @@ void dvmCompilerMIR2LIR(CompilationUnit *cUnit)
                     break;
                 case kChainingCellHot:
                    labelList[i].opCode =
-                        ARM_PSEUDO_kChainingCellHot;
+                        kArmPseudoChainingCellHot;
                    /* handle the codegen later */
                    dvmInsertGrowableList(
                        &chainingListByType[kChainingCellHot],
@@ -3651,7 +3651,7 @@ void dvmCompilerMIR2LIR(CompilationUnit *cUnit)
                 case kPCReconstruction:
                    /* Make sure exception handling block is next */
                    labelList[i].opCode =
-                        ARM_PSEUDO_kPCReconstruction_BLOCK_LABEL;
+                        kArmPseudoPCReconstructionBlockLabel;
                    assert (i == cUnit->numBlocks - 2);
                    handlePCReconstruction(cUnit, &labelList[i+1]);
                    break;
@@ -3667,7 +3667,7 @@ void dvmCompilerMIR2LIR(CompilationUnit *cUnit)
 #if defined(WITH_SELF_VERIFICATION) || defined(WITH_JIT_TUNING)
                 case kChainingCellBackwardBranch:
                    labelList[i].opCode =
-                        ARM_PSEUDO_kChainingCellBackwardBranch;
+                        kArmPseudoChainingCellBackwardBranch;
                    /* handle the codegen later */
                    dvmInsertGrowableList(
                        &chainingListByType[kChainingCellBackwardBranch],
@@ -3703,7 +3703,7 @@ void dvmCompilerMIR2LIR(CompilationUnit *cUnit)
             InstructionFormat dalvikFormat =
                 dexGetInstrFormat(gDvm.instrFormat, dalvikOpCode);
             ArmLIR *boundaryLIR =
-                newLIR2(cUnit, ARM_PSEUDO_kDalvikByteCode_BOUNDARY,
+                newLIR2(cUnit, kArmPseudoDalvikByteCodeBoundary,
                         mir->offset,
                         (int) dvmCompilerGetDalvikDisassembly(&mir->dalvikInsn)
                         );
diff --git a/vm/interp/Interp.c b/vm/interp/Interp.c
index 4e7a7e3c2..440152046 100644
--- a/vm/interp/Interp.c
+++ b/vm/interp/Interp.c
@@ -1264,7 +1264,8 @@ void dvmInterpret(Thread* self, const Method* method, JValue* pResult)
 #endif
 #if defined(WITH_JIT)
     dvmJitCalleeSave(interpState.calleeSave);
-    interpState.jitState = gDvmJit.pJitEntryTable ? kJitNormal : kJitOff;
+    /* Initialize the state to kJitNot */
+    interpState.jitState = kJitNot;
 
     /* Setup the Jit-to-interpreter entry points */
     interpState.jitToInterpEntries = jitToInterpEntries;
diff --git a/vm/interp/Jit.c b/vm/interp/Jit.c
index dad3a4c8c..6ec558477 100644
--- a/vm/interp/Jit.c
+++ b/vm/interp/Jit.c
@@ -502,7 +502,7 @@ void resetTracehead(InterpState* interpState, JitEntry *slot)
 void dvmJitAbortTraceSelect(InterpState* interpState)
 {
     if (interpState->jitState == kJitTSelect)
-        interpState->jitState = kJitTSelectAbort;
+        interpState->jitState = kJitDone;
 }
 
 /*
@@ -593,6 +593,7 @@ static JitEntry *lookupAndAdd(const u2* dPC, bool callerLocked)
     }
     return (idx == chainEndMarker) ? NULL : &gDvmJit.pJitEntryTable[idx];
 }
+
 /*
  * Adds to the current trace request one instruction at a time, just
  * before that instruction is interpreted.  This is the primary trace
@@ -613,11 +614,7 @@ int dvmCheckJit(const u2* pc, Thread* self, InterpState* interpState)
 {
     int flags,i,len;
     int switchInterp = false;
-    int debugOrProfile = (gDvm.debuggerActive || self->suspendCount
-#if defined(WITH_PROFILER)
-                          || gDvm.activeProfilers
-#endif
-                         );
+    bool debugOrProfile = dvmDebuggerOrProfilerActive();
 
     /* Prepare to handle last PC and stage the current PC */
     const u2 *lastPC = interpState->lastPC;
@@ -695,9 +692,9 @@ int dvmCheckJit(const u2* pc, Thread* self, InterpState* interpState)
             if (interpState->totalTraceLen >= JIT_MAX_TRACE_LEN) {
                 interpState->jitState = kJitTSelectEnd;
             }
+            /* Abandon the trace request if debugger/profiler is attached */
             if (debugOrProfile) {
-                interpState->jitState = kJitTSelectAbort;
-                switchInterp = !debugOrProfile;
+                interpState->jitState = kJitDone;
                 break;
             }
             if ((flags & kInstrCanReturn) != kInstrCanReturn) {
@@ -706,10 +703,11 @@ int dvmCheckJit(const u2* pc, Thread* self, InterpState* interpState)
             /* NOTE: intentional fallthrough for returns */
         case kJitTSelectEnd:
             {
+                /* Bad trace */
                 if (interpState->totalTraceLen == 0) {
                     /* Bad trace - mark as untranslatable */
-                    dvmJitAbortTraceSelect(interpState);
-                    switchInterp = !debugOrProfile;
+                    interpState->jitState = kJitDone;
+                    switchInterp = true;
                     break;
                 }
                 JitTraceDescription* desc =
@@ -718,14 +716,12 @@ int dvmCheckJit(const u2* pc, Thread* self, InterpState* interpState)
                 if (desc == NULL) {
                     LOGE("Out of memory in trace selection");
                     dvmJitStopTranslationRequests();
-                    interpState->jitState = kJitTSelectAbort;
-                    dvmJitAbortTraceSelect(interpState);
-                    switchInterp = !debugOrProfile;
+                    interpState->jitState = kJitDone;
+                    switchInterp = true;
                     break;
                 }
                 interpState->trace[interpState->currTraceRun].frag.runEnd =
                     true;
-                interpState->jitState = kJitNormal;
                 desc->method = interpState->method;
                 memcpy((char*)&(desc->trace[0]),
                     (char*)&(interpState->trace[0]),
@@ -755,7 +751,8 @@ int dvmCheckJit(const u2* pc, Thread* self, InterpState* interpState)
                 if (jitEntry) {
                     setTraceConstruction(jitEntry, false);
                 }
-                switchInterp = !debugOrProfile;
+                interpState->jitState = kJitDone;
+                switchInterp = true;
             }
             break;
         case kJitSingleStep:
@@ -763,20 +760,11 @@ int dvmCheckJit(const u2* pc, Thread* self, InterpState* interpState)
             break;
         case kJitSingleStepEnd:
             interpState->entryPoint = kInterpEntryResume;
-            switchInterp = !debugOrProfile;
-            break;
-        case kJitTSelectRequest:
-        case kJitTSelectRequestHot:
-        case kJitTSelectAbort:
-#if defined(SHOW_TRACE)
-            LOGD("TraceGen: trace abort");
-#endif
-            dvmJitAbortTraceSelect(interpState);
-            interpState->jitState = kJitNormal;
-            switchInterp = !debugOrProfile;
+            interpState->jitState = kJitDone;
+            switchInterp = true;
             break;
-        case kJitNormal:
-            switchInterp = !debugOrProfile;
+        case kJitDone:
+            switchInterp = true;
             break;
 #if defined(WITH_SELF_VERIFICATION)
         case kJitSelfVerification:
@@ -786,23 +774,36 @@ int dvmCheckJit(const u2* pc, Thread* self, InterpState* interpState)
                  * interpreter now.
                  */
                 if (interpState->jitState != kJitSingleStepEnd) {
-                    interpState->jitState = kJitNormal;
-                    switchInterp = !debugOrProfile;
+                    interpState->jitState = kJitDone;
+                    switchInterp = true;
                 }
             }
             break;
 #endif
-        /* If JIT is off stay out of interpreter selections */
-        case kJitOff:
+        /*
+         * If the debug interpreter was entered for non-JIT reasons, check if
+         * the original reason still holds. If not, we have to force the
+         * interpreter switch here and use dvmDebuggerOrProfilerActive instead
+         * of dvmJitDebuggerOrProfilerActive since the latter will always
+         * return true when the debugger/profiler is already detached and the
+         * JIT profiling table is restored.
+         */
+        case kJitNot:
+            switchInterp = !dvmDebuggerOrProfilerActive();
             break;
         default:
-            if (!debugOrProfile) {
-                LOGE("Unexpected JIT state: %d", interpState->jitState);
-                dvmAbort();
-            }
+            LOGE("Unexpected JIT state: %d entry point: %d",
+                 interpState->jitState, interpState->entryPoint);
+            dvmAbort();
             break;
     }
-    return switchInterp;
+    /*
+     * Final check to see if we can really switch the interpreter. Make sure
+     * the jitState is kJitDone or kJitNot when switchInterp is set to true.
+     */
+    assert(switchInterp == false || interpState->jitState == kJitDone ||
+           interpState->jitState == kJitNot);
+    return switchInterp && !debugOrProfile;
 }
 
 JitEntry *dvmFindJitEntry(const u2* pc)
@@ -888,23 +889,21 @@ void dvmJitSetCodeAddr(const u2* dPC, void *nPC, JitInstructionSetType set) {
 /*
  * Determine if valid trace-bulding request is active. Return true
  * if we need to abort and switch back to the fast interpreter, false
- * otherwise. NOTE: may be called even when trace selection is not being
- * requested
+ * otherwise.
  */
 bool dvmJitCheckTraceRequest(Thread* self, InterpState* interpState)
 {
-    bool res = false;          /* Assume success */
+    bool switchInterp = false; /* Assume success */
     int i;
     intptr_t filterKey = ((intptr_t) interpState->pc) >>
                          JIT_TRACE_THRESH_FILTER_GRAN_LOG2;
+    bool debugOrProfile = dvmDebuggerOrProfilerActive();
 
-    /*
-     * If previous trace-building attempt failed, force it's head to be
-     * interpret-only.
-     */
-    if (gDvmJit.pJitEntryTable != NULL) {
-        /* Bypass the filter for hot trace requests */
-        if (interpState->jitState != kJitTSelectRequestHot) {
+    /* Check if the JIT request can be handled now */
+    if (gDvmJit.pJitEntryTable != NULL && debugOrProfile == false) {
+        /* Bypass the filter for hot trace requests or during stress mode */
+        if (interpState->jitState == kJitTSelectRequest &&
+            gDvmJit.threshold > 6) {
             /* Two-level filtering scheme */
             for (i=0; i< JIT_TRACE_THRESH_FILTER_SIZE; i++) {
                 if (filterKey == interpState->threshFilter[i]) {
@@ -919,28 +918,21 @@ bool dvmJitCheckTraceRequest(Thread* self, InterpState* interpState)
                  */
                 i = rand() % JIT_TRACE_THRESH_FILTER_SIZE;
                 interpState->threshFilter[i] = filterKey;
-                res = true;
+                interpState->jitState = kJitDone;
             }
+        }
 
-            /* If stress mode (threshold <= 6), always translate */
-            res &= (gDvmJit.threshold > 6);
+        /* If the compiler is backlogged, cancel any JIT actions */
+        if (gDvmJit.compilerQueueLength >= gDvmJit.compilerHighWater) {
+            interpState->jitState = kJitDone;
         }
 
         /*
-         * If the compiler is backlogged, or if a debugger or profiler is
-         * active, cancel any JIT actions
+         * Check for additional reasons that might force the trace select
+         * request to be dropped
          */
-        if (res || (gDvmJit.compilerQueueLength >= gDvmJit.compilerHighWater)
-              || gDvm.debuggerActive || self->suspendCount
-#if defined(WITH_PROFILER)
-              || gDvm.activeProfilers
-#endif
-             ) {
-            if (interpState->jitState != kJitOff) {
-                interpState->jitState = kJitNormal;
-            }
-        } else if (interpState->jitState == kJitTSelectRequest ||
-                   interpState->jitState == kJitTSelectRequestHot) {
+        if (interpState->jitState == kJitTSelectRequest ||
+            interpState->jitState == kJitTSelectRequestHot) {
            JitEntry *slot = lookupAndAdd(interpState->pc, false);
            if (slot == NULL) {
                /*
@@ -949,7 +941,7 @@ bool dvmJitCheckTraceRequest(Thread* self, InterpState* interpState)
                 * resized before we run into it here. Assume bad things
                 * are afoot and disable profiling.
                 */
-                interpState->jitState = kJitTSelectAbort;
+                interpState->jitState = kJitDone;
                LOGD("JIT: JitTable full, disabling profiling");
                dvmJitStopTranslationRequests();
            } else if (slot->u.info.traceConstruction) {
@@ -963,11 +955,11 @@ bool dvmJitCheckTraceRequest(Thread* self, InterpState* interpState)
                 * template instead of the real translation during this
                 * window. Performance, but not correctness, issue.
                 */
-                interpState->jitState = kJitTSelectAbort;
+                interpState->jitState = kJitDone;
                resetTracehead(interpState, slot);
            } else if (slot->codeAddress) {
                /* Nothing to do here - just return */
-                interpState->jitState = kJitTSelectAbort;
+                interpState->jitState = kJitDone;
            } else {
                /*
                 * Mark request. Note, we are not guaranteed exclusivity
@@ -980,39 +972,49 @@ bool dvmJitCheckTraceRequest(Thread* self, InterpState* interpState)
                setTraceConstruction(slot, true);
            }
        }
+
        switch (interpState->jitState) {
            case kJitTSelectRequest:
            case kJitTSelectRequestHot:
-                 interpState->jitState = kJitTSelect;
-                 interpState->currTraceHead = interpState->pc;
-                 interpState->currTraceRun = 0;
-                 interpState->totalTraceLen = 0;
-                 interpState->currRunHead = interpState->pc;
-                 interpState->currRunLen = 0;
-                 interpState->trace[0].frag.startOffset =
-                       interpState->pc - interpState->method->insns;
-                 interpState->trace[0].frag.numInsts = 0;
-                 interpState->trace[0].frag.runEnd = false;
-                 interpState->trace[0].frag.hint = kJitHintNone;
-                 interpState->lastPC = 0;
-                 break;
-            case kJitTSelect:
-            case kJitTSelectAbort:
-                 res = true;
-            case kJitSingleStep:
-            case kJitSingleStepEnd:
-            case kJitOff:
-            case kJitNormal:
-#if defined(WITH_SELF_VERIFICATION)
-            case kJitSelfVerification:
-#endif
+                interpState->jitState = kJitTSelect;
+                interpState->currTraceHead = interpState->pc;
+                interpState->currTraceRun = 0;
+                interpState->totalTraceLen = 0;
+                interpState->currRunHead = interpState->pc;
+                interpState->currRunLen = 0;
+                interpState->trace[0].frag.startOffset =
+                    interpState->pc - interpState->method->insns;
+                interpState->trace[0].frag.numInsts = 0;
+                interpState->trace[0].frag.runEnd = false;
+                interpState->trace[0].frag.hint = kJitHintNone;
+                interpState->lastPC = 0;
+                break;
+            /*
+             * From the JIT's perspective there is no need to stay in the debug
+             * interpreter unless debugger/profiler is attached.
+             */
+            case kJitDone:
+                switchInterp = true;
                break;
            default:
-                LOGE("Unexpected JIT state: %d", interpState->jitState);
+                LOGE("Unexpected JIT state: %d entry point: %d",
+                     interpState->jitState, interpState->entryPoint);
                dvmAbort();
        }
+    } else {
+        /*
+         * Cannot build trace this time - ready to leave the dbg interpreter
+         */
+        interpState->jitState = kJitDone;
+        switchInterp = true;
    }
-    return res;
+
+    /*
+     * Final check to see if we can really switch the interpreter. Make sure
+     * the jitState is kJitDone when switchInterp is set to true.
+     */
+    assert(switchInterp == false || interpState->jitState == kJitDone);
+    return switchInterp && !debugOrProfile;
 }
 
 /*
@@ -1032,7 +1034,7 @@ bool dvmJitResizeJitTable( unsigned int size )
     assert(gDvmJit.pJitEntryTable != NULL);
     assert(size && !(size & (size - 1)));   /* Is power of 2? */
*/ - LOGD("Jit: resizing JitTable from %d to %d", gDvmJit.jitTableSize, size); + LOGI("Jit: resizing JitTable from %d to %d", gDvmJit.jitTableSize, size); newMask = size - 1; diff --git a/vm/mterp/common/asm-constants.h b/vm/mterp/common/asm-constants.h index 4260fd633..135abfdde 100644 --- a/vm/mterp/common/asm-constants.h +++ b/vm/mterp/common/asm-constants.h @@ -293,16 +293,15 @@ MTERP_CONSTANT(kInterpEntryResume, 3) #endif #if defined(WITH_JIT) -MTERP_CONSTANT(kJitOff, 0) -MTERP_CONSTANT(kJitNormal, 1) -MTERP_CONSTANT(kJitTSelectRequest, 2) -MTERP_CONSTANT(kJitTSelectRequestHot, 3) +MTERP_CONSTANT(kJitNot, 0) +MTERP_CONSTANT(kJitTSelectRequest, 1) +MTERP_CONSTANT(kJitTSelectRequestHot, 2) +MTERP_CONSTANT(kJitSelfVerification, 3) MTERP_CONSTANT(kJitTSelect, 4) -MTERP_CONSTANT(kJitTSelectAbort, 5) -MTERP_CONSTANT(kJitTSelectEnd, 6) -MTERP_CONSTANT(kJitSingleStep, 7) -MTERP_CONSTANT(kJitSingleStepEnd, 8) -MTERP_CONSTANT(kJitSelfVerification, 9) +MTERP_CONSTANT(kJitTSelectEnd, 5) +MTERP_CONSTANT(kJitSingleStep, 6) +MTERP_CONSTANT(kJitSingleStepEnd, 7) +MTERP_CONSTANT(kJitDone, 8) #if defined(WITH_SELF_VERIFICATION) MTERP_CONSTANT(kSVSIdle, 0) diff --git a/vm/mterp/out/InterpC-portdbg.c b/vm/mterp/out/InterpC-portdbg.c index fbb06bfe7..c27582bf8 100644 --- a/vm/mterp/out/InterpC-portdbg.c +++ b/vm/mterp/out/InterpC-portdbg.c @@ -1481,18 +1481,16 @@ bool INTERP_FUNC_NAME(Thread* self, InterpState* interpState) #if INTERP_TYPE == INTERP_DBG /* Check to see if we've got a trace selection request. */ if ( -#if defined(WITH_SELF_VERIFICATION) - (interpState->jitState != kJitSelfVerification) && -#endif /* - * Don't bail out of the dbg interpreter if the entry reason is to - * deal with a thrown exception. + * Only perform dvmJitCheckTraceRequest if the entry point is + * EntryInstr and the jit state is either kJitTSelectRequest or + * kJitTSelectRequestHot. If debugger/profiler happens to be attached, + * dvmJitCheckTraceRequest will change the jitState to kJitDone but + * but stay in the dbg interpreter. */ - (interpState->entryPoint != kInterpEntryThrow) && - !gDvm.debuggerActive && -#if defined(WITH_PROFILER) - (gDvm.activeProfilers == 0) && -#endif + (interpState->entryPoint == kInterpEntryInstr) && + (interpState->jitState == kJitTSelectRequest || + interpState->jitState == kJitTSelectRequestHot) && dvmJitCheckTraceRequest(self, interpState)) { interpState->nextMode = INTERP_STD; //LOGD("Invalid trace request, exiting\n"); diff --git a/vm/mterp/out/InterpC-portstd.c b/vm/mterp/out/InterpC-portstd.c index a0bc17e49..1e7550cff 100644 --- a/vm/mterp/out/InterpC-portstd.c +++ b/vm/mterp/out/InterpC-portstd.c @@ -1220,18 +1220,16 @@ bool INTERP_FUNC_NAME(Thread* self, InterpState* interpState) #if INTERP_TYPE == INTERP_DBG /* Check to see if we've got a trace selection request. */ if ( -#if defined(WITH_SELF_VERIFICATION) - (interpState->jitState != kJitSelfVerification) && -#endif /* - * Don't bail out of the dbg interpreter if the entry reason is to - * deal with a thrown exception. + * Only perform dvmJitCheckTraceRequest if the entry point is + * EntryInstr and the jit state is either kJitTSelectRequest or + * kJitTSelectRequestHot. If debugger/profiler happens to be attached, + * dvmJitCheckTraceRequest will change the jitState to kJitDone but + * but stay in the dbg interpreter. 
          */
-        (interpState->entryPoint != kInterpEntryThrow) &&
-        !gDvm.debuggerActive &&
-#if defined(WITH_PROFILER)
-        (gDvm.activeProfilers == 0) &&
-#endif
+        (interpState->entryPoint == kInterpEntryInstr) &&
+        (interpState->jitState == kJitTSelectRequest ||
+         interpState->jitState == kJitTSelectRequestHot) &&
         dvmJitCheckTraceRequest(self, interpState)) {
         interpState->nextMode = INTERP_STD;
         //LOGD("Invalid trace request, exiting\n");
diff --git a/vm/mterp/portable/entry.c b/vm/mterp/portable/entry.c
index 6789450f5..dbd556112 100644
--- a/vm/mterp/portable/entry.c
+++ b/vm/mterp/portable/entry.c
@@ -45,18 +45,16 @@ bool INTERP_FUNC_NAME(Thread* self, InterpState* interpState)
 #if INTERP_TYPE == INTERP_DBG
     /* Check to see if we've got a trace selection request. */
     if (
-#if defined(WITH_SELF_VERIFICATION)
-        (interpState->jitState != kJitSelfVerification) &&
-#endif
         /*
-         * Don't bail out of the dbg interpreter if the entry reason is to
-         * deal with a thrown exception.
+         * Only perform dvmJitCheckTraceRequest if the entry point is
+         * EntryInstr and the jit state is either kJitTSelectRequest or
+         * kJitTSelectRequestHot. If debugger/profiler happens to be attached,
+         * dvmJitCheckTraceRequest will change the jitState to kJitDone but
+         * stay in the dbg interpreter.
          */
-        (interpState->entryPoint != kInterpEntryThrow) &&
-        !gDvm.debuggerActive &&
-#if defined(WITH_PROFILER)
-        (gDvm.activeProfilers == 0) &&
-#endif
+        (interpState->entryPoint == kInterpEntryInstr) &&
+        (interpState->jitState == kJitTSelectRequest ||
+         interpState->jitState == kJitTSelectRequestHot) &&
         dvmJitCheckTraceRequest(self, interpState)) {
         interpState->nextMode = INTERP_STD;
         //LOGD("Invalid trace request, exiting\n");