bool blockingMode;
bool methodTraceSupport;
bool genSuspendPoll;
+ Thread* compilerThread;
pthread_t compilerHandle;
pthread_mutex_t compilerLock;
pthread_mutex_t compilerICPatchLock;
/* Compiled code cache */
void* codeCache;
+ /*
+ * This is used to store the base address of an in-flight compilation whose
+ * class object pointers have been calculated to populate literal pool.
+ * Once the compiler thread has changed its status to VM_WAIT, we cannot
+ * guarantee whether GC has happened before the code address has been
+ * installed to the JIT table. Because of that, this field can only
+ * be cleared/overwritten by the compiler thread if it is in the
+ * THREAD_RUNNING state or in a safe point.
+ */
+ void *inflightBaseAddr;
+
/* Translation cache version (protected by compilerLock */
int cacheVersion;
int icPatchQueued;
int icPatchRejected;
int icPatchDropped;
- u8 jitTime;
int codeCachePatches;
+ int numCompilerThreadBlockGC;
+ u8 jitTime;
+ u8 compilerThreadBlockGCStart;
+ u8 compilerThreadBlockGCTime;
+ u8 maxCompilerThreadBlockGCTime;
#endif
/* Place arrays at the end to ease the display in gdb sessions */
}
#if defined(WITH_JIT) && defined(WITH_JIT_TUNING)
+/*
+ * Sample callback function for dvmJitScanAllClassPointers.
+ *
+ * "ptr" is the address of a slot that holds a ClassObject* (not the class
+ * object itself), so it is dereferenced once more before the descriptor
+ * is logged.
+ */
+void printAllClass(void *ptr)
+{
+    ClassObject **classPP = (ClassObject **) ptr;
+    LOGE("class %s", (*classPP)->descriptor);
+
+}
+
/*
* Respond to a SIGUSR2 by dumping some JIT stats and possibly resetting
* the code cache.
{
static int codeCacheResetCount = 0;
if ((--codeCacheResetCount & 7) == 0) {
+ /* Dump all class pointers in the traces */
+ dvmJitScanAllClassPointers(printAllClass);
gDvmJit.codeCacheFull = true;
} else {
dvmCompilerDumpStats();
return work;
}
-
/*
* Enqueue a work order - retrying until successful. If attempt to enqueue
* is repeatedly unsuccessful, assume the JIT is in a bad state and force a
gDvmJit.compilerICPatchIndex = 0;
dvmUnlockMutex(&gDvmJit.compilerICPatchLock);
+ /*
+ * Reset the inflight compilation address (can only be done in safe points
+ * or by the compiler thread when its thread state is RUNNING).
+ */
+ gDvmJit.inflightBaseAddr = NULL;
+
/* All clear now */
gDvmJit.codeCacheFull = false;
goto fail;
}
+ /* Cache the thread pointer */
+ gDvmJit.compilerThread = dvmThreadSelf();
+
dvmLockMutex(&gDvmJit.compilerLock);
/* Track method-level compilation statistics */
typedef struct ICPatchWorkOrder {
PredictedChainingCell *cellAddr; /* Address to be patched */
PredictedChainingCell cellContent; /* content of the new cell */
+ const char *classDescriptor; /* Descriptor of the class object */
+ Object *classLoader; /* Class loader */
+ u4 serialNumber; /* Serial # (for verification only) */
} ICPatchWorkOrder;
/*
void dvmCompilerDumpStats(void);
void dvmCompilerDrainQueue(void);
void dvmJitUnchainAll(void);
+void dvmJitScanAllClassPointers(void (*callback)(void *ptr));
void dvmCompilerSortAndPrintTraceProfiles(void);
void dvmCompilerPerformSafePointChecks(void);
void dvmCompilerInlineMIR(struct CompilationUnit *cUnit,
#define MIR_INVOKE_METHOD_JIT (1 << kMIRInvokeMethodJIT)
typedef struct CallsiteInfo {
- const ClassObject *clazz;
+ const char *classDescriptor;
+ Object *classLoader;
const Method *method;
LIR *misPredBranchOver;
} CallsiteInfo;
const JitTraceDescription *traceDesc;
LIR *firstLIRInsn;
LIR *lastLIRInsn;
- LIR *wordList;
+ LIR *literalList; // Constants
+ LIR *classPointerList; // Relocatable
+ int numClassPointers;
LIR *chainCellOffsetLIR;
GrowableList pcReconstructionList;
int headerSize; // bytes before the first code ptr
void *baseAddr;
bool printMe;
bool allSingleStep;
+ bool hasClassLiterals; // Contains class ptrs used as literals
bool hasLoop; // Contains a loop
bool hasInvoke; // Contains an invoke instruction
bool heapMemOp; // Mark mem ops for self verification
{
const DexCode *dexCode = dvmGetMethodCode(desc->method);
const JitTraceRun* currRun = &desc->trace[0];
- unsigned int curOffset = currRun->frag.startOffset;
- unsigned int numInsts = currRun->frag.numInsts;
+ unsigned int curOffset = currRun->info.frag.startOffset;
+ unsigned int numInsts = currRun->info.frag.numInsts;
const u2 *codePtr = dexCode->insns + curOffset;
int traceSize = 0; // # of half-words
const u2 *startCodePtr = codePtr;
int flags = dexGetFlagsFromOpcode(insn->dalvikInsn.opcode);
if (flags & kInstrInvoke) {
- const Method *calleeMethod = (const Method *) currRun[2].meta;
+ const Method *calleeMethod = (const Method *)
+ currRun[JIT_TRACE_CUR_METHOD].info.meta;
assert(numInsts == 1);
CallsiteInfo *callsiteInfo =
(CallsiteInfo *)dvmCompilerNew(sizeof(CallsiteInfo), true);
- callsiteInfo->clazz = (ClassObject *)currRun[1].meta;
+ callsiteInfo->classDescriptor = (const char *)
+ currRun[JIT_TRACE_CLASS_DESC].info.meta;
+ callsiteInfo->classLoader = (Object *)
+ currRun[JIT_TRACE_CLASS_LOADER].info.meta;
callsiteInfo->method = calleeMethod;
insn->meta.callsiteInfo = callsiteInfo;
}
break;
}
if (--numInsts == 0) {
- if (currRun->frag.runEnd) {
+ if (currRun->info.frag.runEnd) {
break;
} else {
/* Advance to the next trace description (ie non-meta info) */
do {
currRun++;
- } while (!currRun->frag.isCode);
+ } while (!currRun->isCode);
/* Dummy end-of-run marker seen */
- if (currRun->frag.numInsts == 0) {
+ if (currRun->info.frag.numInsts == 0) {
break;
}
curBB = dvmCompilerNewBB(kDalvikByteCode, numBlocks++);
dvmInsertGrowableList(blockList, (intptr_t) curBB);
- curOffset = currRun->frag.startOffset;
- numInsts = currRun->frag.numInsts;
+ curOffset = currRun->info.frag.startOffset;
+ numInsts = currRun->info.frag.numInsts;
curBB->startOffset = curOffset;
codePtr = dexCode->insns + curOffset;
}
desc->method->clazz->descriptor,
desc->method->name,
signature,
- desc->trace[0].frag.startOffset,
+ desc->trace[0].info.frag.startOffset,
traceSize,
dexCode->insnsSize,
numBlocks);
cUnit.numInsts);
}
- /* Reset the compiler resource pool */
- dvmCompilerArenaReset();
-
if (cUnit.assemblerStatus == kRetryHalve) {
+ /* Reset the compiler resource pool before retry */
+ dvmCompilerArenaReset();
+
/* Halve the instruction count and start from the top */
return dvmCompileTrace(desc, cUnit.numInsts / 2, info, bailPtr,
optHints);
}
+ /*
+ * If this trace uses class objects as constants,
+ * dvmJitInstallClassObjectPointers will switch the thread state
+ * to running and look up the class pointers using the descriptor/loader
+ * tuple stored in the callsite info structure. We need to make this window
+ * as short as possible since it is blocking GC.
+ */
+ if (cUnit.hasClassLiterals && info->codeAddress) {
+ dvmJitInstallClassObjectPointers(&cUnit, (char *) info->codeAddress);
+ }
+
+ /*
+ * Since callsiteinfo is allocated from the arena, delay the reset until
+ * class pointers are resolved.
+ */
+ dvmCompilerArenaReset();
+
assert(cUnit.assemblerStatus == kSuccess);
#if defined(WITH_JIT_TUNING)
methodStats->nativeSize += cUnit.totalSize;
}
}
}
+
+/*
+ * Load a class pointer value into a fixed or temp register. Target
+ * register is clobbered, and marked inUse.
+ *
+ * "value" is the word recorded in the class pointer pool; callers pass a
+ * CallsiteInfo* here, which is later converted to the real class pointer
+ * by dvmJitInstallClassObjectPointers after assembly.
+ */
+static ArmLIR *loadClassPointer(CompilationUnit *cUnit, int rDest, int value)
+{
+    ArmLIR *res;
+    /* Flag the translation so the pointer-installation pass runs on it */
+    cUnit->hasClassLiterals = true;
+    if (dvmCompilerIsTemp(cUnit, rDest)) {
+        dvmCompilerClobber(cUnit, rDest);
+        dvmCompilerMarkInUse(cUnit, rDest);
+    }
+    /* Reuse an identical entry in the class pointer pool if one exists */
+    ArmLIR *dataTarget = scanLiteralPool(cUnit->classPointerList, value, 0);
+    if (dataTarget == NULL) {
+        dataTarget = addWordData(cUnit, &cUnit->classPointerList, value);
+        /* Counts the number of class pointers in this translation */
+        cUnit->numClassPointers++;
+    }
+    /* Emit a PC-relative load whose target is fixed up at assembly time */
+    ArmLIR *loadPcRel = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
+    loadPcRel->opcode = kThumb2LdrPcRel12;
+    loadPcRel->generic.target = (LIR *) dataTarget;
+    loadPcRel->operands[0] = rDest;
+    setupResourceMasks(loadPcRel);
+    setMemRefType(loadPcRel, true, kLiteral);
+    loadPcRel->aliasInfo = dataTarget->operands[0];
+    res = loadPcRel;
+    dvmCompilerAppendLIR(cUnit, (LIR *) loadPcRel);
+    return res;
+}
/* Assemble LIR into machine code */
void dvmCompilerAssembleLIR(CompilationUnit *cUnit, JitTranslationInfo *info);
+/* Install class objects in the literal pool */
+void dvmJitInstallClassObjectPointers(CompilationUnit *cUnit,
+ char *codeAddress);
+
/* Patch inline cache content for polymorphic callsites */
bool dvmJitPatchInlineCache(void *cellPtr, void *contentPtr);
void dvmCompilerCodegenDump(CompilationUnit *cUnit);
/* Implemented in the codegen/<target>/Assembler.c */
-void* dvmJitChain(void *tgtAddr, u4* branchAddr);
-u4* dvmJitUnchain(void *codeAddr);
-void dvmJitUnchainAll(void);
void dvmCompilerPatchInlineCache(void);
/* Implemented in codegen/<target>/Ralloc.c */
for (lirInsn = cUnit->firstLIRInsn; lirInsn; lirInsn = lirInsn->next) {
dvmDumpLIRInsn(lirInsn, (unsigned char *) cUnit->baseAddr);
}
- for (lirInsn = cUnit->wordList; lirInsn; lirInsn = lirInsn->next) {
+ for (lirInsn = cUnit->classPointerList; lirInsn; lirInsn = lirInsn->next) {
+ armLIR = (ArmLIR *) lirInsn;
+ LOGD("%p (%04x): .class (%s)\n",
+ (char*)cUnit->baseAddr + armLIR->generic.offset,
+ armLIR->generic.offset,
+ ((CallsiteInfo *) armLIR->operands[0])->classDescriptor);
+ }
+ for (lirInsn = cUnit->literalList; lirInsn; lirInsn = lirInsn->next) {
armLIR = (ArmLIR *) lirInsn;
LOGD("%p (%04x): .word (0x%x)\n",
(char*)cUnit->baseAddr + armLIR->generic.offset,
#define UPDATE_CODE_CACHE_PATCHES()
#endif
-/* Write the numbers in the literal pool to the codegen stream */
-static void installDataContent(CompilationUnit *cUnit)
+/* Write the numbers in the constant and class pool to the output stream */
+static void installLiteralPools(CompilationUnit *cUnit)
{
int *dataPtr = (int *) ((char *) cUnit->baseAddr + cUnit->dataOffset);
- ArmLIR *dataLIR = (ArmLIR *) cUnit->wordList;
+ /* Install number of class pointer literals */
+ *dataPtr++ = cUnit->numClassPointers;
+ ArmLIR *dataLIR = (ArmLIR *) cUnit->classPointerList;
while (dataLIR) {
+ /*
+ * Install the callsiteinfo pointers into the cells for now. They will
+ * be converted into real pointers in dvmJitInstallClassObjectPointers.
+ */
*dataPtr++ = dataLIR->operands[0];
dataLIR = NEXT_LIR(dataLIR);
}
-}
-
-/* Returns the size of a Jit trace description */
-static int jitTraceDescriptionSize(const JitTraceDescription *desc)
-{
- int runCount;
- /* Trace end is always of non-meta type (ie isCode == true) */
- for (runCount = 0; ; runCount++) {
- if (desc->trace[runCount].frag.isCode &&
- desc->trace[runCount].frag.runEnd)
- break;
+ dataLIR = (ArmLIR *) cUnit->literalList;
+ while (dataLIR) {
+ *dataPtr++ = dataLIR->operands[0];
+ dataLIR = NEXT_LIR(dataLIR);
}
- return sizeof(JitTraceDescription) + ((runCount+1) * sizeof(JitTraceRun));
}
/*
return kSuccess;
}
-#if defined(SIGNATURE_BREAKPOINT)
-/* Inspect the assembled instruction stream to find potential matches */
-static void matchSignatureBreakpoint(const CompilationUnit *cUnit,
- unsigned int size)
+static int assignLiteralOffsetCommon(LIR *lir, int offset)
{
- unsigned int i, j;
- u4 *ptr = (u4 *) cUnit->codeBuffer;
-
- for (i = 0; i < size - gDvmJit.signatureBreakpointSize + 1; i++) {
- if (ptr[i] == gDvmJit.signatureBreakpoint[0]) {
- for (j = 1; j < gDvmJit.signatureBreakpointSize; j++) {
- if (ptr[i+j] != gDvmJit.signatureBreakpoint[j]) {
- break;
- }
- }
- if (j == gDvmJit.signatureBreakpointSize) {
- LOGD("Signature match starting from offset %#x (%d words)",
- i*4, gDvmJit.signatureBreakpointSize);
- int descSize = jitTraceDescriptionSize(cUnit->traceDesc);
- JitTraceDescription *newCopy =
- (JitTraceDescription *) malloc(descSize);
- memcpy(newCopy, cUnit->traceDesc, descSize);
- dvmCompilerWorkEnqueue(NULL, kWorkOrderTraceDebug, newCopy);
- break;
- }
- }
+ for (;lir != NULL; lir = lir->next) {
+ lir->offset = offset;
+ offset += 4;
}
+ return offset;
+}
+
+/* Determine the offset of each literal field */
+static int assignLiteralOffset(CompilationUnit *cUnit, int offset)
+{
+ /* Reserved for the size field of class pointer pool */
+ offset += 4;
+ offset = assignLiteralOffsetCommon(cUnit->classPointerList, offset);
+ offset = assignLiteralOffsetCommon(cUnit->literalList, offset);
+ return offset;
}
-#endif
/*
* Translation layout in the code cache. Note that the codeAddress pointer
* counter is at codeAddress - 6.
*
* +----------------------------+
- * | Trace Profile Counter addr | -> 4 bytes
+ * | Trace Profile Counter addr | -> 4 bytes (PROF_COUNTER_ADDR_SIZE)
* +----------------------------+
- * +--| Offset to chain cell counts| -> 2 bytes
+ * +--| Offset to chain cell counts| -> 2 bytes (CHAIN_CELL_OFFSET_SIZE)
* | +----------------------------+
* | | Trace profile code | <- entry point when profiling
* | . - - - - - - - .
* . .
* | |
* +----------------------------+
+ * | # Class pointer pool size | -> 4 bytes
+ * +----------------------------+
+ * | Class pointer pool | -> 4-byte aligned, variable size
+ * . .
+ * . .
+ * | |
+ * +----------------------------+
* | Literal pool | -> 4-byte aligned, variable size
* . .
* . .
* | |
* +----------------------------+
*
+ */
+
+#define PROF_COUNTER_ADDR_SIZE 4
+#define CHAIN_CELL_OFFSET_SIZE 2
+
+/*
+ * Utility functions to navigate various parts in a trace. If we change the
+ * layout/offset in the future, we just modify these functions and we don't need
+ * to propagate the changes to all the use cases.
+ */
+/*
+ * Return the base of the translation header, which precedes codeAddress:
+ * the profile counter address (4 bytes) followed by the chain cell offset
+ * (2 bytes). For non-ARM instruction sets one extra byte is subtracted,
+ * presumably to back out the Thumb bit set in codeAddress -- confirm.
+ */
+static inline char *getTraceBase(const JitEntry *p)
+{
+    return (char*)p->codeAddress -
+        (PROF_COUNTER_ADDR_SIZE + CHAIN_CELL_OFFSET_SIZE +
+         (p->u.info.instructionSet == DALVIK_JIT_ARM ? 0 : 1));
+}
+
+/* Handy function to retrieve the profile count */
+static inline JitTraceCounter_t getProfileCount(const JitEntry *entry)
+{
+    /* Unused entry, or one compiled to the interpret-only template */
+    if (entry->dPC == 0 || entry->codeAddress == 0 ||
+        entry->codeAddress == dvmCompilerGetInterpretTemplate())
+        return 0;
+
+    /* The trace base stores the *address* of the counter - deref twice */
+    JitTraceCounter_t **p = (JitTraceCounter_t **) getTraceBase(entry);
+
+    return **p;
+}
+
+/* Handy function to reset the profile count */
+static inline void resetProfileCount(const JitEntry *entry)
+{
+    /* Nothing to reset for unused or interpret-only entries */
+    if (entry->dPC == 0 || entry->codeAddress == 0 ||
+        entry->codeAddress == dvmCompilerGetInterpretTemplate())
+        return;
+
+    /* The trace base stores the *address* of the counter - deref twice */
+    JitTraceCounter_t **p = (JitTraceCounter_t **) getTraceBase(entry);
+
+    **p = 0;
+}
+
+/*
+ * Get the pointer of the chain cell count. The u2 located at
+ * base + PROF_COUNTER_ADDR_SIZE holds the offset, relative to itself, of
+ * the ChainCellCounts structure.
+ */
+static inline ChainCellCounts* getChainCellCountsPointer(const char *base)
+{
+    u2 *chainCellOffsetP = (u2 *) (base + PROF_COUNTER_ADDR_SIZE);
+    u2 chainCellOffset = *chainCellOffsetP;
+    return (ChainCellCounts *) ((char *) chainCellOffsetP + chainCellOffset);
+}
+
+/*
+ * Get the size of all chaining cells, in 32-bit words (byte sizes are
+ * shifted right by 2). Predicted chaining cells are sized differently
+ * from every other cell type.
+ */
+static inline u4 getChainCellSize(const ChainCellCounts* pChainCellCounts)
+{
+    int cellSize = 0;
+    int i;
+
+    /* Get total count of chain cells */
+    for (i = 0; i < kChainingCellGap; i++) {
+        if (i != kChainingCellInvokePredicted) {
+            cellSize += pChainCellCounts->u.count[i] *
+                        (CHAIN_CELL_NORMAL_SIZE >> 2);
+        } else {
+            cellSize += pChainCellCounts->u.count[i] *
+                        (CHAIN_CELL_PREDICTED_SIZE >> 2);
+        }
+    }
+    return cellSize;
+}
+
+/*
+ * Get the starting pointer of the trace description section, which is
+ * laid out immediately after the ChainCellCounts structure.
+ */
+static JitTraceDescription* getTraceDescriptionPointer(const char *base)
+{
+    ChainCellCounts* pCellCounts = getChainCellCountsPointer(base);
+    return (JitTraceDescription*) ((char*)pCellCounts + sizeof(*pCellCounts));
+}
+
+/* Get the size of a trace description */
+static int getTraceDescriptionSize(const JitTraceDescription *desc)
+{
+    int runCount;
+    /* Trace end is always of non-meta type (ie isCode == true) */
+    for (runCount = 0; ; runCount++) {
+        if (desc->trace[runCount].isCode &&
+            desc->trace[runCount].info.frag.runEnd)
+            break;
+    }
+    /* +1 accounts for the run carrying the end-of-trace marker itself */
+    return sizeof(JitTraceDescription) + ((runCount+1) * sizeof(JitTraceRun));
+}
+
+#if defined(SIGNATURE_BREAKPOINT)
+/*
+ * Inspect the assembled instruction stream ("size" words in
+ * cUnit->codeBuffer) to find potential matches with the installed
+ * signature breakpoint, a sequence of gDvmJit.signatureBreakpointSize
+ * words. On a match, a heap copy of the trace description is enqueued
+ * for the compiler thread to debug (the copy is freed by the consumer).
+ */
+static void matchSignatureBreakpoint(const CompilationUnit *cUnit,
+                                     unsigned int size)
+{
+    unsigned int i, j;
+    u4 *ptr = (u4 *) cUnit->codeBuffer;
+
+    for (i = 0; i < size - gDvmJit.signatureBreakpointSize + 1; i++) {
+        /* Cheap first-word filter before comparing the full signature */
+        if (ptr[i] == gDvmJit.signatureBreakpoint[0]) {
+            for (j = 1; j < gDvmJit.signatureBreakpointSize; j++) {
+                if (ptr[i+j] != gDvmJit.signatureBreakpoint[j]) {
+                    break;
+                }
+            }
+            if (j == gDvmJit.signatureBreakpointSize) {
+                LOGD("Signature match starting from offset %#x (%d words)",
+                     i*4, gDvmJit.signatureBreakpointSize);
+                int descSize = getTraceDescriptionSize(cUnit->traceDesc);
+                JitTraceDescription *newCopy =
+                    (JitTraceDescription *) malloc(descSize);
+                memcpy(newCopy, cUnit->traceDesc, descSize);
+                dvmCompilerWorkEnqueue(NULL, kWorkOrderTraceDebug, newCopy);
+                break;
+            }
+        }
+    }
+}
+#endif
+
+/*
* Go over each instruction in the list and calculate the offset from the top
* before sending them off to the assembler. If out-of-range branch distance is
* seen rearrange the instructions a bit to correct it.
*/
void dvmCompilerAssembleLIR(CompilationUnit *cUnit, JitTranslationInfo *info)
{
- LIR *lir;
ArmLIR *armLIR;
int offset = 0;
int i;
ChainCellCounts chainCellCounts;
int descSize =
- cUnit->methodJitMode ? 0 : jitTraceDescriptionSize(cUnit->traceDesc);
+ cUnit->methodJitMode ? 0 : getTraceDescriptionSize(cUnit->traceDesc);
int chainingCellGap = 0;
info->instructionSet = cUnit->instructionSet;
/* Set up offsets for literals */
cUnit->dataOffset = offset;
- for (lir = cUnit->wordList; lir; lir = lir->next) {
- lir->offset = offset;
- offset += 4;
- }
+ /*
+ * Assign each class pointer/constant an offset from the beginning of the
+ * compilation unit.
+ */
+ offset = assignLiteralOffset(cUnit, offset);
cUnit->totalSize = offset;
}
/* Write the literals directly into the code cache */
- installDataContent(cUnit);
-
+ installLiteralPools(cUnit);
/* Flush dcache and invalidate the icache to maintain coherence */
dvmCompilerCacheFlush((long)cUnit->baseAddr,
*/
} else if (gDvmJit.compilerICPatchIndex < COMPILER_IC_PATCH_QUEUE_SIZE) {
int index = gDvmJit.compilerICPatchIndex++;
+ const ClassObject *clazz = newContent->clazz;
+
gDvmJit.compilerICPatchQueue[index].cellAddr = cellAddr;
gDvmJit.compilerICPatchQueue[index].cellContent = *newContent;
+ gDvmJit.compilerICPatchQueue[index].classDescriptor = clazz->descriptor;
+ gDvmJit.compilerICPatchQueue[index].classLoader = clazz->classLoader;
+ /* For verification purpose only */
+ gDvmJit.compilerICPatchQueue[index].serialNumber = clazz->serialNumber;
#if defined(WITH_JIT_TUNING)
gDvmJit.icPatchQueued++;
#endif
maxAddr = (PredictedChainingCell *) gDvmJit.codeCache;
for (i = 0; i < gDvmJit.compilerICPatchIndex; i++) {
- PredictedChainingCell *cellAddr =
- gDvmJit.compilerICPatchQueue[i].cellAddr;
- PredictedChainingCell *cellContent =
- &gDvmJit.compilerICPatchQueue[i].cellContent;
+ ICPatchWorkOrder *workOrder = &gDvmJit.compilerICPatchQueue[i];
+ PredictedChainingCell *cellAddr = workOrder->cellAddr;
+ PredictedChainingCell *cellContent = &workOrder->cellContent;
+ ClassObject *clazz = dvmFindClassNoInit(workOrder->classDescriptor,
+ workOrder->classLoader);
+
+ assert(clazz->serialNumber == workOrder->serialNumber);
+
+ /* Use the newly resolved clazz pointer */
+ cellContent->clazz = clazz;
COMPILER_TRACE_CHAINING(
LOGD("Jit Runtime: predicted chain %p from %s to %s (%s) "
* the incoming codeAddr is a thumb code address, and therefore has
* the low bit set.
*/
-u4* dvmJitUnchain(void* codeAddr)
+static u4* unchainSingle(JitEntry *trace)
{
- u2* pChainCellOffset = (u2*)((char*)codeAddr - 3);
- u2 chainCellOffset = *pChainCellOffset;
- ChainCellCounts *pChainCellCounts =
- (ChainCellCounts*)((char*)codeAddr + chainCellOffset - 3);
- int cellSize;
+ const char *base = getTraceBase(trace);
+ ChainCellCounts *pChainCellCounts = getChainCellCountsPointer(base);
+ int cellSize = getChainCellSize(pChainCellCounts);
u4* pChainCells;
- u4* pStart;
u4 newInst;
int i,j;
PredictedChainingCell *predChainCell;
- /* Get total count of chain cells */
- for (i = 0, cellSize = 0; i < kChainingCellGap; i++) {
- if (i != kChainingCellInvokePredicted) {
- cellSize += pChainCellCounts->u.count[i] * (CHAIN_CELL_NORMAL_SIZE >> 2);
- } else {
- cellSize += pChainCellCounts->u.count[i] *
- (CHAIN_CELL_PREDICTED_SIZE >> 2);
- }
- }
-
if (cellSize == 0)
return (u4 *) pChainCellCounts;
/* Locate the beginning of the chain cell region */
- pStart = pChainCells = ((u4 *) pChainCellCounts) - cellSize -
- pChainCellCounts->u.count[kChainingCellGap];
+ pChainCells = ((u4 *) pChainCellCounts) - cellSize -
+ pChainCellCounts->u.count[kChainingCellGap];
/* The cells are sorted in order - walk through them and reset */
for (i = 0; i < kChainingCellGap; i++) {
(gDvmJit.pJitEntryTable[i].codeAddress !=
dvmCompilerGetInterpretTemplate())) {
u4* lastAddress;
- lastAddress =
- dvmJitUnchain(gDvmJit.pJitEntryTable[i].codeAddress);
+ lastAddress = unchainSingle(&gDvmJit.pJitEntryTable[i]);
if (lowAddress == NULL ||
(u4*)gDvmJit.pJitEntryTable[i].codeAddress <
lowAddress)
return 0;
}
-static char *getTraceBase(const JitEntry *p)
-{
- return (char*)p->codeAddress -
- (6 + (p->u.info.instructionSet == DALVIK_JIT_ARM ? 0 : 1));
-}
-
-/* Handy function to retrieve the profile count */
-static inline JitTraceCounter_t getProfileCount(const JitEntry *entry)
-{
- if (entry->dPC == 0 || entry->codeAddress == 0 ||
- entry->codeAddress == dvmCompilerGetInterpretTemplate())
- return 0;
-
- JitTraceCounter_t **p = (JitTraceCounter_t **) getTraceBase(entry);
-
- return **p;
-}
-
-/* Handy function to reset the profile count */
-static inline void resetProfileCount(const JitEntry *entry)
-{
- if (entry->dPC == 0 || entry->codeAddress == 0 ||
- entry->codeAddress == dvmCompilerGetInterpretTemplate())
- return;
-
- JitTraceCounter_t **p = (JitTraceCounter_t **) getTraceBase(entry);
-
- **p = 0;
-}
-
/* Dumps profile info for a single trace */
static int dumpTraceProfile(JitEntry *p, bool silent, bool reset,
unsigned long sum)
{
- ChainCellCounts* pCellCounts;
- char* traceBase;
- JitTraceCounter_t count;
- u2* pCellOffset;
- JitTraceDescription *desc;
- const Method* method;
int idx;
- traceBase = getTraceBase(p);
-
if (p->codeAddress == NULL) {
if (!silent)
- LOGD("TRACEPROFILE 0x%08x 0 NULL 0 0", (int)traceBase);
+ LOGD("TRACEPROFILE NULL");
return 0;
}
if (p->codeAddress == dvmCompilerGetInterpretTemplate()) {
if (!silent)
- LOGD("TRACEPROFILE 0x%08x 0 INTERPRET_ONLY 0 0", (int)traceBase);
+ LOGD("TRACEPROFILE INTERPRET_ONLY");
return 0;
}
- count = getProfileCount(p);
+ JitTraceCounter_t count = getProfileCount(p);
if (reset) {
resetProfileCount(p);
}
if (silent) {
return count;
}
- pCellOffset = (u2*) (traceBase + 4);
- pCellCounts = (ChainCellCounts*) ((char *)pCellOffset + *pCellOffset);
- desc = (JitTraceDescription*) ((char*)pCellCounts + sizeof(*pCellCounts));
- method = desc->method;
+ JitTraceDescription *desc = getTraceDescriptionPointer(getTraceBase(p));
+ const Method *method = desc->method;
char *methodDesc = dexProtoCopyMethodDescriptor(&method->prototype);
- jitProfileAddrToLine addrToLine = {0, desc->trace[0].frag.startOffset};
+ jitProfileAddrToLine addrToLine = {0, desc->trace[0].info.frag.startOffset};
/*
* We may end up decoding the debug information for the same method
addrToLineCb, NULL, &addrToLine);
LOGD("TRACEPROFILE 0x%08x % 10d %5.2f%% [%#x(+%d), %d] %s%s;%s",
- (int)traceBase,
+ (int) getTraceBase(p),
count,
((float ) count) / sum * 100.0,
- desc->trace[0].frag.startOffset,
- desc->trace[0].frag.numInsts,
+ desc->trace[0].info.frag.startOffset,
+ desc->trace[0].info.frag.numInsts,
addrToLine.lineNum,
method->clazz->descriptor, method->name, methodDesc);
free(methodDesc);
/* Find the last fragment (ie runEnd is set) */
for (idx = 0;
- desc->trace[idx].frag.isCode && !desc->trace[idx].frag.runEnd;
+ desc->trace[idx].isCode && !desc->trace[idx].info.frag.runEnd;
idx++) {
}
* runEnd must comes with a JitCodeDesc frag. If isCode is false it must
* be a meta info field (only used by callsite info for now).
*/
- if (!desc->trace[idx].frag.isCode) {
- const Method *method = (const Method *)desc->trace[idx+1].meta;
+ if (!desc->trace[idx].isCode) {
+ const Method *method = (const Method *)
+ desc->trace[idx+JIT_TRACE_CUR_METHOD-1].info.meta;
char *methodDesc = dexProtoCopyMethodDescriptor(&method->prototype);
/* Print the callee info in the trace */
LOGD(" -> %s%s;%s", method->clazz->descriptor, method->name,
if ((jitEntry == NULL) || (jitEntry->codeAddress == 0))
return NULL;
- /* Find out the startint point */
- char *traceBase = getTraceBase(jitEntry);
-
- /* Then find out the starting point of the chaining cell */
- u2 *pCellOffset = (u2*) (traceBase + 4);
- ChainCellCounts *pCellCounts =
- (ChainCellCounts*) ((char *)pCellOffset + *pCellOffset);
-
- /* From there we can find out the starting point of the trace descriptor */
JitTraceDescription *desc =
- (JitTraceDescription*) ((char*)pCellCounts + sizeof(*pCellCounts));
+ getTraceDescriptionPointer(getTraceBase(jitEntry));
/* Now make a copy and return */
- int descSize = jitTraceDescriptionSize(desc);
+ int descSize = getTraceDescriptionSize(desc);
JitTraceDescription *newCopy = (JitTraceDescription *) malloc(descSize);
memcpy(newCopy, desc, descSize);
return newCopy;
return;
}
+/*
+ * Scan a single translation for class object pointers and pass the
+ * address of each slot that holds one to "callback". Two areas are
+ * scanned: the clazz field of each predicted chaining cell (NULL and the
+ * PREDICTED_CHAIN_FAKE_CLAZZ placeholder are skipped), and the class
+ * pointer pool that follows the trace description, whose first word is
+ * the entry count.
+ */
+static void findClassPointersSingleTrace(char *base, void (*callback)(void *))
+{
+    unsigned int chainTypeIdx, chainIdx;
+    ChainCellCounts *pChainCellCounts = getChainCellCountsPointer(base);
+    int cellSize = getChainCellSize(pChainCellCounts);
+    /* Scan the chaining cells */
+    if (cellSize) {
+        /* Locate the beginning of the chain cell region */
+        u4 *pChainCells = ((u4 *) pChainCellCounts) - cellSize -
+            pChainCellCounts->u.count[kChainingCellGap];
+        /* The cells are sorted in order - walk through them */
+        for (chainTypeIdx = 0; chainTypeIdx < kChainingCellGap;
+             chainTypeIdx++) {
+            if (chainTypeIdx != kChainingCellInvokePredicted) {
+                /* Non-predicted cells carry no class pointers - skip */
+                /* In 32-bit words */
+                pChainCells += (CHAIN_CELL_NORMAL_SIZE >> 2) *
+                    pChainCellCounts->u.count[chainTypeIdx];
+                continue;
+            }
+            for (chainIdx = 0;
+                 chainIdx < pChainCellCounts->u.count[chainTypeIdx];
+                 chainIdx++) {
+                PredictedChainingCell *cell =
+                    (PredictedChainingCell *) pChainCells;
+                /*
+                 * Report the cell if it contains a sane class
+                 * pointer.
+                 */
+                if (cell->clazz != NULL &&
+                    cell->clazz !=
+                      (ClassObject *) PREDICTED_CHAIN_FAKE_CLAZZ) {
+                    callback(&cell->clazz);
+                }
+                pChainCells += CHAIN_CELL_PREDICTED_SIZE >> 2;
+            }
+        }
+    }
+
+    /* Scan the class pointer pool */
+    JitTraceDescription *desc = getTraceDescriptionPointer(base);
+    int descSize = getTraceDescriptionSize(desc);
+    int *classPointerP = (int *) ((char *) desc + descSize);
+    int numClassPointers = *classPointerP++;
+    for (; numClassPointers; numClassPointers--, classPointerP++) {
+        callback(classPointerP);
+    }
+}
+
+/*
+ * Scan class pointers in each translation and pass each slot's address to
+ * the callback function. Currently such pointers can be found in the
+ * class pointer pool and the clazz field of the predicted chaining cells.
+ * The in-flight compilation (if any) is scanned first, then every
+ * populated trace entry in the JIT table.
+ */
+void dvmJitScanAllClassPointers(void (*callback)(void *))
+{
+    /* Callback may patch slots, so make the code cache writable first */
+    UNPROTECT_CODE_CACHE(gDvmJit.codeCache, gDvmJit.codeCacheByteUsed);
+
+    /* Handle the inflight compilation first */
+    if (gDvmJit.inflightBaseAddr)
+        findClassPointersSingleTrace((char *) gDvmJit.inflightBaseAddr,
+                                     callback);
+
+    if (gDvmJit.pJitEntryTable != NULL) {
+        unsigned int traceIdx;
+        dvmLockMutex(&gDvmJit.tableLock);
+        for (traceIdx = 0; traceIdx < gDvmJit.jitTableSize; traceIdx++) {
+            const JitEntry *entry = &gDvmJit.pJitEntryTable[traceIdx];
+            /* Only scan populated trace (not method) entries with code */
+            if (entry->dPC &&
+                !entry->u.info.isMethodEntry &&
+                entry->codeAddress &&
+                (entry->codeAddress != dvmCompilerGetInterpretTemplate())) {
+                char *base = getTraceBase(entry);
+                findClassPointersSingleTrace(base, callback);
+            }
+        }
+        dvmUnlockMutex(&gDvmJit.tableLock);
+    }
+    UPDATE_CODE_CACHE_PATCHES();
+
+    PROTECT_CODE_CACHE(gDvmJit.codeCache, gDvmJit.codeCacheByteUsed);
+}
+
+/*
+ * Provide the final touch on the class object pointer pool to install the
+ * actual pointers. The thread has to be in the running state.
+ *
+ * Each pool slot holds a CallsiteInfo* at this point (stored there by
+ * installLiteralPools); this routine resolves each descriptor/loader
+ * tuple to a live ClassObject* and writes it back into the slot.
+ */
+void dvmJitInstallClassObjectPointers(CompilationUnit *cUnit, char *codeAddress)
+{
+    /* Non-ARM code addresses carry the Thumb bit - back it out */
+    char *base = codeAddress - cUnit->headerSize -
+        (cUnit->instructionSet == DALVIK_JIT_ARM ? 0 : 1);
+
+    /* Scan the class pointer pool */
+    JitTraceDescription *desc = getTraceDescriptionPointer(base);
+    int descSize = getTraceDescriptionSize(desc);
+    /* NOTE(review): the cast is to int* but the target is intptr_t*;
+     * these only agree on 32-bit builds - confirm intended word size. */
+    intptr_t *classPointerP = (int *) ((char *) desc + descSize);
+    int numClassPointers = *(int *)classPointerP++;
+    intptr_t *startClassPointerP = classPointerP;
+
+    UNPROTECT_CODE_CACHE(startClassPointerP,
+                         numClassPointers * sizeof(intptr_t));
+    /*
+     * Change the thread state to VM_RUNNING so that GC won't be happening
+     * when the assembler looks up the class pointers.
+     */
+    dvmChangeStatus(gDvmJit.compilerThread, THREAD_RUNNING);
+#if defined(WITH_JIT_TUNING)
+    u8 startTime = dvmGetRelativeTimeUsec();
+#endif
+    for (;numClassPointers; numClassPointers--) {
+        CallsiteInfo *callsiteInfo = (CallsiteInfo *) *classPointerP;
+        ClassObject *clazz = dvmFindClassNoInit(
+            callsiteInfo->classDescriptor, callsiteInfo->classLoader);
+        assert(!strcmp(clazz->descriptor, callsiteInfo->classDescriptor));
+        *classPointerP++ = (intptr_t) clazz;
+    }
+
+    /*
+     * Register the base address so that if GC kicks in after the thread state
+     * has been changed to VMWAIT and before the compiled code is registered
+     * in the JIT table, its content can be patched if class objects are
+     * moved.
+     */
+    gDvmJit.inflightBaseAddr = base;
+
+#if defined(WITH_JIT_TUNING)
+    u8 blockTime = dvmGetRelativeTimeUsec() - startTime;
+    gDvmJit.compilerThreadBlockGCTime += blockTime;
+    if (blockTime > gDvmJit.maxCompilerThreadBlockGCTime)
+        gDvmJit.maxCompilerThreadBlockGCTime = blockTime;
+    gDvmJit.numCompilerThreadBlockGC++;
+#endif
+    /* Change the thread state back to VMWAIT */
+    dvmChangeStatus(gDvmJit.compilerThread, THREAD_VMWAIT);
+
+    UPDATE_CODE_CACHE_PATCHES();
+
+    /* NOTE(review): numClassPointers was decremented to 0 by the loop
+     * above, so this re-protects a zero-length region - confirm whether
+     * the original count should be used here instead. */
+    PROTECT_CODE_CACHE(startClassPointerP, numClassPointers * sizeof(intptr_t));
+}
+
#if defined(WITH_SELF_VERIFICATION)
/*
* The following are used to keep compiled loads and stores from modifying
* Search the existing constants in the literal pool for an exact or close match
* within specified delta (greater or equal to 0).
*/
-static ArmLIR *scanLiteralPool(CompilationUnit *cUnit, int value,
- unsigned int delta)
+static ArmLIR *scanLiteralPool(LIR *dataTarget, int value, unsigned int delta)
{
- LIR *dataTarget = cUnit->wordList;
while (dataTarget) {
if (((unsigned) (value - ((ArmLIR *) dataTarget)->operands[0])) <=
delta)
*/
/* Add a 32-bit constant either in the constant pool or mixed with code */
-static ArmLIR *addWordData(CompilationUnit *cUnit, int value, bool inPlace)
+static ArmLIR *addWordData(CompilationUnit *cUnit, LIR **constantListP,
+ int value)
{
/* Add the constant to the literal pool */
- if (!inPlace) {
+ if (constantListP) {
ArmLIR *newValue = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
newValue->operands[0] = value;
- newValue->generic.next = cUnit->wordList;
- cUnit->wordList = (LIR *) newValue;
+ newValue->generic.next = *constantListP;
+ *constantListP = (LIR *) newValue;
return newValue;
} else {
/* Add the constant in the middle of code stream */
CallsiteInfo *callsiteInfo = mir->meta.callsiteInfo;
dvmCompilerLockAllTemps(cUnit);
- loadConstant(cUnit, r1, (int) callsiteInfo->clazz);
+ loadClassPointer(cUnit, r1, (int) callsiteInfo);
loadWordDisp(cUnit, r0, offsetof(Object, clazz), r2);
/* Branch to the slow path if classes are not equal */
offsetof(Thread,
jitToInterpEntries.dvmJitToInterpNormal) >> 2);
newLIR1(cUnit, kThumbBlxR, r0);
- addWordData(cUnit, (int) (cUnit->method->insns + offset), true);
+ addWordData(cUnit, NULL, (int) (cUnit->method->insns + offset));
}
/*
offsetof(Thread,
jitToInterpEntries.dvmJitToInterpTraceSelect) >> 2);
newLIR1(cUnit, kThumbBlxR, r0);
- addWordData(cUnit, (int) (cUnit->method->insns + offset), true);
+ addWordData(cUnit, NULL, (int) (cUnit->method->insns + offset));
}
/* Chaining cell for branches that branch back into the same basic block */
offsetof(Thread, jitToInterpEntries.dvmJitToInterpNormal) >> 2);
#endif
newLIR1(cUnit, kThumbBlxR, r0);
- addWordData(cUnit, (int) (cUnit->method->insns + offset), true);
+ addWordData(cUnit, NULL, (int) (cUnit->method->insns + offset));
}
/* Chaining cell for monomorphic method invocations. */
offsetof(Thread,
jitToInterpEntries.dvmJitToInterpTraceSelect) >> 2);
newLIR1(cUnit, kThumbBlxR, r0);
- addWordData(cUnit, (int) (callee->insns), true);
+ addWordData(cUnit, NULL, (int) (callee->insns));
}
/* Chaining cell for monomorphic method invocations. */
{
/* Should not be executed in the initial state */
- addWordData(cUnit, PREDICTED_CHAIN_BX_PAIR_INIT, true);
+ addWordData(cUnit, NULL, PREDICTED_CHAIN_BX_PAIR_INIT);
/* To be filled: class */
- addWordData(cUnit, PREDICTED_CHAIN_CLAZZ_INIT, true);
+ addWordData(cUnit, NULL, PREDICTED_CHAIN_CLAZZ_INIT);
/* To be filled: method */
- addWordData(cUnit, PREDICTED_CHAIN_METHOD_INIT, true);
+ addWordData(cUnit, NULL, PREDICTED_CHAIN_METHOD_INIT);
/*
* Rechain count. The initial value of 0 here will trigger chaining upon
* the first invocation of this callsite.
*/
- addWordData(cUnit, PREDICTED_CHAIN_COUNTER_INIT, true);
+ addWordData(cUnit, NULL, PREDICTED_CHAIN_COUNTER_INIT);
}
/* Load the Dalvik PC into r0 and jump to the specified target */
rlThis = loadValue(cUnit, rlThis, kCoreReg);
int regPredictedClass = dvmCompilerAllocTemp(cUnit);
- loadConstant(cUnit, regPredictedClass, (int) callsiteInfo->clazz);
+ loadClassPointer(cUnit, regPredictedClass, (int) callsiteInfo);
genNullCheck(cUnit, rlThis.sRegLow, rlThis.lowReg, mir->offset,
NULL);/* null object? */
int regActualClass = dvmCompilerAllocTemp(cUnit);
return res;
}
/* No shortcut - go ahead and use literal pool */
- ArmLIR *dataTarget = scanLiteralPool(cUnit, value, 255);
+ ArmLIR *dataTarget = scanLiteralPool(cUnit->literalList, value, 255);
if (dataTarget == NULL) {
- dataTarget = addWordData(cUnit, value, false);
+ dataTarget = addWordData(cUnit, &cUnit->literalList, value);
}
ArmLIR *loadPcRel = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
loadPcRel->opcode = kThumbLdrPcRel;
if (encodedImm >= 0) {
return newLIR2(cUnit, kThumb2Vmovs_IMM8, rDest, encodedImm);
}
- ArmLIR *dataTarget = scanLiteralPool(cUnit, value, 0);
+ ArmLIR *dataTarget = scanLiteralPool(cUnit->literalList, value, 0);
if (dataTarget == NULL) {
- dataTarget = addWordData(cUnit, value, false);
+ dataTarget = addWordData(cUnit, &cUnit->literalList, value);
}
ArmLIR *loadPcRel = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
loadPcRel->opcode = kThumb2Vldrs;
return res;
}
/* No shortcut - go ahead and use literal pool */
- ArmLIR *dataTarget = scanLiteralPool(cUnit, value, 0);
+ ArmLIR *dataTarget = scanLiteralPool(cUnit->literalList, value, 0);
if (dataTarget == NULL) {
- dataTarget = addWordData(cUnit, value, false);
+ dataTarget = addWordData(cUnit, &cUnit->literalList, value);
}
ArmLIR *loadPcRel = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
loadPcRel->opcode = kThumb2LdrPcRel12;
void dvmCompilerSortAndPrintTraceProfiles()
{
}
+
+void dvmJitScanAllClassPointers(void (*callback)(void *))
+{
+}
* a contiguous sequence of Dalvik byte codes.
*/
typedef struct {
- unsigned isCode:1; // If set denotes code fragments
unsigned numInsts:8; // Number of Byte codes in run
unsigned runEnd:1; // Run ends with last byte code
- jitHint hint:6; // Hint to apply to final code of run
+ jitHint hint:7; // Hint to apply to final code of run
u2 startOffset; // Starting offset for trace run
} JitCodeDesc;
* frag2
* frag3
* meta1
- * meta2
+ * :
+ * metan
* frag4
*
- * frags 1-4 have the "isCode" field set, and metas 1-2 are plain pointers or
- * pointers to auxiliary data structures as long as the LSB is null.
+ * frags 1-4 have the "isCode" field set and describe the location/length of
+ * real code traces, while metas 1-n are misc information.
* The meaning of the meta content is loosely defined. It is usually the code
* fragment right before the first meta field (frag3 in this case) to
* understand and parse them. Frag4 could be a dummy one with 0 "numInsts" but
* the "runEnd" field set.
*
* For example, if a trace run contains a method inlining target, the class
- * type of "this" and the currently resolved method pointer are two instances
- * of meta information stored there.
+ * descriptor/loader of "this" and the currently resolved method pointer are
+ * three instances of meta information stored there.
*/
-typedef union {
- JitCodeDesc frag;
- void* meta;
+typedef struct {
+ union {
+ JitCodeDesc frag;
+ void* meta;
+ } info;
+ u4 isCode:1;
+ u4 unused:31;
} JitTraceRun;
#endif
LOGD("JIT: Avg unit compilation time: %llu us",
gDvmJit.numCompilations == 0 ? 0 :
gDvmJit.jitTime / gDvmJit.numCompilations);
+ LOGD("JIT: Potential GC blocked by compiler: max %llu us / "
+ "avg %llu us (%d)",
+ gDvmJit.maxCompilerThreadBlockGCTime,
+ gDvmJit.numCompilerThreadBlockGC == 0 ?
+ 0 : gDvmJit.compilerThreadBlockGCTime /
+ gDvmJit.numCompilerThreadBlockGC,
+ gDvmJit.numCompilerThreadBlockGC);
#endif
LOGD("JIT: %d Translation chains, %d interp stubs",
const DecodedInstruction* insn)
{
int currTraceRun = ++self->currTraceRun;
- self->trace[currTraceRun].meta = (void *) thisClass;
+ self->trace[currTraceRun].info.meta = thisClass ?
+ (void *) thisClass->descriptor : NULL;
+ self->trace[currTraceRun].isCode = false;
+
+ currTraceRun = ++self->currTraceRun;
+ self->trace[currTraceRun].info.meta = thisClass ?
+ (void *) thisClass->classLoader : NULL;
+ self->trace[currTraceRun].isCode = false;
+
currTraceRun = ++self->currTraceRun;
- self->trace[currTraceRun].meta = (void *) calleeMethod;
+ self->trace[currTraceRun].info.meta = (void *) calleeMethod;
+ self->trace[currTraceRun].isCode = false;
}
/*
/* We need to start a new trace run */
int currTraceRun = ++self->currTraceRun;
self->currRunHead = moveResultPC;
- self->trace[currTraceRun].frag.startOffset = offset + len;
- self->trace[currTraceRun].frag.numInsts = 1;
- self->trace[currTraceRun].frag.runEnd = false;
- self->trace[currTraceRun].frag.hint = kJitHintNone;
- self->trace[currTraceRun].frag.isCode = true;
+ self->trace[currTraceRun].info.frag.startOffset = offset + len;
+ self->trace[currTraceRun].info.frag.numInsts = 1;
+ self->trace[currTraceRun].info.frag.runEnd = false;
+ self->trace[currTraceRun].info.frag.hint = kJitHintNone;
+ self->trace[currTraceRun].isCode = true;
self->totalTraceLen++;
self->currRunLen = dexGetWidthFromInstruction(moveResultPC);
currTraceRun = ++self->currTraceRun;
self->currRunLen = 0;
self->currRunHead = (u2*)lastPC;
- self->trace[currTraceRun].frag.startOffset = offset;
- self->trace[currTraceRun].frag.numInsts = 0;
- self->trace[currTraceRun].frag.runEnd = false;
- self->trace[currTraceRun].frag.hint = kJitHintNone;
- self->trace[currTraceRun].frag.isCode = true;
+ self->trace[currTraceRun].info.frag.startOffset = offset;
+ self->trace[currTraceRun].info.frag.numInsts = 0;
+ self->trace[currTraceRun].info.frag.runEnd = false;
+ self->trace[currTraceRun].info.frag.hint = kJitHintNone;
+ self->trace[currTraceRun].isCode = true;
}
- self->trace[self->currTraceRun].frag.numInsts++;
+ self->trace[self->currTraceRun].info.frag.numInsts++;
self->totalTraceLen++;
self->currRunLen += len;
int lastTraceDesc = self->currTraceRun;
/* Extend a new empty desc if the last slot is meta info */
- if (!self->trace[lastTraceDesc].frag.isCode) {
+ if (!self->trace[lastTraceDesc].isCode) {
lastTraceDesc = ++self->currTraceRun;
- self->trace[lastTraceDesc].frag.startOffset = 0;
- self->trace[lastTraceDesc].frag.numInsts = 0;
- self->trace[lastTraceDesc].frag.hint = kJitHintNone;
- self->trace[lastTraceDesc].frag.isCode = true;
+ self->trace[lastTraceDesc].info.frag.startOffset = 0;
+ self->trace[lastTraceDesc].info.frag.numInsts = 0;
+ self->trace[lastTraceDesc].info.frag.hint = kJitHintNone;
+ self->trace[lastTraceDesc].isCode = true;
}
/* Mark the end of the trace runs */
- self->trace[lastTraceDesc].frag.runEnd = true;
+ self->trace[lastTraceDesc].info.frag.runEnd = true;
JitTraceDescription* desc =
(JitTraceDescription*)malloc(sizeof(JitTraceDescription) +
self->totalTraceLen = 0;
self->currRunHead = self->interpSave.pc;
self->currRunLen = 0;
- self->trace[0].frag.startOffset =
+ self->trace[0].info.frag.startOffset =
self->interpSave.pc - self->interpSave.method->insns;
- self->trace[0].frag.numInsts = 0;
- self->trace[0].frag.runEnd = false;
- self->trace[0].frag.hint = kJitHintNone;
- self->trace[0].frag.isCode = true;
+ self->trace[0].info.frag.numInsts = 0;
+ self->trace[0].info.frag.runEnd = false;
+ self->trace[0].info.frag.hint = kJitHintNone;
+ self->trace[0].isCode = true;
self->lastPC = 0;
break;
/*
#endif
/*
+ * Offsets of the metadata entries in the trace run array, relative to the
+ * trace run that ends with an invoke instruction.
+ */
+#define JIT_TRACE_CLASS_DESC 1
+#define JIT_TRACE_CLASS_LOADER 2
+#define JIT_TRACE_CUR_METHOD 3
+
+/*
* JitTable hash function.
*/
(lo) Implement OP_*_VOLATILE (12 instructions)
(lo) Implement OP_RETURN_VOID_BARRIER
(lo) Implement OP_INVOKE_OBJECT_INIT
+(lo) Implement dvmJitScanAllClassPointers