* We use this clock when we can because it enables us to track the time that
* a thread spends running and not blocked.
*/
-static inline u8 getThreadCpuTimeInUsec()
+static inline u8 getThreadCpuTimeInUsec(Thread* thread)
{
+ clockid_t cid;
struct timespec tm;
-
- clock_gettime(CLOCK_THREAD_CPUTIME_ID, &tm);
+ pthread_getcpuclockid(thread->handle, &cid);
+ clock_gettime(cid, &tm);
if (!(tm.tv_nsec >= 0 && tm.tv_nsec < 1*1000*1000*1000)) {
- LOGE("bad nsec: %ld", tm.tv_nsec);
+ ALOGE("bad nsec: %ld", tm.tv_nsec);
dvmAbort();
}
return tm.tv_sec * 1000000LL + tm.tv_nsec / 1000;
+/*
+ * Returns the clock used for stopwatch-style interval timing: the calling
+ * thread's CPU-time clock when POSIX clocks are available, otherwise the
+ * wall clock. Result is in microseconds.
+ */
static inline u8 getStopwatchClock()
{
#if defined(HAVE_POSIX_CLOCKS)
- return getThreadCpuTimeInUsec();
+ return getThreadCpuTimeInUsec(dvmThreadSelf());
#else
 return getWallTimeInUsec();
#endif
}
/*
+ * Gets a thread's stack trace as an array of method pointers of length pCount.
+ * The returned array must be freed by the caller.
+ */
+static const Method** getStackTrace(Thread* thread, size_t* pCount)
+{
+ void* fp = thread->interpSave.curFrame;
+ /* The walk is only safe if the target thread cannot modify its stack. */
+ assert(thread == dvmThreadSelf() || dvmIsSuspended(thread));
+
+ /* Compute the stack depth, skipping break frames. */
+ size_t stackDepth = 0;
+ while (fp != NULL) {
+ const StackSaveArea* saveArea = SAVEAREA_FROM_FP(fp);
+
+ if (!dvmIsBreakFrame((u4*) fp))
+ stackDepth++;
+
+ assert(fp != saveArea->prevFrame);
+ fp = saveArea->prevFrame;
+ }
+
+ /*
+ * Allocate memory for stack trace. This must be freed later, either by
+ * freeThreadStackTraceSamples when tracing stops or by freeThread.
+ */
+ const Method** stackTrace = (const Method**) malloc(sizeof(Method*) *
+ stackDepth);
+ if (stackTrace == NULL) {
+ /*
+ * Report a zero-length trace on allocation failure so callers never
+ * index a NULL array (previously *pCount was set before the malloc,
+ * leaving a nonzero count paired with a NULL pointer).
+ */
+ *pCount = 0;
+ return NULL;
+ }
+ *pCount = stackDepth;
+
+ /* Walk the stack a second time, filling in the stack trace. */
+ const Method** ptr = stackTrace;
+ fp = thread->interpSave.curFrame;
+ while (fp != NULL) {
+ const StackSaveArea* saveArea = SAVEAREA_FROM_FP(fp);
+ const Method* method = saveArea->method;
+
+ if (!dvmIsBreakFrame((u4*) fp)) {
+ *ptr++ = method;
+ stackDepth--;
+ }
+ assert(fp != saveArea->prevFrame);
+ fp = saveArea->prevFrame;
+ }
+ /* Both walks must have agreed on the number of non-break frames. */
+ assert(stackDepth == 0);
+
+ return stackTrace;
+}
+/*
+ * Get a sample of the stack trace for a thread.
+ */
+static void getSample(Thread* thread)
+{
+ /* Get old and new stack trace for thread. */
+ size_t newLength = 0;
+ const Method** newStackTrace = getStackTrace(thread, &newLength);
+ size_t oldLength = thread->stackTraceSampleLength;
+ const Method** oldStackTrace = thread->stackTraceSample;
+
+ /*
+ * If trace allocation failed, treat the new trace as empty so the entry
+ * loops below never index a NULL array; exit events for the old trace
+ * are still emitted and the stored sample is cleared.
+ */
+ if (newStackTrace == NULL)
+ newLength = 0;
+
+ /* Read time clocks to use for all events in this trace. */
+ u4 cpuClockDiff = 0;
+ u4 wallClockDiff = 0;
+ dvmMethodTraceReadClocks(thread, &cpuClockDiff, &wallClockDiff);
+ if (oldStackTrace == NULL) {
+ /*
+ * If there's no previous stack trace sample, log an entry event for
+ * every method in the trace. Cast before subtracting so a zero
+ * length yields -1 rather than size_t wraparound.
+ */
+ for (int i = (int) newLength - 1; i >= 0; --i) {
+ dvmMethodTraceAdd(thread, newStackTrace[i], METHOD_TRACE_ENTER,
+ cpuClockDiff, wallClockDiff);
+ }
+ } else {
+ /*
+ * If there's a previous stack trace, diff the traces and emit entry
+ * and exit events accordingly.
+ */
+ int diffIndexOld = (int) oldLength - 1;
+ int diffIndexNew = (int) newLength - 1;
+ /* Iterate bottom-up until there's a difference between traces. */
+ while (diffIndexOld >= 0 && diffIndexNew >= 0 &&
+ oldStackTrace[diffIndexOld] == newStackTrace[diffIndexNew]) {
+ diffIndexOld--;
+ diffIndexNew--;
+ }
+ /* Iterate top-down over old trace until diff, emitting exit events. */
+ for (int i = 0; i <= diffIndexOld; ++i) {
+ dvmMethodTraceAdd(thread, oldStackTrace[i], METHOD_TRACE_EXIT,
+ cpuClockDiff, wallClockDiff);
+ }
+ /* Iterate bottom-up over new trace from diff, emitting entry events. */
+ for (int i = diffIndexNew; i >= 0; --i) {
+ dvmMethodTraceAdd(thread, newStackTrace[i], METHOD_TRACE_ENTER,
+ cpuClockDiff, wallClockDiff);
+ }
+ }
+
+ /* Free the old stack trace and update the thread's stack trace sample. */
+ free(oldStackTrace);
+ thread->stackTraceSample = newStackTrace;
+ thread->stackTraceSampleLength = newLength;
+}
+
+/*
+ * Entry point for sampling thread. The sampling interval in microseconds is
+ * passed in as an argument.
+ */
+static void* runSamplingThread(void* arg)
+{
+ /*
+ * The interval is smuggled through the void* argument as an integer.
+ * NOTE(review): (int) of a pointer truncates on LP64 — assumes a 32-bit
+ * build; confirm, or route through intptr_t.
+ */
+ int intervalUs = (int) arg;
+ /* Loop until dvmMethodTraceStop() clears traceEnabled. */
+ while (gDvm.methodTrace.traceEnabled) {
+ /* Suspend everything so stacks are stable while we walk them. */
+ dvmSuspendAllThreads(SUSPEND_FOR_SAMPLING);
+
+ dvmLockThreadList(dvmThreadSelf());
+ for (Thread *thread = gDvm.threadList; thread != NULL; thread = thread->next) {
+ getSample(thread);
+ }
+ dvmUnlockThreadList();
+
+ dvmResumeAllThreads(SUSPEND_FOR_SAMPLING);
+
+ /* Sleep for the requested sampling interval before the next pass. */
+ usleep(intervalUs);
+ }
+ return NULL;
+}
+
+/*
* Boot-time init.
*/
bool dvmProfilingStartup()
/*
* Allocate storage for instruction counters.
*/
- gDvm.executedInstrCounts = (int*) malloc(kNumPackedOpcodes * sizeof(int));
+ gDvm.executedInstrCounts = (int*) calloc(kNumPackedOpcodes, sizeof(int));
if (gDvm.executedInstrCounts == NULL)
return false;
- memset(gDvm.executedInstrCounts, 0, kNumPackedOpcodes * sizeof(int));
#ifdef UPDATE_MAGIC_PAGE
/*
*/
int fd = open("/dev/qemu_trace", O_RDWR);
if (fd < 0) {
- LOGV("Unable to open /dev/qemu_trace");
+ ALOGV("Unable to open /dev/qemu_trace");
} else {
gDvm.emulatorTracePage = mmap(0, SYSTEM_PAGE_SIZE, PROT_READ|PROT_WRITE,
MAP_SHARED, fd, 0);
close(fd);
if (gDvm.emulatorTracePage == MAP_FAILED) {
- LOGE("Unable to mmap /dev/qemu_trace");
+ ALOGE("Unable to mmap /dev/qemu_trace");
gDvm.emulatorTracePage = NULL;
} else {
*(u4*) gDvm.emulatorTracePage = 0;
oldValue = gDvm.activeProfilers;
newValue = oldValue + (enable ? 1 : -1);
if (newValue < 0) {
- LOGE("Can't have %d active profilers", newValue);
+ ALOGE("Can't have %d active profilers", newValue);
dvmAbort();
}
} while (android_atomic_release_cas(oldValue, newValue,
dvmCompilerUpdateGlobalState();
#endif
- LOGD("+++ active profiler count now %d", newValue);
+ ALOGD("+++ active profiler count now %d", newValue);
}
}
/*
+ * Free and reset the "stackTraceSample" field in all threads.
+ */
+static void freeThreadStackTraceSamples()
+{
+ Thread* thread;
+
+ /*
+ * Walk the global thread list, releasing each thread's saved sample.
+ * NOTE(review): assumes the sampling thread is no longer writing these
+ * fields when this runs (tracing already stopped) — confirm at call site.
+ */
+ dvmLockThreadList(NULL);
+ for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
+ free(thread->stackTraceSample);
+ /* Clear the pointer so a later stop/free pass is a no-op. */
+ thread->stackTraceSample = NULL;
+ }
+ dvmUnlockThreadList();
+}
+
+/*
* Dump the thread list to the specified file.
*/
static void dumpThreadList(FILE* fp) {
* On failure, we throw an exception and return.
*/
void dvmMethodTraceStart(const char* traceFileName, int traceFd, int bufferSize,
- int flags, bool directToDdms)
+ int flags, bool directToDdms, bool samplingEnabled, int intervalUs)
{
MethodTraceState* state = &gDvm.methodTrace;
dvmLockMutex(&state->startStopLock);
while (state->traceEnabled != 0) {
- LOGI("TRACE start requested, but already in progress; stopping");
+ ALOGI("TRACE start requested, but already in progress; stopping");
dvmUnlockMutex(&state->startStopLock);
dvmMethodTraceStop();
dvmLockMutex(&state->startStopLock);
}
- LOGI("TRACE STARTED: '%s' %dKB", traceFileName, bufferSize / 1024);
+ ALOGI("TRACE STARTED: '%s' %dKB", traceFileName, bufferSize / 1024);
/*
* Allocate storage and open files.
}
if (state->traceFile == NULL) {
int err = errno;
- LOGE("Unable to open trace file '%s': %s",
+ ALOGE("Unable to open trace file '%s': %s",
traceFileName, strerror(err));
dvmThrowExceptionFmt(gDvm.exRuntimeException,
"Unable to open trace file '%s': %s",
state->recordSize = TRACE_REC_SIZE_SINGLE_CLOCK;
}
+ state->samplingEnabled = samplingEnabled;
+
/*
* Output the header.
*/
* following to take a Thread* argument, and set the appropriate
* interpBreak flags only on the target thread.
*/
- updateActiveProfilers(kSubModeMethodTrace, true);
+ if (samplingEnabled) {
+ updateActiveProfilers(kSubModeSampleTrace, true);
+ /* Start the sampling thread. */
+ if (!dvmCreateInternalThread(&state->samplingThreadHandle,
+ "Sampling Thread", &runSamplingThread, (void*) intervalUs)) {
+ dvmThrowInternalError("failed to create sampling thread");
+ goto fail;
+ }
+ } else {
+ updateActiveProfilers(kSubModeMethodTrace, true);
+ }
dvmUnlockMutex(&state->startStopLock);
return;
{
#if defined(HAVE_POSIX_CLOCKS)
if (useThreadCpuClock()) {
- getThreadCpuTimeInUsec();
+ getThreadCpuTimeInUsec(dvmThreadSelf());
}
#endif
if (useWallClock()) {
}
/*
- * Returns "true" if method tracing is currently active.
+ * Indicates if method tracing is active and what kind of tracing is active.
*/
-bool dvmIsMethodTraceActive()
+TracingMode dvmGetMethodTracingMode()
{
const MethodTraceState* state = &gDvm.methodTrace;
+ /*
+ * Distinguish the two active modes by the samplingEnabled flag set in
+ * dvmMethodTraceStart(). Reads state without taking startStopLock.
+ */
- return state->traceEnabled;
+ if (!state->traceEnabled) {
+ return TRACING_INACTIVE;
+ } else if (state->samplingEnabled) {
+ return SAMPLE_PROFILING_ACTIVE;
+ } else {
+ return METHOD_TRACING_ACTIVE;
+ }
}
/*
void dvmMethodTraceStop()
{
MethodTraceState* state = &gDvm.methodTrace;
+ bool samplingEnabled = state->samplingEnabled;
u8 elapsed;
/*
if (!state->traceEnabled) {
/* somebody already stopped it, or it was never started */
- LOGD("TRACE stop requested, but not running");
+ ALOGD("TRACE stop requested, but not running");
dvmUnlockMutex(&state->startStopLock);
return;
} else {
- updateActiveProfilers(kSubModeMethodTrace, false);
+ if (samplingEnabled) {
+ updateActiveProfilers(kSubModeSampleTrace, false);
+ } else {
+ updateActiveProfilers(kSubModeMethodTrace, false);
+ }
}
/* compute elapsed time */
| (scanPtr[5] << 24);
if (METHOD_ID(methodVal) == fillVal) {
u1* scanBase = state->buf + TRACE_HEADER_LEN;
- LOGW("Found unfilled record at %d (of %d)",
+ ALOGW("Found unfilled record at %d (of %d)",
(scanPtr - scanBase) / recordSize,
(finalCurOffset - TRACE_HEADER_LEN) / recordSize);
finalCurOffset = scanPtr - state->buf;
}
}
- LOGI("TRACE STOPPED%s: writing %d records",
+ ALOGI("TRACE STOPPED%s: writing %d records",
state->overflow ? " (NOTE: overflowed buffer)" : "",
(finalCurOffset - TRACE_HEADER_LEN) / recordSize);
if (gDvm.debuggerActive) {
- LOGW("WARNING: a debugger is active; method-tracing results "
+ ALOGW("WARNING: a debugger is active; method-tracing results "
"will be skewed");
}
state->traceFile = open_memstream(&memStreamPtr, &memStreamSize);
if (state->traceFile == NULL) {
/* not expected */
- LOGE("Unable to open memstream");
+ ALOGE("Unable to open memstream");
dvmAbort();
}
}
/* append the profiling data */
if (fwrite(state->buf, finalCurOffset, 1, state->traceFile) != 1) {
int err = errno;
- LOGE("trace fwrite(%d) failed: %s",
+ ALOGE("trace fwrite(%d) failed: %s",
finalCurOffset, strerror(err));
dvmThrowExceptionFmt(gDvm.exRuntimeException,
"Trace data write failed: %s", strerror(err));
fclose(state->traceFile);
state->traceFile = NULL;
+ /* free and clear sampling traces held by all threads */
+ if (samplingEnabled) {
+ freeThreadStackTraceSamples();
+ }
+
/* wake any threads that were waiting for profiling to complete */
dvmBroadcastCond(&state->threadExitCond);
dvmUnlockMutex(&state->startStopLock);
+
+ /* make sure the sampling thread has stopped */
+ if (samplingEnabled &&
+ pthread_join(state->samplingThreadHandle, NULL) != 0) {
+ ALOGW("Sampling thread join failed");
+ }
+}
+
+/*
+ * Read clocks and generate time diffs for method trace events.
+ */
+void dvmMethodTraceReadClocks(Thread* self, u4* cpuClockDiff,
+ u4* wallClockDiff)
+{
+ /*
+ * Outputs are written only when the corresponding clock is in use (and,
+ * for the CPU clock, only after the base time has been established), so
+ * callers must zero-initialize *cpuClockDiff and *wallClockDiff.
+ */
+#if defined(HAVE_POSIX_CLOCKS)
+ if (useThreadCpuClock()) {
+ if (!self->cpuClockBaseSet) {
+ /* Initialize per-thread CPU clock base time on first use. */
+ self->cpuClockBase = getThreadCpuTimeInUsec(self);
+ self->cpuClockBaseSet = true;
+ } else {
+ *cpuClockDiff = getThreadCpuTimeInUsec(self) - self->cpuClockBase;
+ }
+ }
+#endif
+ if (useWallClock()) {
+ /* Wall-clock diffs are relative to the trace start time. */
+ *wallClockDiff = getWallTimeInUsec() - gDvm.methodTrace.startWhen;
+ }
+}
/*
* Multiple threads may be banging on this all at once. We use atomic ops
* rather than mutexes for speed.
*/
-void dvmMethodTraceAdd(Thread* self, const Method* method, int action)
+void dvmMethodTraceAdd(Thread* self, const Method* method, int action,
+ u4 cpuClockDiff, u4 wallClockDiff)
{
MethodTraceState* state = &gDvm.methodTrace;
u4 methodVal;
assert(method != NULL);
-#if defined(HAVE_POSIX_CLOCKS)
- /*
- * We can only access the per-thread CPU clock from within the
- * thread, so we have to initialize the base time on the first use.
- * (Looks like pthread_getcpuclockid(thread, &id) will do what we
- * want, but it doesn't appear to be defined on the device.)
- */
- if (!self->cpuClockBaseSet) {
- self->cpuClockBase = getThreadCpuTimeInUsec();
- self->cpuClockBaseSet = true;
- //LOGI("thread base id=%d 0x%llx",
- // self->threadId, self->cpuClockBase);
- }
-#endif
-
/*
* Advance "curOffset" atomically.
*/
#if defined(HAVE_POSIX_CLOCKS)
if (useThreadCpuClock()) {
- u4 cpuClockDiff = (u4) (getThreadCpuTimeInUsec() - self->cpuClockBase);
*ptr++ = (u1) cpuClockDiff;
*ptr++ = (u1) (cpuClockDiff >> 8);
*ptr++ = (u1) (cpuClockDiff >> 16);
#endif
if (useWallClock()) {
- u4 wallClockDiff = (u4) (getWallTimeInUsec() - state->startWhen);
*ptr++ = (u1) wallClockDiff;
*ptr++ = (u1) (wallClockDiff >> 8);
*ptr++ = (u1) (wallClockDiff >> 16);
+/* Fast path: log a method-entry trace record if tracing is on for self. */
void dvmFastMethodTraceEnter(const Method* method, Thread* self)
{
if (self->interpBreak.ctl.subMode & kSubModeMethodTrace) {
- dvmMethodTraceAdd(self, method, METHOD_TRACE_ENTER);
+ u4 cpuClockDiff = 0;
+ u4 wallClockDiff = 0;
+ dvmMethodTraceReadClocks(self, &cpuClockDiff, &wallClockDiff);
+ dvmMethodTraceAdd(self, method, METHOD_TRACE_ENTER, cpuClockDiff,
+ wallClockDiff);
}
}
+/* Fast path: log a method-exit record for the current method, if tracing. */
void dvmFastMethodTraceExit(Thread* self)
{
if (self->interpBreak.ctl.subMode & kSubModeMethodTrace) {
+ u4 cpuClockDiff = 0;
+ u4 wallClockDiff = 0;
+ dvmMethodTraceReadClocks(self, &cpuClockDiff, &wallClockDiff);
dvmMethodTraceAdd(self, self->interpSave.method,
- METHOD_TRACE_EXIT);
+ METHOD_TRACE_EXIT, cpuClockDiff, wallClockDiff);
}
}
+/* Fast path: log an exit record for a native method, if tracing is on. */
void dvmFastNativeMethodTraceExit(const Method* method, Thread* self)
{
if (self->interpBreak.ctl.subMode & kSubModeMethodTrace) {
- dvmMethodTraceAdd(self, method, METHOD_TRACE_EXIT);
+ u4 cpuClockDiff = 0;
+ u4 wallClockDiff = 0;
+ dvmMethodTraceReadClocks(self, &cpuClockDiff, &wallClockDiff);
+ dvmMethodTraceAdd(self, method, METHOD_TRACE_EXIT, cpuClockDiff,
+ wallClockDiff);
}
}
/* in theory we should make this an atomic inc; in practice not important */
gDvm.emulatorTraceEnableCount++;
if (gDvm.emulatorTraceEnableCount == 1)
- LOGD("--- emulator method traces enabled");
+ ALOGD("--- emulator method traces enabled");
updateActiveProfilers(kSubModeEmulatorTrace, true);
}
+/* Decrement the emulator-trace refcount; disable tracing at zero. */
void dvmEmulatorTraceStop()
{
if (gDvm.emulatorTraceEnableCount == 0) {
- LOGE("ERROR: emulator tracing not enabled");
+ ALOGE("ERROR: emulator tracing not enabled");
return;
}
/* in theory we should make this an atomic inc; in practice not important */
gDvm.emulatorTraceEnableCount--;
if (gDvm.emulatorTraceEnableCount == 0)
- LOGD("--- emulator method traces disabled");
+ ALOGD("--- emulator method traces disabled");
+ /*
+ * NOTE(review): with a nonzero remaining count this passes enable=true
+ * again; presumably updateActiveProfilers tolerates re-enable — verify.
+ */
updateActiveProfilers(kSubModeEmulatorTrace,
(gDvm.emulatorTraceEnableCount != 0));
}
void dvmStopInstructionCounting()
{
if (gDvm.instructionCountEnableCount == 0) {
- LOGE("ERROR: instruction counting not enabled");
+ ALOGE("ERROR: instruction counting not enabled");
dvmAbort();
}
gDvm.instructionCountEnableCount--;