2 * Copyright (C) 2008 The Android Open Source Project
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
18 * Android's method call profiling goodies.
21 #include <interp/InterpDefs.h>
33 #include <cutils/open_memstream.h>
// NOTE(review): this listing embeds original source line numbers at the start
// of every line and has dropped lines throughout (the embedded numbering has
// gaps). Reconcile against the pristine file before compiling.
35 #ifdef HAVE_ANDROID_OS
// On-device builds get the emulator "magic page" tracing support compiled in.
36 # define UPDATE_MAGIC_PAGE 1
// Trace file layout (header fields, then fixed-size records):
50 * u8 start date/time in usec
51 * u2 record size in bytes (version >= 2 only)
52 * ... padding to 32 bytes
56 * u4 method ID | method action
57 * u4 time delta since start, in usec
61 * u4 method ID | method action
62 * u4 time delta since start, in usec
66 * u4 method ID | method action
67 * u4 time delta since start, in usec
68 * u4 wall time since start, in usec (when clock == "dual" only)
70 * 32 bits of microseconds is 70 minutes.
72 * All values are stored in little-endian order.
// Per-record sizes for the single-clock (v2) and dual-clock (v3) formats.
74 #define TRACE_REC_SIZE_SINGLE_CLOCK 10 // using v2
75 #define TRACE_REC_SIZE_DUAL_CLOCK 14 // using v3 with two timestamps
// "SLOW" in little-endian ASCII; identifies the trace file format.
76 #define TRACE_MAGIC 0x574f4c53
77 #define TRACE_HEADER_LEN 32
// Buffer fill pattern used to detect records that were never written.
79 #define FILL_PATTERN 0xeeeeeeee
83 * Returns true if the thread CPU clock should be used.
// NOTE(review): the #else branch, #endif, and closing brace are missing from
// this listing -- confirm against the original file.
85 static inline bool useThreadCpuClock() {
86 #if defined(HAVE_POSIX_CLOCKS)
// With POSIX clocks available, use the thread-CPU clock unless the profiler
// was explicitly configured for wall-clock only.
87 return gDvm.profilerClockSource != kProfilerClockSourceWall;
94 * Returns true if the wall clock should be used.
// NOTE(review): the #else branch, #endif, and closing brace are missing from
// this listing -- confirm against the original file.
96 static inline bool useWallClock() {
97 #if defined(HAVE_POSIX_CLOCKS)
// Wall clock is used unless the profiler was configured thread-cpu only.
98 return gDvm.profilerClockSource != kProfilerClockSourceThreadCpu;
105 * Get the wall-clock date/time, in usec.
// NOTE(review): the local `struct timeval tv;` declaration line is missing
// from this listing.
107 static inline u8 getWallTimeInUsec()
111 gettimeofday(&tv, NULL);
// Fold sec + usec into a single 64-bit usec value; the LL constant forces
// 64-bit multiplication so the seconds term cannot overflow.
112 return tv.tv_sec * 1000000LL + tv.tv_usec;
115 #if defined(HAVE_POSIX_CLOCKS)
117 * Get the thread-cpu time, in usec.
118 * We use this clock when we can because it enables us to track the time that
119 * a thread spends running and not blocked.
// NOTE(review): the local `struct timespec tm;` declaration and some braces
// are missing from this listing.
121 static inline u8 getThreadCpuTimeInUsec()
// CLOCK_THREAD_CPUTIME_ID measures CPU time consumed by the calling thread.
125 clock_gettime(CLOCK_THREAD_CPUTIME_ID, &tm);
// Sanity-check: tv_nsec must lie in [0, 1e9) or the kernel returned garbage.
126 if (!(tm.tv_nsec >= 0 && tm.tv_nsec < 1*1000*1000*1000)) {
127 LOGE("bad nsec: %ld", tm.tv_nsec);
// Convert sec + nsec to usec (64-bit math via the LL constant).
130 return tm.tv_sec * 1000000LL + tm.tv_nsec / 1000;
135 * Get the clock used for stopwatch-like timing measurements on a single thread.
137 static inline u8 getStopwatchClock()
139 #if defined(HAVE_POSIX_CLOCKS)
// Prefer the per-thread CPU clock when POSIX clocks are available...
140 return getThreadCpuTimeInUsec();
// ...otherwise fall back to wall-clock time.
142 return getWallTimeInUsec();
147 * Write little-endian data.
// Store a 16-bit value, least-significant byte first.
// NOTE(review): the first store (presumably `*buf++ = (u1) val;`) is missing
// from this listing.
149 static inline void storeShortLE(u1* buf, u2 val)
152 *buf++ = (u1) (val >> 8);
// Store a 32-bit value in little-endian byte order.
// NOTE(review): the low-byte store line is missing from this listing.
154 static inline void storeIntLE(u1* buf, u4 val)
157 *buf++ = (u1) (val >> 8);
158 *buf++ = (u1) (val >> 16);
159 *buf++ = (u1) (val >> 24);
// Store a 64-bit value in little-endian byte order.
// NOTE(review): the low-byte store line is missing from this listing.
161 static inline void storeLongLE(u1* buf, u8 val)
164 *buf++ = (u1) (val >> 8);
165 *buf++ = (u1) (val >> 16);
166 *buf++ = (u1) (val >> 24);
167 *buf++ = (u1) (val >> 32);
168 *buf++ = (u1) (val >> 40);
169 *buf++ = (u1) (val >> 48);
170 *buf++ = (u1) (val >> 56);
// One-time VM startup: initialize method-trace state, allocate the
// per-opcode instruction counters, and (on Android) try to map the
// emulator's magic trace page. Returns false on allocation failure
// (per the visible NULL check; the return lines are missing here).
176 bool dvmProfilingStartup()
179 * Initialize "dmtrace" method profiling.
181 memset(&gDvm.methodTrace, 0, sizeof(gDvm.methodTrace));
182 dvmInitMutex(&gDvm.methodTrace.startStopLock);
183 pthread_cond_init(&gDvm.methodTrace.threadExitCond, NULL);
185 assert(!dvmCheckException(dvmThreadSelf()));
188 * Allocate storage for instruction counters.
// NOTE(review): malloc+memset could be calloc; left as-is since the listing
// is incomplete.
190 gDvm.executedInstrCounts = (int*) malloc(kNumPackedOpcodes * sizeof(int));
191 if (gDvm.executedInstrCounts == NULL)
193 memset(gDvm.executedInstrCounts, 0, kNumPackedOpcodes * sizeof(int));
195 #ifdef UPDATE_MAGIC_PAGE
197 * If we're running on the emulator, there's a magic page into which
198 * we can put interpreted method information. This allows interpreted
199 * methods to show up in the emulator's code traces.
201 * We could key this off of the "ro.kernel.qemu" property, but there's
202 * no real harm in doing this on a real device.
204 int fd = open("/dev/qemu_trace", O_RDWR);
// Failure to open is expected off-emulator; verbose log only.
206 ALOGV("Unable to open /dev/qemu_trace");
// NOTE(review): the mmap flags/fd argument line and the close(fd) that
// presumably follows are missing from this listing -- confirm fd is not
// leaked in the original.
208 gDvm.emulatorTracePage = mmap(0, SYSTEM_PAGE_SIZE, PROT_READ|PROT_WRITE,
211 if (gDvm.emulatorTracePage == MAP_FAILED) {
212 LOGE("Unable to mmap /dev/qemu_trace");
213 gDvm.emulatorTracePage = NULL;
// Zero the first word so the emulator sees a clean page.
215 *(u4*) gDvm.emulatorTracePage = 0;
// Non-UPDATE_MAGIC_PAGE builds must never have a trace page.
219 assert(gDvm.emulatorTracePage == NULL);
226 * Free up profiling resources.
// Mirror of dvmProfilingStartup(): unmap the emulator trace page (if mapped)
// and release the instruction-counter array. free(NULL) is a safe no-op.
228 void dvmProfilingShutdown()
230 #ifdef UPDATE_MAGIC_PAGE
231 if (gDvm.emulatorTracePage != NULL)
232 munmap(gDvm.emulatorTracePage, SYSTEM_PAGE_SIZE);
234 free(gDvm.executedInstrCounts);
238 * Update the set of active profilers
// Atomically bump (or drop) the global active-profiler count via a CAS
// retry loop, then enable/disable the interpreter sub-mode on all threads
// and let the JIT react to the new global state.
240 static void updateActiveProfilers(ExecutionSubModes newMode, bool enable)
242 int oldValue, newValue;
244 // Update global count
// NOTE(review): the `do {` opener and the negative-count error branch around
// line 249 are partially missing from this listing.
246 oldValue = gDvm.activeProfilers;
247 newValue = oldValue + (enable ? 1 : -1);
249 LOGE("Can't have %d active profilers", newValue);
// Retry until the CAS lands (another thread may race on the counter).
252 } while (android_atomic_release_cas(oldValue, newValue,
253 &gDvm.activeProfilers) != 0);
257 dvmEnableAllSubMode(newMode);
259 dvmDisableAllSubMode(newMode);
262 #if defined(WITH_JIT)
// The JIT keys code-generation decisions off the active-profiler state.
263 dvmCompilerUpdateGlobalState();
266 ALOGD("+++ active profiler count now %d", newValue);
271 * Reset the "cpuClockBase" field in all threads.
// Walk the global thread list under the thread-list lock and clear each
// thread's per-thread CPU-clock base, forcing re-initialization on the
// thread's next trace record (see dvmMethodTraceAdd).
273 static void resetCpuClockBase()
277 dvmLockThreadList(NULL);
278 for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
279 thread->cpuClockBaseSet = false;
280 thread->cpuClockBase = 0;
282 dvmUnlockThreadList();
286 * Dump the thread list to the specified file.
// Emits one "<threadId>\t<name>" line per live thread, holding the
// thread-list lock for the duration of the walk.
288 static void dumpThreadList(FILE* fp) {
289 dvmLockThreadList(NULL);
290 for (Thread* thread = gDvm.threadList; thread != NULL; thread = thread->next) {
291 std::string threadName(dvmGetThreadName(thread));
292 fprintf(fp, "%d\t%s\n", thread->threadId, threadName.c_str());
294 dvmUnlockThreadList();
298 * This is a dvmHashForeach callback.
// For one loaded class, writes a key-file line for every virtual and direct
// method whose inProfile flag was set by markTouchedMethods(), then clears
// the flag. Line format: "<methodId>\t<class>\t<name>\t<sig>\t<src>\t<line>".
300 static int dumpMarkedMethods(void* vclazz, void* vfp)
302 DexStringCache stringCache;
303 ClassObject* clazz = (ClassObject*) vclazz;
304 FILE* fp = (FILE*) vfp;
// NOTE(review): local declarations (Method* meth, char* name, int i) and the
// free(name) cleanup lines appear to be missing from this listing.
309 dexStringCacheInit(&stringCache);
311 for (i = 0; i < clazz->virtualMethodCount; i++) {
312 meth = &clazz->virtualMethods[i];
313 if (meth->inProfile) {
314 name = dvmDescriptorToName(meth->clazz->descriptor);
// NOTE(review): casting a Method* to int as the method ID truncates on
// 64-bit builds -- this code assumes a 32-bit pointer model; confirm.
315 fprintf(fp, "0x%08x\t%s\t%s\t%s\t%s\t%d\n", (int) meth,
317 dexProtoGetMethodDescriptor(&meth->prototype, &stringCache),
318 dvmGetMethodSourceFile(meth), dvmLineNumFromPC(meth, 0));
319 meth->inProfile = false;
// Same treatment for direct (static/private/constructor) methods.
324 for (i = 0; i < clazz->directMethodCount; i++) {
325 meth = &clazz->directMethods[i];
326 if (meth->inProfile) {
327 name = dvmDescriptorToName(meth->clazz->descriptor);
328 fprintf(fp, "0x%08x\t%s\t%s\t%s\t%s\t%d\n", (int) meth,
330 dexProtoGetMethodDescriptor(&meth->prototype, &stringCache),
331 dvmGetMethodSourceFile(meth), dvmLineNumFromPC(meth, 0));
332 meth->inProfile = false;
337 dexStringCacheRelease(&stringCache);
343 * Dump the list of "marked" methods to the specified file.
// Iterates every loaded class under the hash-table lock, delegating the
// per-class output (and inProfile flag clearing) to dumpMarkedMethods().
345 static void dumpMethodList(FILE* fp)
347 dvmHashTableLock(gDvm.loadedClasses);
348 dvmHashForeach(gDvm.loadedClasses, dumpMarkedMethods, (void*) fp);
349 dvmHashTableUnlock(gDvm.loadedClasses);
353 * Start method tracing. Method tracing is global to the VM (i.e. we
354 * trace all threads).
356 * This opens the output file (if an already open fd has not been supplied,
357 * and we're not going direct to DDMS) and allocates the data buffer. This
358 * takes ownership of the file descriptor, closing it on completion.
360 * On failure, we throw an exception and return.
// Params: traceFileName - output path (used when traceFd < 0 and not DDMS);
// traceFd - pre-opened fd to adopt, or negative; bufferSize - record buffer
// bytes (must be > 0); flags - TRACE_* bits; directToDdms - stream to DDMS
// instead of a file.
362 void dvmMethodTraceStart(const char* traceFileName, int traceFd, int bufferSize,
363 int flags, bool directToDdms)
365 MethodTraceState* state = &gDvm.methodTrace;
367 assert(bufferSize > 0);
369 dvmLockMutex(&state->startStopLock);
// If a trace is already running, stop it first; drop the lock while doing
// so because dvmMethodTraceStop() takes the same lock.
370 while (state->traceEnabled != 0) {
371 ALOGI("TRACE start requested, but already in progress; stopping");
372 dvmUnlockMutex(&state->startStopLock);
373 dvmMethodTraceStop();
374 dvmLockMutex(&state->startStopLock);
376 ALOGI("TRACE STARTED: '%s' %dKB", traceFileName, bufferSize / 1024);
379 * Allocate storage and open files.
381 * We don't need to initialize the buffer, but doing so might remove
382 * some fault overhead if the pages aren't mapped until touched.
384 state->buf = (u1*) malloc(bufferSize);
385 if (state->buf == NULL) {
386 dvmThrowInternalError("buffer alloc failed");
// NOTE(review): the directToDdms/traceFd branch structure around lines
// 388-394 is missing from this listing; visibly, fopen() is the named-file
// path and fdopen() adopts a supplied fd.
391 state->traceFile = fopen(traceFileName, "w");
393 state->traceFile = fdopen(traceFd, "w");
395 if (state->traceFile == NULL) {
397 LOGE("Unable to open trace file '%s': %s",
398 traceFileName, strerror(err));
399 dvmThrowExceptionFmt(gDvm.exRuntimeException,
400 "Unable to open trace file '%s': %s",
401 traceFileName, strerror(err));
// Pre-fill with a sentinel so partially-written records can be detected
// at stop time (see dvmMethodTraceStop).
406 memset(state->buf, (char)FILL_PATTERN, bufferSize);
408 state->directToDdms = directToDdms;
409 state->bufferSize = bufferSize;
410 state->overflow = false;
413 * Enable alloc counts if we've been requested to do so.
415 state->flags = flags;
416 if ((flags & TRACE_ALLOC_COUNTS) != 0)
417 dvmStartAllocCounting();
419 /* reset our notion of the start time for all CPU threads */
422 state->startWhen = getWallTimeInUsec();
// Dual clocks -> v3 records (14 bytes); single clock -> v2 (10 bytes).
424 if (useThreadCpuClock() && useWallClock()) {
425 state->traceVersion = 3;
426 state->recordSize = TRACE_REC_SIZE_DUAL_CLOCK;
428 state->traceVersion = 2;
429 state->recordSize = TRACE_REC_SIZE_SINGLE_CLOCK;
// Build the 32-byte binary header in place at the start of the buffer.
435 memset(state->buf, 0, TRACE_HEADER_LEN);
436 storeIntLE(state->buf + 0, TRACE_MAGIC);
437 storeShortLE(state->buf + 4, state->traceVersion);
438 storeShortLE(state->buf + 6, TRACE_HEADER_LEN);
439 storeLongLE(state->buf + 8, state->startWhen);
440 if (state->traceVersion >= 3) {
441 storeShortLE(state->buf + 16, state->recordSize);
443 state->curOffset = TRACE_HEADER_LEN;
446 * Set the "enabled" flag. Once we do this, threads will wait to be
447 * signaled before exiting, so we have to make sure we wake them up.
449 android_atomic_release_store(true, &state->traceEnabled);
452 * ENHANCEMENT: To trace just a single thread, modify the
453 * following to take a Thread* argument, and set the appropriate
454 * interpBreak flags only on the target thread.
456 updateActiveProfilers(kSubModeMethodTrace, true);
458 dvmUnlockMutex(&state->startStopLock);
// Failure path (label missing from this listing): release whatever was
// opened/allocated, then unlock and return with the exception pending.
462 if (state->traceFile != NULL) {
463 fclose(state->traceFile);
464 state->traceFile = NULL;
466 if (state->buf != NULL) {
472 dvmUnlockMutex(&state->startStopLock);
476 * Run through the data buffer and pull out the methods that were visited.
477 * Set a mark so that we know which ones to output.
// Scans every record between the header and endOffset, decodes the
// little-endian method word from bytes [2..5] of each record, strips the
// action bits with METHOD_ID(), and flags the Method for key-file output.
479 static void markTouchedMethods(int endOffset)
481 u1* ptr = gDvm.methodTrace.buf + TRACE_HEADER_LEN;
482 u1* end = gDvm.methodTrace.buf + endOffset;
483 size_t recordSize = gDvm.methodTrace.recordSize;
484 unsigned int methodVal;
// NOTE(review): the loop header, the `| (ptr[5] << 24)` continuation, and
// the `ptr += recordSize` advance are missing from this listing.
488 methodVal = ptr[2] | (ptr[3] << 8) | (ptr[4] << 16)
490 method = (Method*) METHOD_ID(methodVal);
492 method->inProfile = true;
498 * Exercises the clocks in the same way they will be during profiling.
// Calls exactly the clock(s) a trace record would read, so that
// getClockOverhead() measures the true per-record timing cost.
500 static inline void measureClockOverhead()
502 #if defined(HAVE_POSIX_CLOCKS)
503 if (useThreadCpuClock()) {
504 getThreadCpuTimeInUsec();
// NOTE(review): the getWallTimeInUsec() call inside this branch is missing
// from this listing.
507 if (useWallClock()) {
513 * Compute the amount of overhead in a clock call, in nsec.
515 * This value is going to vary depending on what else is going on in the
516 * system. When examined across several runs a pattern should emerge.
// Times 32000 clock calls (4000 iterations x 8 unrolled calls) on the
// stopwatch clock and returns the mean cost per call.
518 static u4 getClockOverhead()
520 u8 calStart, calElapsed;
523 calStart = getStopwatchClock();
// Manually unrolled x8 to keep loop overhead out of the measurement.
524 for (i = 1000 * 4; i > 0; i--) {
525 measureClockOverhead();
526 measureClockOverhead();
527 measureClockOverhead();
528 measureClockOverhead();
529 measureClockOverhead();
530 measureClockOverhead();
531 measureClockOverhead();
532 measureClockOverhead();
535 calElapsed = getStopwatchClock() - calStart;
// Divide by total call count (8 per iteration * 4000 iterations / 1000...);
// NOTE(review): elapsed is in usec but the doc says nsec -- the scaling
// lines appear to be missing from this listing; confirm units.
536 return (int) (calElapsed / (8*4));
540 * Returns "true" if method tracing is currently active.
// Lock-free read of the enabled flag; callers needing a stable answer must
// hold startStopLock.
542 bool dvmIsMethodTraceActive()
544 const MethodTraceState* state = &gDvm.methodTrace;
545 return state->traceEnabled;
549 * Stop method tracing. We write the buffer to disk and generate a key
550 * file so we can interpret it.
// Disables tracing, scans the buffer for partially-written records, emits
// the text key file (version/clock/thread/method sections) followed by the
// binary record buffer -- either to the trace file or, for DDMS, as an MPSE
// chunk -- then wakes any threads blocked on profiling completion.
552 void dvmMethodTraceStop()
554 MethodTraceState* state = &gDvm.methodTrace;
558 * We need this to prevent somebody from starting a new trace while
559 * we're in the process of stopping the old.
561 dvmLockMutex(&state->startStopLock);
563 if (!state->traceEnabled) {
564 /* somebody already stopped it, or it was never started */
565 ALOGD("TRACE stop requested, but not running");
566 dvmUnlockMutex(&state->startStopLock);
569 updateActiveProfilers(kSubModeMethodTrace, false);
572 /* compute elapsed time */
573 elapsed = getWallTimeInUsec() - state->startWhen;
576 * Globally disable it, and allow other threads to notice. We want
577 * to stall here for at least as long as dvmMethodTraceAdd needs
578 * to finish. There's no real risk though -- it will take a while to
579 * write the data to disk, and we don't clear the buffer pointer until
580 * after that completes.
582 state->traceEnabled = false;
583 ANDROID_MEMBAR_FULL();
587 if ((state->flags & TRACE_ALLOC_COUNTS) != 0)
588 dvmStopAllocCounting();
591 * It's possible under some circumstances for a thread to have advanced
592 * the data pointer but not written the method value. It's possible
593 * (though less likely) for the data pointer to be advanced, or partial
594 * data written, while we're doing work here.
596 * To avoid seeing partially-written data, we grab state->curOffset here,
597 * and use our local copy from here on. We then scan through what's
598 * already written. If we see the fill pattern in what should be the
599 * method pointer, we cut things off early. (If we don't, we'll fail
600 * when we dereference the pointer.)
602 * There's a theoretical possibility of interrupting another thread
603 * after it has partially written the method pointer, in which case
604 * we'll likely crash when we dereference it. The possibility of
605 * this actually happening should be at or near zero. Fixing it
606 * completely could be done by writing the thread number last and
607 * using a sentinel value to indicate a partially-written record,
608 * but that requires memory barriers.
610 int finalCurOffset = state->curOffset;
612 size_t recordSize = state->recordSize;
// Scan for the FILL_PATTERN sentinel in each record's method word and
// truncate the output at the first unfilled record.
613 if (finalCurOffset > TRACE_HEADER_LEN) {
614 u4 fillVal = METHOD_ID(FILL_PATTERN);
615 u1* scanPtr = state->buf + TRACE_HEADER_LEN;
617 while (scanPtr < state->buf + finalCurOffset) {
618 u4 methodVal = scanPtr[2] | (scanPtr[3] << 8) | (scanPtr[4] << 16)
619 | (scanPtr[5] << 24);
620 if (METHOD_ID(methodVal) == fillVal) {
621 u1* scanBase = state->buf + TRACE_HEADER_LEN;
622 ALOGW("Found unfilled record at %d (of %d)",
623 (scanPtr - scanBase) / recordSize,
624 (finalCurOffset - TRACE_HEADER_LEN) / recordSize);
625 finalCurOffset = scanPtr - state->buf;
629 scanPtr += recordSize;
633 ALOGI("TRACE STOPPED%s: writing %d records",
634 state->overflow ? " (NOTE: overflowed buffer)" : "",
635 (finalCurOffset - TRACE_HEADER_LEN) / recordSize);
636 if (gDvm.debuggerActive) {
637 ALOGW("WARNING: a debugger is active; method-tracing results "
642 * Do a quick calibration test to see how expensive our clock call is.
644 u4 clockNsec = getClockOverhead();
646 markTouchedMethods(finalCurOffset);
// NOTE(review): the `char* memStreamPtr;` declaration appears to be missing
// from this listing.
649 size_t memStreamSize;
650 if (state->directToDdms) {
651 assert(state->traceFile == NULL);
// open_memstream() buffers the key file in memory so it can be sent to
// DDMS in one packet rather than written to disk.
652 state->traceFile = open_memstream(&memStreamPtr, &memStreamSize);
653 if (state->traceFile == NULL) {
655 LOGE("Unable to open memstream");
659 assert(state->traceFile != NULL);
// --- key file: *version section ---
661 fprintf(state->traceFile, "%cversion\n", TOKEN_CHAR);
662 fprintf(state->traceFile, "%d\n", state->traceVersion);
663 fprintf(state->traceFile, "data-file-overflow=%s\n",
664 state->overflow ? "true" : "false");
665 if (useThreadCpuClock()) {
666 if (useWallClock()) {
667 fprintf(state->traceFile, "clock=dual\n");
669 fprintf(state->traceFile, "clock=thread-cpu\n");
672 fprintf(state->traceFile, "clock=wall\n");
674 fprintf(state->traceFile, "elapsed-time-usec=%llu\n", elapsed);
675 fprintf(state->traceFile, "num-method-calls=%d\n",
676 (finalCurOffset - TRACE_HEADER_LEN) / state->recordSize);
677 fprintf(state->traceFile, "clock-call-overhead-nsec=%d\n", clockNsec);
678 fprintf(state->traceFile, "vm=dalvik\n");
679 if ((state->flags & TRACE_ALLOC_COUNTS) != 0) {
680 fprintf(state->traceFile, "alloc-count=%d\n",
681 gDvm.allocProf.allocCount);
682 fprintf(state->traceFile, "alloc-size=%d\n",
683 gDvm.allocProf.allocSize);
684 fprintf(state->traceFile, "gc-count=%d\n",
685 gDvm.allocProf.gcCount);
// --- key file: *threads, *methods, *end sections ---
687 fprintf(state->traceFile, "%cthreads\n", TOKEN_CHAR);
688 dumpThreadList(state->traceFile);
689 fprintf(state->traceFile, "%cmethods\n", TOKEN_CHAR);
690 dumpMethodList(state->traceFile);
691 fprintf(state->traceFile, "%cend\n", TOKEN_CHAR);
693 if (state->directToDdms) {
695 * Data is in two places: memStreamPtr and state->buf. Send
696 * the whole thing to DDMS, wrapped in an MPSE packet.
698 fflush(state->traceFile);
// NOTE(review): the `struct iovec iov[2];` declaration is missing from
// this listing.
701 iov[0].iov_base = memStreamPtr;
702 iov[0].iov_len = memStreamSize;
703 iov[1].iov_base = state->buf;
704 iov[1].iov_len = finalCurOffset;
705 dvmDbgDdmSendChunkV(CHUNK_TYPE("MPSE"), iov, 2);
707 /* append the profiling data */
708 if (fwrite(state->buf, finalCurOffset, 1, state->traceFile) != 1) {
710 LOGE("trace fwrite(%d) failed: %s",
711 finalCurOffset, strerror(err));
712 dvmThrowExceptionFmt(gDvm.exRuntimeException,
713 "Trace data write failed: %s", strerror(err));
// NOTE(review): the free(state->buf)/state->buf = NULL cleanup lines
// around 716-718 are missing from this listing.
720 fclose(state->traceFile);
721 state->traceFile = NULL;
723 /* wake any threads that were waiting for profiling to complete */
724 dvmBroadcastCond(&state->threadExitCond);
725 dvmUnlockMutex(&state->startStopLock);
729 * We just did something with a method. Emit a record.
731 * Multiple threads may be banging on this all at once. We use atomic ops
732 * rather than mutexes for speed.
// Reserves a record slot with a CAS on curOffset, then writes the record
// fields (thread id, method|action word, clock delta(s)) little-endian into
// the reserved slot. On buffer overflow the overflow flag is set and the
// record is dropped.
734 void dvmMethodTraceAdd(Thread* self, const Method* method, int action)
736 MethodTraceState* state = &gDvm.methodTrace;
738 int oldOffset, newOffset;
741 assert(method != NULL);
743 #if defined(HAVE_POSIX_CLOCKS)
745 * We can only access the per-thread CPU clock from within the
746 * thread, so we have to initialize the base time on the first use.
747 * (Looks like pthread_getcpuclockid(thread, &id) will do what we
748 * want, but it doesn't appear to be defined on the device.)
750 if (!self->cpuClockBaseSet) {
751 self->cpuClockBase = getThreadCpuTimeInUsec();
752 self->cpuClockBaseSet = true;
753 //ALOGI("thread base id=%d 0x%llx",
754 // self->threadId, self->cpuClockBase);
759 * Advance "curOffset" atomically.
// NOTE(review): the `do {` opener and the early-return inside the overflow
// branch are missing from this listing.
762 oldOffset = state->curOffset;
763 newOffset = oldOffset + state->recordSize;
764 if (newOffset > state->bufferSize) {
765 state->overflow = true;
768 } while (android_atomic_release_cas(oldOffset, newOffset,
769 &state->curOffset) != 0);
// The low bits of the Method pointer must be clear so the action bits can
// be packed in (alignment guarantees this).
771 //assert(METHOD_ACTION((u4) method) == 0);
773 methodVal = METHOD_COMBINE((u4) method, action);
776 * Write data into "oldOffset".
778 ptr = state->buf + oldOffset;
779 *ptr++ = (u1) self->threadId;
780 *ptr++ = (u1) (self->threadId >> 8);
781 *ptr++ = (u1) methodVal;
782 *ptr++ = (u1) (methodVal >> 8);
783 *ptr++ = (u1) (methodVal >> 16);
784 *ptr++ = (u1) (methodVal >> 24);
786 #if defined(HAVE_POSIX_CLOCKS)
787 if (useThreadCpuClock()) {
// Thread-CPU delta is relative to this thread's lazily-set clock base.
788 u4 cpuClockDiff = (u4) (getThreadCpuTimeInUsec() - self->cpuClockBase);
789 *ptr++ = (u1) cpuClockDiff;
790 *ptr++ = (u1) (cpuClockDiff >> 8);
791 *ptr++ = (u1) (cpuClockDiff >> 16);
792 *ptr++ = (u1) (cpuClockDiff >> 24);
796 if (useWallClock()) {
// Wall delta is relative to the global trace start time.
797 u4 wallClockDiff = (u4) (getWallTimeInUsec() - state->startWhen);
798 *ptr++ = (u1) wallClockDiff;
799 *ptr++ = (u1) (wallClockDiff >> 8);
800 *ptr++ = (u1) (wallClockDiff >> 16);
801 *ptr++ = (u1) (wallClockDiff >> 24);
807 * Register the METHOD_TRACE_ENTER action for the fast interpreter and
// Fast-path hook: emit an enter record only if this thread currently has
// the method-trace sub-mode enabled.
810 void dvmFastMethodTraceEnter(const Method* method, Thread* self)
812 if (self->interpBreak.ctl.subMode & kSubModeMethodTrace) {
813 dvmMethodTraceAdd(self, method, METHOD_TRACE_ENTER);
818 * Register the METHOD_TRACE_EXIT action for the fast interpreter and
819 * JIT'ed code for methods. The about-to-return callee method can be
820 * retrieved from self->interpSave.method.
822 void dvmFastMethodTraceExit(Thread* self)
824 if (self->interpBreak.ctl.subMode & kSubModeMethodTrace) {
// NOTE(review): the METHOD_TRACE_EXIT argument continuation line is missing
// from this listing.
825 dvmMethodTraceAdd(self, self->interpSave.method,
831 * Register the METHOD_TRACE_EXIT action for the fast interpreter and
832 * JIT'ed code for JNI methods. The about-to-return JNI callee method is passed
833 * in explicitly. Also used for inline-execute.
835 void dvmFastNativeMethodTraceExit(const Method* method, Thread* self)
837 if (self->interpBreak.ctl.subMode & kSubModeMethodTrace) {
838 dvmMethodTraceAdd(self, method, METHOD_TRACE_EXIT);
843 * We just did something with a method. Emit a record by setting a value
844 * in a magic memory location.
// Writes an address describing the method (bytecode address for interpreted
// methods, native-function address for JNI) into the emulator's mmap'd
// trace page at an action-dependent slot.
846 void dvmEmitEmulatorTrace(const Method* method, int action)
848 #ifdef UPDATE_MAGIC_PAGE
850 * We store the address of the Dalvik bytecodes to the memory-mapped
851 * trace page for normal methods. We also trace calls to native
852 * functions by storing the address of the native function to the
854 * Abstract methods don't have any bytecodes, so we don't trace them.
855 * (Abstract methods are never called, but in Dalvik they can be
856 * because we do a "late trap" to a native method to generate the
857 * abstract method exception.)
859 if (dvmIsAbstractMethod(method))
862 u4* pMagic = (u4*) gDvm.emulatorTracePage;
// NOTE(review): the `u4 addr;` declaration and the `action += 4;` line in
// the native branch appear to be missing from this listing.
865 if (dvmIsNativeMethod(method)) {
867 * The "action" parameter is one of:
871 * To help the trace tools reconstruct the runtime stack containing
872 * a mix of normal plus native methods, we add 4 to the action if this
873 * is a native method.
878 * Get the address of the native function.
879 * This isn't the right address -- how do I get it?
880 * Fortunately, the trace tools can get by without the address, but
881 * it would be nice to fix this.
883 addr = (u4) method->nativeFunc;
886 * The dexlist output shows the &DexCode.insns offset value, which
887 * is offset from the start of the base DEX header. Method.insns
888 * is the absolute address, effectively offset from the start of
889 * the optimized DEX header. We either need to return the
890 * optimized DEX base file address offset by the right amount, or
891 * take the "real" address and subtract off the size of the
892 * optimized DEX header.
894 * Would be nice to factor this out at dexlist time, but we can't count
895 * on having access to the correct optimized DEX file.
897 assert(method->insns != NULL);
898 const DexOptHeader* pOptHdr = method->clazz->pDvmDex->pDexFile->pOptHeader;
899 addr = (u4) method->insns - pOptHdr->dexOffset;
// Store into the slot selected by the (possibly native-adjusted) action.
902 *(pMagic+action) = addr;
903 LOGVV("Set %p = 0x%08x (%s.%s)",
904 pMagic+action, addr, method->clazz->descriptor, method->name);
909 * The GC calls this when it's about to start. We add a marker to the
910 * trace output so the tool can exclude the GC cost from the results.
// Records a synthetic enter/exit pair on the sentinel "GC method" so trace
// viewers can attribute GC time separately.
912 void dvmMethodTraceGCBegin()
914 TRACE_METHOD_ENTER(dvmThreadSelf(), gDvm.methodTraceGcMethod);
916 void dvmMethodTraceGCEnd()
918 TRACE_METHOD_EXIT(dvmThreadSelf(), gDvm.methodTraceGcMethod);
922 * The class loader calls this when it's loading or initializing a class.
// Same marker technique as the GC hooks, using the sentinel
// "class prep" method.
924 void dvmMethodTraceClassPrepBegin()
926 TRACE_METHOD_ENTER(dvmThreadSelf(), gDvm.methodTraceClassPrepMethod);
928 void dvmMethodTraceClassPrepEnd()
930 TRACE_METHOD_EXIT(dvmThreadSelf(), gDvm.methodTraceClassPrepMethod);
935 * Enable emulator trace info.
// Refcounted enable: bumps the enable count and turns on the emulator-trace
// interpreter sub-mode. No-op when the magic page was never mapped.
937 void dvmEmulatorTraceStart()
939 /* If we could not map the emulator trace page, then do not enable tracing */
940 if (gDvm.emulatorTracePage == NULL)
943 /* in theory we should make this an atomic inc; in practice not important */
944 gDvm.emulatorTraceEnableCount++;
945 if (gDvm.emulatorTraceEnableCount == 1)
946 ALOGD("--- emulator method traces enabled");
947 updateActiveProfilers(kSubModeEmulatorTrace, true);
951 * Disable emulator trace info.
// Refcounted disable, mirroring dvmEmulatorTraceStart(). The final argument
// keeps the sub-mode enabled while nested enables remain outstanding.
953 void dvmEmulatorTraceStop()
955 if (gDvm.emulatorTraceEnableCount == 0) {
956 LOGE("ERROR: emulator tracing not enabled");
959 /* in theory we should make this an atomic inc; in practice not important */
960 gDvm.emulatorTraceEnableCount--;
961 if (gDvm.emulatorTraceEnableCount == 0)
962 ALOGD("--- emulator method traces disabled");
963 updateActiveProfilers(kSubModeEmulatorTrace,
964 (gDvm.emulatorTraceEnableCount != 0));
969 * Start instruction counting.
// Refcounted enable of the instruction-counting interpreter sub-mode.
971 void dvmStartInstructionCounting()
973 /* in theory we should make this an atomic inc; in practice not important */
974 gDvm.instructionCountEnableCount++;
975 updateActiveProfilers(kSubModeInstCounting, true);
979 * Stop instruction counting.
// Refcounted disable; the sub-mode stays active while nested enables remain.
981 void dvmStopInstructionCounting()
983 if (gDvm.instructionCountEnableCount == 0) {
984 LOGE("ERROR: instruction counting not enabled");
987 gDvm.instructionCountEnableCount--;
988 updateActiveProfilers(kSubModeInstCounting,
989 (gDvm.instructionCountEnableCount != 0));
994 * Start alloc counting. Note this doesn't affect the "active profilers"
995 * count, since the interpreter loop is not involved.
// Simple flag toggles: the allocator checks gDvm.allocProf.enabled directly,
// so no interpreter sub-mode change is needed.
997 void dvmStartAllocCounting()
999 gDvm.allocProf.enabled = true;
1003 * Stop alloc counting.
1005 void dvmStopAllocCounting()
1007 gDvm.allocProf.enabled = false;