From 13fbc2e4bfa04cce8e181ac37d7f2b13a54aa037 Mon Sep 17 00:00:00 2001 From: buzbee Date: Tue, 14 Dec 2010 11:06:25 -0800 Subject: [PATCH] Stamp out some x86/host mode warnings Nuked a void* cast warnings and moved cacheflush into a target-specific utility wrapper. Change-Id: I36c841288b9ec7e03c0cb29b2e89db344f36fad1 --- vm/compiler/Compiler.c | 9 ++-- vm/compiler/CompilerUtility.h | 2 + vm/compiler/codegen/arm/ArchUtility.c | 6 +++ vm/compiler/codegen/arm/Assemble.c | 13 +++-- vm/compiler/codegen/x86/ArchUtility.c | 6 +++ vm/compiler/codegen/x86/Assemble.c | 1 - vm/compiler/codegen/x86/CodegenDriver.c | 9 +++- vm/compiler/template/ia32/footer.S | 8 --- .../template/out/CompilerTemplateAsm-armv5te-vfp.S | 35 ++++++------- .../template/out/CompilerTemplateAsm-armv5te.S | 59 ++++++++++++---------- .../out/CompilerTemplateAsm-armv7-a-neon.S | 35 ++++++------- .../template/out/CompilerTemplateAsm-armv7-a.S | 35 ++++++------- .../template/out/CompilerTemplateAsm-ia32.S | 8 --- 13 files changed, 112 insertions(+), 114 deletions(-) diff --git a/vm/compiler/Compiler.c b/vm/compiler/Compiler.c index fe42f4c51..c8ff62ee3 100644 --- a/vm/compiler/Compiler.c +++ b/vm/compiler/Compiler.c @@ -178,8 +178,8 @@ bool dvmCompilerSetupCodeCache(void) gDvmJit.codeCacheByteUsed = templateSize; /* Only flush the part in the code cache that is being used now */ - cacheflush((intptr_t) gDvmJit.codeCache, - (intptr_t) gDvmJit.codeCache + templateSize, 0); + dvmCompilerCacheFlush((intptr_t) gDvmJit.codeCache, + (intptr_t) gDvmJit.codeCache + templateSize, 0); int result = mprotect(gDvmJit.codeCache, gDvmJit.codeCacheSize, PROTECT_CODE_CACHE_ATTRS); @@ -281,8 +281,9 @@ static void resetCodeCache(void) memset((char *) gDvmJit.codeCache + gDvmJit.templateSize, 0, gDvmJit.codeCacheByteUsed - gDvmJit.templateSize); - cacheflush((intptr_t) gDvmJit.codeCache, - (intptr_t) gDvmJit.codeCache + gDvmJit.codeCacheByteUsed, 0); + dvmCompilerCacheFlush((intptr_t) gDvmJit.codeCache, + (intptr_t) 
gDvmJit.codeCache + + gDvmJit.codeCacheByteUsed, 0); PROTECT_CODE_CACHE(gDvmJit.codeCache, gDvmJit.codeCacheByteUsed); diff --git a/vm/compiler/CompilerUtility.h b/vm/compiler/CompilerUtility.h index d3f2d6aef..3e65a2eb0 100644 --- a/vm/compiler/CompilerUtility.h +++ b/vm/compiler/CompilerUtility.h @@ -73,5 +73,7 @@ void dvmDumpResourceMask(struct LIR *lir, u8 mask, const char *prefix); void dvmDumpBlockBitVector(const GrowableList *blocks, char *msg, const BitVector *bv, int length); void dvmGetBlockName(struct BasicBlock *bb, char *name); +int dvmCompilerCacheFlush(long start, long end, long flags); + #endif /* _DALVIK_COMPILER_UTILITY */ diff --git a/vm/compiler/codegen/arm/ArchUtility.c b/vm/compiler/codegen/arm/ArchUtility.c index 8682c1a39..95b96c496 100644 --- a/vm/compiler/codegen/arm/ArchUtility.c +++ b/vm/compiler/codegen/arm/ArchUtility.c @@ -385,3 +385,9 @@ void dvmCompilerCodegenDump(CompilationUnit *cUnit) armLIR->operands[0]); } } + +/* Target-specific cache flushing */ +int dvmCompilerCacheFlush(long start, long end, long flags) +{ + return cacheflush(start, end, flags); +} diff --git a/vm/compiler/codegen/arm/Assemble.c b/vm/compiler/codegen/arm/Assemble.c index 16e8e155c..4154387ba 100644 --- a/vm/compiler/codegen/arm/Assemble.c +++ b/vm/compiler/codegen/arm/Assemble.c @@ -20,7 +20,6 @@ #include "../../CompilerInternals.h" #include "ArmLIR.h" #include "Codegen.h" -#include /* for cacheflush */ #include /* for protection change */ #define MAX_ASSEMBLER_RETRIES 10 @@ -1353,8 +1352,8 @@ void dvmCompilerAssembleLIR(CompilationUnit *cUnit, JitTranslationInfo *info) installDataContent(cUnit); /* Flush dcache and invalidate the icache to maintain coherence */ - cacheflush((long)cUnit->baseAddr, - (long)((char *) cUnit->baseAddr + offset), 0); + dvmCompilerCacheFlush((long)cUnit->baseAddr, + (long)((char *) cUnit->baseAddr + offset), 0); UPDATE_CODE_CACHE_PATCHES(); PROTECT_CODE_CACHE(cUnit->baseAddr, offset); @@ -1449,7 +1448,7 @@ void* dvmJitChain(void* 
tgtAddr, u4* branchAddr) UNPROTECT_CODE_CACHE(branchAddr, sizeof(*branchAddr)); *branchAddr = newInst; - cacheflush((long)branchAddr, (long)branchAddr + 4, 0); + dvmCompilerCacheFlush((long)branchAddr, (long)branchAddr + 4, 0); UPDATE_CODE_CACHE_PATCHES(); PROTECT_CODE_CACHE(branchAddr, sizeof(*branchAddr)); @@ -1489,7 +1488,7 @@ static void inlineCachePatchEnqueue(PredictedChainingCell *cellAddr, */ android_atomic_release_store((int32_t)newContent->clazz, (volatile int32_t *)(void *)&cellAddr->clazz); - cacheflush((intptr_t) cellAddr, (intptr_t) (cellAddr+1), 0); + dvmCompilerCacheFlush((intptr_t) cellAddr, (intptr_t) (cellAddr+1), 0); UPDATE_CODE_CACHE_PATCHES(); PROTECT_CODE_CACHE(cellAddr, sizeof(*cellAddr)); @@ -1681,7 +1680,7 @@ void dvmCompilerPatchInlineCache(void) } /* Then synchronize the I/D cache */ - cacheflush((long) minAddr, (long) (maxAddr+1), 0); + dvmCompilerCacheFlush((long) minAddr, (long) (maxAddr+1), 0); UPDATE_CODE_CACHE_PATCHES(); PROTECT_CODE_CACHE(gDvmJit.codeCache, gDvmJit.codeCacheByteUsed); @@ -1802,7 +1801,7 @@ void dvmJitUnchainAll() highAddress = lastAddress; } } - cacheflush((long)lowAddress, (long)highAddress, 0); + dvmCompilerCacheFlush((long)lowAddress, (long)highAddress, 0); UPDATE_CODE_CACHE_PATCHES(); PROTECT_CODE_CACHE(gDvmJit.codeCache, gDvmJit.codeCacheByteUsed); diff --git a/vm/compiler/codegen/x86/ArchUtility.c b/vm/compiler/codegen/x86/ArchUtility.c index 171c3b5ef..f7c48d628 100644 --- a/vm/compiler/codegen/x86/ArchUtility.c +++ b/vm/compiler/codegen/x86/ArchUtility.c @@ -22,3 +22,9 @@ void dvmCompilerCodegenDump(CompilationUnit *cUnit) { } + +/* Target-specific cache flushing (not needed for x86) */ +int dvmCompilerCacheFlush(long start, long end, long flags) +{ + return 0; +} diff --git a/vm/compiler/codegen/x86/Assemble.c b/vm/compiler/codegen/x86/Assemble.c index 31264ce64..3c0b3c7d6 100644 --- a/vm/compiler/codegen/x86/Assemble.c +++ b/vm/compiler/codegen/x86/Assemble.c @@ -20,7 +20,6 @@ #include 
"../../CompilerInternals.h" #include "X86LIR.h" #include "Codegen.h" -#include /* for cacheflush */ #include /* for protection change */ #define MAX_ASSEMBLER_RETRIES 10 diff --git a/vm/compiler/codegen/x86/CodegenDriver.c b/vm/compiler/codegen/x86/CodegenDriver.c index 4a5d481b9..4f31563ea 100644 --- a/vm/compiler/codegen/x86/CodegenDriver.c +++ b/vm/compiler/codegen/x86/CodegenDriver.c @@ -35,6 +35,7 @@ extern X86LIR *opReg(CompilationUnit *cUnit, OpKind op, int rDestSrc); static int opcodeCoverage[kNumPackedOpcodes]; static intptr_t templateEntryOffsets[TEMPLATE_LAST_MARK]; +#if 0 // Avoid compiler warnings when x86 disabled during development /* * Bail to the interpreter. Will not return to this trace. * On entry, rPC must be set correctly. @@ -79,6 +80,7 @@ static void genInterpSingleStep(CompilationUnit *cUnit, MIR *mir) storeWordDisp(cUnit, rESP, OUT_ARG0, rECX); opReg(cUnit, kOpCall, rEAX); } +#endif /* * The following are the first-level codegen routines that analyze the format @@ -211,6 +213,7 @@ void dvmCompilerMIR2LIR(CompilationUnit *cUnit) /* Accept the work and start compiling */ bool dvmCompilerDoWork(CompilerWorkOrder *work) { + JitTraceDescription *desc; bool res; if (gDvmJit.codeCacheFull) { @@ -220,14 +223,16 @@ bool dvmCompilerDoWork(CompilerWorkOrder *work) switch (work->kind) { case kWorkOrderTrace: /* Start compilation with maximally allowed trace length */ - res = dvmCompileTrace(work->info, JIT_MAX_TRACE_LEN, &work->result, + desc = (JitTraceDescription *)work->info; + res = dvmCompileTrace(desc, JIT_MAX_TRACE_LEN, &work->result, work->bailPtr, 0 /* no hints */); break; case kWorkOrderTraceDebug: { bool oldPrintMe = gDvmJit.printMe; gDvmJit.printMe = true; /* Start compilation with maximally allowed trace length */ - res = dvmCompileTrace(work->info, JIT_MAX_TRACE_LEN, &work->result, + desc = (JitTraceDescription *)work->info; + res = dvmCompileTrace(desc, JIT_MAX_TRACE_LEN, &work->result, work->bailPtr, 0 /* no hints */); gDvmJit.printMe 
= oldPrintMe; break; diff --git a/vm/compiler/template/ia32/footer.S b/vm/compiler/template/ia32/footer.S index d11af69d1..d350c7739 100644 --- a/vm/compiler/template/ia32/footer.S +++ b/vm/compiler/template/ia32/footer.S @@ -6,14 +6,6 @@ .text .align 4 -/* - * FIXME - verify that we don't need an explicit cache flush - * for x86. - */ - .global cacheflush -cacheflush: - ret - .global dmvCompilerTemplateEnd dmvCompilerTemplateEnd: diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S b/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S index e1d052403..8efbcaa60 100644 --- a/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S +++ b/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S @@ -108,17 +108,6 @@ unspecified registers or condition codes. * =========================================================================== */ -/* - * Macro for "MOV LR,PC / LDR PC,xxx", which is not allowed pre-ARMv5. - * Jump to subroutine. - * - * May modify IP and LR. 
- */ -.macro LDR_PC_LR source - mov lr, pc - ldr pc, \source -.endm - .global dvmCompilerTemplateStart .type dvmCompilerTemplateStart, %function @@ -181,7 +170,8 @@ dvmCompiler_TEMPLATE_RETURN: stmfd sp!, {r0-r2,lr} @ preserve live registers mov r0, r6 @ r0=rGlue - LDR_PC_LR ".LdvmFastJavaMethodTraceExit" + mov lr, pc + ldr pc, .LdvmFastJavaMethodTraceExit ldmfd sp!, {r0-r2,lr} @ restore live registers #endif SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old) @@ -285,7 +275,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT: stmfd sp!, {r0-r3} @ preserve r0-r3 mov r1, r6 @ r0=methodToCall, r1=rGlue - LDR_PC_LR ".LdvmFastMethodTraceEnter" + mov lr, pc + ldr pc, .LdvmFastMethodTraceEnter ldmfd sp!, {r0-r3} @ restore r0-r3 #endif @@ -344,7 +335,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN: stmfd sp!, {r0-r2,lr} @ preserve clobbered live registers mov r1, r6 @ r0=methodToCall, r1=rGlue - LDR_PC_LR ".LdvmFastMethodTraceEnter" + mov lr, pc + ldr pc, .LdvmFastMethodTraceEnter ldmfd sp!, {r0-r2,lr} @ restore registers #endif @@ -468,7 +460,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE: mov r0, r2 mov r1, r6 @ r0=JNIMethod, r1=rGlue - LDR_PC_LR ".LdvmFastMethodTraceEnter" + mov lr, pc + ldr pc, .LdvmFastMethodTraceEnter ldmfd sp!, {r0-r3} @ restore r0-r3 #endif @@ -477,7 +470,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE: #if defined(WITH_INLINE_PROFILING) ldmfd sp!, {r0-r1} @ restore r2 and r6 @ r0=JNIMethod, r1=rGlue - LDR_PC_LR ".LdvmFastNativeMethodTraceExit" + mov lr, pc + ldr pc, .LdvmFastNativeMethodTraceExit #endif @ native return; r9=self, r10=newSaveArea @ equivalent to dvmPopJniLocals @@ -1511,15 +1505,18 @@ dvmCompiler_TEMPLATE_MONITOR_ENTER_DEBUG: stmfd sp!, {r0-r3} mov r0, r2 mov r1, r6 - LDR_PC_LR ".LdvmFastMethodTraceEnter" + mov lr, pc + ldr pc, .LdvmFastMethodTraceEnter ldmfd sp!, {r0-r3} #endif - LDR_PC_LR "[r2, #offMethod_nativeFunc]" + mov lr, pc + ldr pc, [r2, #offMethod_nativeFunc] #if defined(WITH_INLINE_PROFILING) ldmfd sp!, {r0-r1} - LDR_PC_LR 
".LdvmFastNativeMethodTraceExit" + mov lr, pc + ldr pc, .LdvmFastNativeMethodTraceExit #endif @ Refresh Jit's on/off status ldr r3, [rGLUE, #offGlue_ppJitProfTable] diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S b/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S index 5a47750ee..0df3ae65a 100644 --- a/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S +++ b/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S @@ -108,17 +108,6 @@ unspecified registers or condition codes. * =========================================================================== */ -/* - * Macro for "MOV LR,PC / LDR PC,xxx", which is not allowed pre-ARMv5. - * Jump to subroutine. - * - * May modify IP and LR. - */ -.macro LDR_PC_LR source - mov lr, pc - ldr pc, \source -.endm - .global dvmCompilerTemplateStart .type dvmCompilerTemplateStart, %function @@ -181,7 +170,8 @@ dvmCompiler_TEMPLATE_RETURN: stmfd sp!, {r0-r2,lr} @ preserve live registers mov r0, r6 @ r0=rGlue - LDR_PC_LR ".LdvmFastJavaMethodTraceExit" + mov lr, pc + ldr pc, .LdvmFastJavaMethodTraceExit ldmfd sp!, {r0-r2,lr} @ restore live registers #endif SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old) @@ -285,7 +275,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT: stmfd sp!, {r0-r3} @ preserve r0-r3 mov r1, r6 @ r0=methodToCall, r1=rGlue - LDR_PC_LR ".LdvmFastMethodTraceEnter" + mov lr, pc + ldr pc, .LdvmFastMethodTraceEnter ldmfd sp!, {r0-r3} @ restore r0-r3 #endif @@ -344,7 +335,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN: stmfd sp!, {r0-r2,lr} @ preserve clobbered live registers mov r1, r6 @ r0=methodToCall, r1=rGlue - LDR_PC_LR ".LdvmFastMethodTraceEnter" + mov lr, pc + ldr pc, .LdvmFastMethodTraceEnter ldmfd sp!, {r0-r2,lr} @ restore registers #endif @@ -468,7 +460,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE: mov r0, r2 mov r1, r6 @ r0=JNIMethod, r1=rGlue - LDR_PC_LR ".LdvmFastMethodTraceEnter" + mov lr, pc + ldr pc, .LdvmFastMethodTraceEnter ldmfd sp!, {r0-r3} @ restore r0-r3 #endif @@ -477,7 
+470,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE: #if defined(WITH_INLINE_PROFILING) ldmfd sp!, {r0-r1} @ restore r2 and r6 @ r0=JNIMethod, r1=rGlue - LDR_PC_LR ".LdvmFastNativeMethodTraceExit" + mov lr, pc + ldr pc, .LdvmFastNativeMethodTraceExit #endif @ native return; r9=self, r10=newSaveArea @ equivalent to dvmPopJniLocals @@ -527,7 +521,8 @@ dvmCompiler_TEMPLATE_CMPG_DOUBLE: /* op vAA, vBB, vCC */ push {r0-r3} @ save operands mov r11, lr @ save return address - LDR_PC_LR ".L__aeabi_cdcmple" @ PIC way of "bl __aeabi_cdcmple" + mov lr, pc + ldr pc, .L__aeabi_cdcmple @ PIC way of "bl __aeabi_cdcmple" bhi .LTEMPLATE_CMPG_DOUBLE_gt_or_nan @ C set and Z clear, disambiguate mvncc r0, #0 @ (less than) r1<- -1 moveq r0, #0 @ (equal) r1<- 0, trumps less than @@ -540,7 +535,8 @@ dvmCompiler_TEMPLATE_CMPG_DOUBLE: .LTEMPLATE_CMPG_DOUBLE_gt_or_nan: pop {r2-r3} @ restore operands in reverse order pop {r0-r1} @ restore operands in reverse order - LDR_PC_LR ".L__aeabi_cdcmple" @ r0<- Z set if eq, C clear if < + mov lr, pc + ldr pc, .L__aeabi_cdcmple @ r0<- Z set if eq, C clear if < movcc r0, #1 @ (greater than) r1<- 1 bxcc r11 mov r0, #1 @ r1<- 1 or -1 for NaN @@ -569,7 +565,8 @@ dvmCompiler_TEMPLATE_CMPL_DOUBLE: /* op vAA, vBB, vCC */ push {r0-r3} @ save operands mov r11, lr @ save return address - LDR_PC_LR ".L__aeabi_cdcmple" @ PIC way of "bl __aeabi_cdcmple" + mov lr, pc + ldr pc, .L__aeabi_cdcmple @ PIC way of "bl __aeabi_cdcmple" bhi .LTEMPLATE_CMPL_DOUBLE_gt_or_nan @ C set and Z clear, disambiguate mvncc r0, #0 @ (less than) r1<- -1 moveq r0, #0 @ (equal) r1<- 0, trumps less than @@ -582,7 +579,8 @@ dvmCompiler_TEMPLATE_CMPL_DOUBLE: .LTEMPLATE_CMPL_DOUBLE_gt_or_nan: pop {r2-r3} @ restore operands in reverse order pop {r0-r1} @ restore operands in reverse order - LDR_PC_LR ".L__aeabi_cdcmple" @ r0<- Z set if eq, C clear if < + mov lr, pc + ldr pc, .L__aeabi_cdcmple @ r0<- Z set if eq, C clear if < movcc r0, #1 @ (greater than) r1<- 1 bxcc r11 mvn r0, #0 @ r1<- 1 or -1 
for NaN @@ -631,7 +629,8 @@ dvmCompiler_TEMPLATE_CMPG_FLOAT: mov r9, r0 @ Save copies - we may need to redo mov r10, r1 mov r11, lr @ save return address - LDR_PC_LR ".L__aeabi_cfcmple" @ cmp <=: C clear if <, Z set if eq + mov lr, pc + ldr pc, .L__aeabi_cfcmple @ cmp <=: C clear if <, Z set if eq bhi .LTEMPLATE_CMPG_FLOAT_gt_or_nan @ C set and Z clear, disambiguate mvncc r0, #0 @ (less than) r0<- -1 moveq r0, #0 @ (equal) r0<- 0, trumps less than @@ -642,7 +641,8 @@ dvmCompiler_TEMPLATE_CMPG_FLOAT: .LTEMPLATE_CMPG_FLOAT_gt_or_nan: mov r0, r10 @ restore in reverse order mov r1, r9 - LDR_PC_LR ".L__aeabi_cfcmple" @ r0<- Z set if eq, C clear if < + mov lr, pc + ldr pc, .L__aeabi_cfcmple @ r0<- Z set if eq, C clear if < movcc r0, #1 @ (greater than) r1<- 1 bxcc r11 mov r0, #1 @ r1<- 1 or -1 for NaN @@ -691,7 +691,8 @@ dvmCompiler_TEMPLATE_CMPL_FLOAT: mov r9, r0 @ Save copies - we may need to redo mov r10, r1 mov r11, lr @ save return address - LDR_PC_LR ".L__aeabi_cfcmple" @ cmp <=: C clear if <, Z set if eq + mov lr, pc + ldr pc, .L__aeabi_cfcmple @ cmp <=: C clear if <, Z set if eq bhi .LTEMPLATE_CMPL_FLOAT_gt_or_nan @ C set and Z clear, disambiguate mvncc r0, #0 @ (less than) r0<- -1 moveq r0, #0 @ (equal) r0<- 0, trumps less than @@ -702,7 +703,8 @@ dvmCompiler_TEMPLATE_CMPL_FLOAT: .LTEMPLATE_CMPL_FLOAT_gt_or_nan: mov r0, r10 @ restore in reverse order mov r1, r9 - LDR_PC_LR ".L__aeabi_cfcmple" @ r0<- Z set if eq, C clear if < + mov lr, pc + ldr pc, .L__aeabi_cfcmple @ r0<- Z set if eq, C clear if < movcc r0, #1 @ (greater than) r1<- 1 bxcc r11 mvn r0, #0 @ r1<- 1 or -1 for NaN @@ -1234,15 +1236,18 @@ dvmCompiler_TEMPLATE_MONITOR_ENTER_DEBUG: stmfd sp!, {r0-r3} mov r0, r2 mov r1, r6 - LDR_PC_LR ".LdvmFastMethodTraceEnter" + mov lr, pc + ldr pc, .LdvmFastMethodTraceEnter ldmfd sp!, {r0-r3} #endif - LDR_PC_LR "[r2, #offMethod_nativeFunc]" + mov lr, pc + ldr pc, [r2, #offMethod_nativeFunc] #if defined(WITH_INLINE_PROFILING) ldmfd sp!, {r0-r1} - LDR_PC_LR 
".LdvmFastNativeMethodTraceExit" + mov lr, pc + ldr pc, .LdvmFastNativeMethodTraceExit #endif @ Refresh Jit's on/off status ldr r3, [rGLUE, #offGlue_ppJitProfTable] diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S index 9fb8892b3..ee3f8cbec 100644 --- a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S +++ b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S @@ -108,17 +108,6 @@ unspecified registers or condition codes. * =========================================================================== */ -/* - * Macro for "MOV LR,PC / LDR PC,xxx", which is not allowed pre-ARMv5. - * Jump to subroutine. - * - * May modify IP and LR. - */ -.macro LDR_PC_LR source - mov lr, pc - ldr pc, \source -.endm - .global dvmCompilerTemplateStart .type dvmCompilerTemplateStart, %function @@ -181,7 +170,8 @@ dvmCompiler_TEMPLATE_RETURN: stmfd sp!, {r0-r2,lr} @ preserve live registers mov r0, r6 @ r0=rGlue - LDR_PC_LR ".LdvmFastJavaMethodTraceExit" + mov lr, pc + ldr pc, .LdvmFastJavaMethodTraceExit ldmfd sp!, {r0-r2,lr} @ restore live registers #endif SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old) @@ -285,7 +275,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT: stmfd sp!, {r0-r3} @ preserve r0-r3 mov r1, r6 @ r0=methodToCall, r1=rGlue - LDR_PC_LR ".LdvmFastMethodTraceEnter" + mov lr, pc + ldr pc, .LdvmFastMethodTraceEnter ldmfd sp!, {r0-r3} @ restore r0-r3 #endif @@ -344,7 +335,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN: stmfd sp!, {r0-r2,lr} @ preserve clobbered live registers mov r1, r6 @ r0=methodToCall, r1=rGlue - LDR_PC_LR ".LdvmFastMethodTraceEnter" + mov lr, pc + ldr pc, .LdvmFastMethodTraceEnter ldmfd sp!, {r0-r2,lr} @ restore registers #endif @@ -468,7 +460,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE: mov r0, r2 mov r1, r6 @ r0=JNIMethod, r1=rGlue - LDR_PC_LR ".LdvmFastMethodTraceEnter" + mov lr, pc + ldr pc, .LdvmFastMethodTraceEnter ldmfd sp!, {r0-r3} @ restore 
r0-r3 #endif @@ -477,7 +470,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE: #if defined(WITH_INLINE_PROFILING) ldmfd sp!, {r0-r1} @ restore r2 and r6 @ r0=JNIMethod, r1=rGlue - LDR_PC_LR ".LdvmFastNativeMethodTraceExit" + mov lr, pc + ldr pc, .LdvmFastNativeMethodTraceExit #endif @ native return; r9=self, r10=newSaveArea @ equivalent to dvmPopJniLocals @@ -1511,15 +1505,18 @@ dvmCompiler_TEMPLATE_MONITOR_ENTER_DEBUG: stmfd sp!, {r0-r3} mov r0, r2 mov r1, r6 - LDR_PC_LR ".LdvmFastMethodTraceEnter" + mov lr, pc + ldr pc, .LdvmFastMethodTraceEnter ldmfd sp!, {r0-r3} #endif - LDR_PC_LR "[r2, #offMethod_nativeFunc]" + mov lr, pc + ldr pc, [r2, #offMethod_nativeFunc] #if defined(WITH_INLINE_PROFILING) ldmfd sp!, {r0-r1} - LDR_PC_LR ".LdvmFastNativeMethodTraceExit" + mov lr, pc + ldr pc, .LdvmFastNativeMethodTraceExit #endif @ Refresh Jit's on/off status ldr r3, [rGLUE, #offGlue_ppJitProfTable] diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S index 6d40d6000..3875f5a24 100644 --- a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S +++ b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S @@ -108,17 +108,6 @@ unspecified registers or condition codes. * =========================================================================== */ -/* - * Macro for "MOV LR,PC / LDR PC,xxx", which is not allowed pre-ARMv5. - * Jump to subroutine. - * - * May modify IP and LR. 
- */ -.macro LDR_PC_LR source - mov lr, pc - ldr pc, \source -.endm - .global dvmCompilerTemplateStart .type dvmCompilerTemplateStart, %function @@ -181,7 +170,8 @@ dvmCompiler_TEMPLATE_RETURN: stmfd sp!, {r0-r2,lr} @ preserve live registers mov r0, r6 @ r0=rGlue - LDR_PC_LR ".LdvmFastJavaMethodTraceExit" + mov lr, pc + ldr pc, .LdvmFastJavaMethodTraceExit ldmfd sp!, {r0-r2,lr} @ restore live registers #endif SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old) @@ -285,7 +275,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT: stmfd sp!, {r0-r3} @ preserve r0-r3 mov r1, r6 @ r0=methodToCall, r1=rGlue - LDR_PC_LR ".LdvmFastMethodTraceEnter" + mov lr, pc + ldr pc, .LdvmFastMethodTraceEnter ldmfd sp!, {r0-r3} @ restore r0-r3 #endif @@ -344,7 +335,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN: stmfd sp!, {r0-r2,lr} @ preserve clobbered live registers mov r1, r6 @ r0=methodToCall, r1=rGlue - LDR_PC_LR ".LdvmFastMethodTraceEnter" + mov lr, pc + ldr pc, .LdvmFastMethodTraceEnter ldmfd sp!, {r0-r2,lr} @ restore registers #endif @@ -468,7 +460,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE: mov r0, r2 mov r1, r6 @ r0=JNIMethod, r1=rGlue - LDR_PC_LR ".LdvmFastMethodTraceEnter" + mov lr, pc + ldr pc, .LdvmFastMethodTraceEnter ldmfd sp!, {r0-r3} @ restore r0-r3 #endif @@ -477,7 +470,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE: #if defined(WITH_INLINE_PROFILING) ldmfd sp!, {r0-r1} @ restore r2 and r6 @ r0=JNIMethod, r1=rGlue - LDR_PC_LR ".LdvmFastNativeMethodTraceExit" + mov lr, pc + ldr pc, .LdvmFastNativeMethodTraceExit #endif @ native return; r9=self, r10=newSaveArea @ equivalent to dvmPopJniLocals @@ -1511,15 +1505,18 @@ dvmCompiler_TEMPLATE_MONITOR_ENTER_DEBUG: stmfd sp!, {r0-r3} mov r0, r2 mov r1, r6 - LDR_PC_LR ".LdvmFastMethodTraceEnter" + mov lr, pc + ldr pc, .LdvmFastMethodTraceEnter ldmfd sp!, {r0-r3} #endif - LDR_PC_LR "[r2, #offMethod_nativeFunc]" + mov lr, pc + ldr pc, [r2, #offMethod_nativeFunc] #if defined(WITH_INLINE_PROFILING) ldmfd sp!, {r0-r1} - LDR_PC_LR 
".LdvmFastNativeMethodTraceExit" + mov lr, pc + ldr pc, .LdvmFastNativeMethodTraceExit #endif @ Refresh Jit's on/off status ldr r3, [rGLUE, #offGlue_ppJitProfTable] diff --git a/vm/compiler/template/out/CompilerTemplateAsm-ia32.S b/vm/compiler/template/out/CompilerTemplateAsm-ia32.S index 1256ee4bd..ae548e413 100644 --- a/vm/compiler/template/out/CompilerTemplateAsm-ia32.S +++ b/vm/compiler/template/out/CompilerTemplateAsm-ia32.S @@ -97,14 +97,6 @@ dvmCompiler_TEMPLATE_INTERPRET: .text .align 4 -/* - * FIXME - verify that we don't need an explicit cache flush - * for x86. - */ - .global cacheflush -cacheflush: - ret - .global dmvCompilerTemplateEnd dmvCompilerTemplateEnd: -- 2.11.0