2 * Copyright (C) 2009 The Android Open Source Project
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
18 * This file contains codegen and support common to all supported
19 * ARM variants. It is included by:
21 * Codegen-$(TARGET_ARCH_VARIANT).c
23 * which combines this common code with specific support found in the
24 * applicable directory below this one.
27 #include "compiler/Loop.h"
29 /* Array holding the entry offset of each template relative to the first one */
30 static intptr_t templateEntryOffsets[TEMPLATE_LAST_MARK];
32 /* Track exercised opcodes */
33 static int opcodeCoverage[256];
35 #if defined(WITH_SELF_VERIFICATION)
36 /* Prevent certain opcodes from being jitted */
37 static inline bool selfVerificationPuntOps(OpCode op)
39 return (op == OP_MONITOR_ENTER || op == OP_MONITOR_EXIT ||
40 op == OP_NEW_INSTANCE || op == OP_NEW_ARRAY);
44 * The following are used to keep compiled loads and stores from modifying
45 * memory during self verification mode.
 * Stores do not modify memory. Instead, the address and value pair are stored
 * into heapSpace. Addresses within heapSpace are unique. For accesses smaller
 * than a word, the word containing the address is loaded first before being
 * updated.
52 * Loads check heapSpace first and return data from there if an entry exists.
53 * Otherwise, data is loaded from memory as usual.
56 /* Decode contents of heapArgSpace to determine addr to load from */
57 static void selfVerificationLoadDecode(HeapArgSpace* heapArgSpace, int* addr)
59 int reg = heapArgSpace->regMap & 0xF;
63 *addr = heapArgSpace->r0;
66 *addr = heapArgSpace->r1;
69 *addr = heapArgSpace->r2;
72 *addr = heapArgSpace->r3;
75 LOGE("ERROR: bad reg used in selfVerificationLoadDecode: %d", reg);
80 /* Decode contents of heapArgSpace to determine reg to load into */
81 static void selfVerificationLoadDecodeData(HeapArgSpace* heapArgSpace,
86 heapArgSpace->r0 = data;
89 heapArgSpace->r1 = data;
92 heapArgSpace->r2 = data;
95 heapArgSpace->r3 = data;
98 LOGE("ERROR: bad reg passed to selfVerificationLoadDecodeData: %d",
104 static void selfVerificationLoad(InterpState* interpState)
106 Thread *self = dvmThreadSelf();
107 ShadowHeap *heapSpacePtr;
108 ShadowSpace *shadowSpace = self->shadowSpace;
109 HeapArgSpace *heapArgSpace = &(interpState->heapArgSpace);
112 selfVerificationLoadDecode(heapArgSpace, &addr);
114 for (heapSpacePtr = shadowSpace->heapSpace;
115 heapSpacePtr != shadowSpace->heapSpaceTail; heapSpacePtr++) {
116 if (heapSpacePtr->addr == addr) {
117 data = heapSpacePtr->data;
122 if (heapSpacePtr == shadowSpace->heapSpaceTail)
123 data = *((unsigned int*) addr);
125 int reg = (heapArgSpace->regMap >> 4) & 0xF;
127 //LOGD("*** HEAP LOAD: Reg:%d Addr: 0x%x Data: 0x%x", reg, addr, data);
129 selfVerificationLoadDecodeData(heapArgSpace, data, reg);
132 static void selfVerificationLoadByte(InterpState* interpState)
134 Thread *self = dvmThreadSelf();
135 ShadowHeap *heapSpacePtr;
136 ShadowSpace *shadowSpace = self->shadowSpace;
137 HeapArgSpace *heapArgSpace = &(interpState->heapArgSpace);
140 selfVerificationLoadDecode(heapArgSpace, &addr);
142 int maskedAddr = addr & 0xFFFFFFFC;
143 int alignment = addr & 0x3;
145 for (heapSpacePtr = shadowSpace->heapSpace;
146 heapSpacePtr != shadowSpace->heapSpaceTail; heapSpacePtr++) {
147 if (heapSpacePtr->addr == maskedAddr) {
148 addr = ((unsigned int) &(heapSpacePtr->data)) | alignment;
149 data = *((unsigned char*) addr);
154 if (heapSpacePtr == shadowSpace->heapSpaceTail)
155 data = *((unsigned char*) addr);
157 //LOGD("*** HEAP LOAD BYTE: Addr: 0x%x Data: 0x%x", addr, data);
159 int reg = (heapArgSpace->regMap >> 4) & 0xF;
160 selfVerificationLoadDecodeData(heapArgSpace, data, reg);
163 static void selfVerificationLoadHalfword(InterpState* interpState)
165 Thread *self = dvmThreadSelf();
166 ShadowHeap *heapSpacePtr;
167 ShadowSpace *shadowSpace = self->shadowSpace;
168 HeapArgSpace *heapArgSpace = &(interpState->heapArgSpace);
171 selfVerificationLoadDecode(heapArgSpace, &addr);
173 int maskedAddr = addr & 0xFFFFFFFC;
174 int alignment = addr & 0x2;
176 for (heapSpacePtr = shadowSpace->heapSpace;
177 heapSpacePtr != shadowSpace->heapSpaceTail; heapSpacePtr++) {
178 if (heapSpacePtr->addr == maskedAddr) {
179 addr = ((unsigned int) &(heapSpacePtr->data)) | alignment;
180 data = *((unsigned short*) addr);
185 if (heapSpacePtr == shadowSpace->heapSpaceTail)
186 data = *((unsigned short*) addr);
188 //LOGD("*** HEAP LOAD HALFWORD: Addr: 0x%x Data: 0x%x", addr, data);
190 int reg = (heapArgSpace->regMap >> 4) & 0xF;
191 selfVerificationLoadDecodeData(heapArgSpace, data, reg);
194 static void selfVerificationLoadSignedByte(InterpState* interpState)
196 Thread *self = dvmThreadSelf();
197 ShadowHeap* heapSpacePtr;
198 ShadowSpace* shadowSpace = self->shadowSpace;
199 HeapArgSpace *heapArgSpace = &(interpState->heapArgSpace);
202 selfVerificationLoadDecode(heapArgSpace, &addr);
204 int maskedAddr = addr & 0xFFFFFFFC;
205 int alignment = addr & 0x3;
207 for (heapSpacePtr = shadowSpace->heapSpace;
208 heapSpacePtr != shadowSpace->heapSpaceTail; heapSpacePtr++) {
209 if (heapSpacePtr->addr == maskedAddr) {
210 addr = ((unsigned int) &(heapSpacePtr->data)) | alignment;
211 data = *((signed char*) addr);
216 if (heapSpacePtr == shadowSpace->heapSpaceTail)
217 data = *((signed char*) addr);
219 //LOGD("*** HEAP LOAD SIGNED BYTE: Addr: 0x%x Data: 0x%x", addr, data);
221 int reg = (heapArgSpace->regMap >> 4) & 0xF;
222 selfVerificationLoadDecodeData(heapArgSpace, data, reg);
225 static void selfVerificationLoadSignedHalfword(InterpState* interpState)
227 Thread *self = dvmThreadSelf();
228 ShadowHeap* heapSpacePtr;
229 ShadowSpace* shadowSpace = self->shadowSpace;
230 HeapArgSpace *heapArgSpace = &(interpState->heapArgSpace);
233 selfVerificationLoadDecode(heapArgSpace, &addr);
235 int maskedAddr = addr & 0xFFFFFFFC;
236 int alignment = addr & 0x2;
238 for (heapSpacePtr = shadowSpace->heapSpace;
239 heapSpacePtr != shadowSpace->heapSpaceTail; heapSpacePtr++) {
240 if (heapSpacePtr->addr == maskedAddr) {
241 addr = ((unsigned int) &(heapSpacePtr->data)) | alignment;
242 data = *((signed short*) addr);
247 if (heapSpacePtr == shadowSpace->heapSpaceTail)
248 data = *((signed short*) addr);
250 //LOGD("*** HEAP LOAD SIGNED HALFWORD: Addr: 0x%x Data: 0x%x", addr, data);
252 int reg = (heapArgSpace->regMap >> 4) & 0xF;
253 selfVerificationLoadDecodeData(heapArgSpace, data, reg);
256 static void selfVerificationLoadDoubleword(InterpState* interpState)
258 Thread *self = dvmThreadSelf();
259 ShadowHeap* heapSpacePtr;
260 ShadowSpace* shadowSpace = self->shadowSpace;
261 HeapArgSpace *heapArgSpace = &(interpState->heapArgSpace);
264 selfVerificationLoadDecode(heapArgSpace, &addr);
267 unsigned int data = *((unsigned int*) addr);
268 unsigned int data2 = *((unsigned int*) addr2);
270 for (heapSpacePtr = shadowSpace->heapSpace;
271 heapSpacePtr != shadowSpace->heapSpaceTail; heapSpacePtr++) {
272 if (heapSpacePtr->addr == addr) {
273 data = heapSpacePtr->data;
274 } else if (heapSpacePtr->addr == addr2) {
275 data2 = heapSpacePtr->data;
279 //LOGD("*** HEAP LOAD DOUBLEWORD: Addr: 0x%x Data: 0x%x Data2: 0x%x",
280 // addr, data, data2);
282 int reg = (heapArgSpace->regMap >> 4) & 0xF;
283 int reg2 = (heapArgSpace->regMap >> 8) & 0xF;
284 selfVerificationLoadDecodeData(heapArgSpace, data, reg);
285 selfVerificationLoadDecodeData(heapArgSpace, data2, reg2);
288 /* Decode contents of heapArgSpace to determine arguments to store. */
289 static void selfVerificationStoreDecode(HeapArgSpace* heapArgSpace,
294 *value = heapArgSpace->r0;
297 *value = heapArgSpace->r1;
300 *value = heapArgSpace->r2;
303 *value = heapArgSpace->r3;
306 LOGE("ERROR: bad reg passed to selfVerificationStoreDecode: %d",
312 static void selfVerificationStore(InterpState* interpState)
314 Thread *self = dvmThreadSelf();
315 ShadowHeap *heapSpacePtr;
316 ShadowSpace *shadowSpace = self->shadowSpace;
317 HeapArgSpace *heapArgSpace = &(interpState->heapArgSpace);
320 int reg0 = heapArgSpace->regMap & 0xF;
321 int reg1 = (heapArgSpace->regMap >> 4) & 0xF;
322 selfVerificationStoreDecode(heapArgSpace, &addr, reg0);
323 selfVerificationStoreDecode(heapArgSpace, &data, reg1);
325 //LOGD("*** HEAP STORE: Addr: 0x%x Data: 0x%x", addr, data);
327 for (heapSpacePtr = shadowSpace->heapSpace;
328 heapSpacePtr != shadowSpace->heapSpaceTail; heapSpacePtr++) {
329 if (heapSpacePtr->addr == addr) break;
332 if (heapSpacePtr == shadowSpace->heapSpaceTail) {
333 heapSpacePtr->addr = addr;
334 shadowSpace->heapSpaceTail++;
337 heapSpacePtr->data = data;
340 static void selfVerificationStoreByte(InterpState* interpState)
342 Thread *self = dvmThreadSelf();
343 ShadowHeap *heapSpacePtr;
344 ShadowSpace *shadowSpace = self->shadowSpace;
345 HeapArgSpace *heapArgSpace = &(interpState->heapArgSpace);
348 int reg0 = heapArgSpace->regMap & 0xF;
349 int reg1 = (heapArgSpace->regMap >> 4) & 0xF;
350 selfVerificationStoreDecode(heapArgSpace, &addr, reg0);
351 selfVerificationStoreDecode(heapArgSpace, &data, reg1);
353 int maskedAddr = addr & 0xFFFFFFFC;
354 int alignment = addr & 0x3;
356 //LOGD("*** HEAP STORE BYTE: Addr: 0x%x Data: 0x%x", addr, data);
358 for (heapSpacePtr = shadowSpace->heapSpace;
359 heapSpacePtr != shadowSpace->heapSpaceTail; heapSpacePtr++) {
360 if (heapSpacePtr->addr == maskedAddr) break;
363 if (heapSpacePtr == shadowSpace->heapSpaceTail) {
364 heapSpacePtr->addr = maskedAddr;
365 heapSpacePtr->data = *((unsigned int*) maskedAddr);
366 shadowSpace->heapSpaceTail++;
369 addr = ((unsigned int) &(heapSpacePtr->data)) | alignment;
370 *((unsigned char*) addr) = (char) data;
372 //LOGD("*** HEAP STORE BYTE: Addr: 0x%x Final Data: 0x%x",
373 // addr, heapSpacePtr->data);
376 static void selfVerificationStoreHalfword(InterpState* interpState)
378 Thread *self = dvmThreadSelf();
379 ShadowHeap *heapSpacePtr;
380 ShadowSpace *shadowSpace = self->shadowSpace;
381 HeapArgSpace *heapArgSpace = &(interpState->heapArgSpace);
384 int reg0 = heapArgSpace->regMap & 0xF;
385 int reg1 = (heapArgSpace->regMap >> 4) & 0xF;
386 selfVerificationStoreDecode(heapArgSpace, &addr, reg0);
387 selfVerificationStoreDecode(heapArgSpace, &data, reg1);
389 int maskedAddr = addr & 0xFFFFFFFC;
390 int alignment = addr & 0x2;
392 //LOGD("*** HEAP STORE HALFWORD: Addr: 0x%x Data: 0x%x", addr, data);
394 for (heapSpacePtr = shadowSpace->heapSpace;
395 heapSpacePtr != shadowSpace->heapSpaceTail; heapSpacePtr++) {
396 if (heapSpacePtr->addr == maskedAddr) break;
399 if (heapSpacePtr == shadowSpace->heapSpaceTail) {
400 heapSpacePtr->addr = maskedAddr;
401 heapSpacePtr->data = *((unsigned int*) maskedAddr);
402 shadowSpace->heapSpaceTail++;
405 addr = ((unsigned int) &(heapSpacePtr->data)) | alignment;
406 *((unsigned short*) addr) = (short) data;
408 //LOGD("*** HEAP STORE HALFWORD: Addr: 0x%x Final Data: 0x%x",
409 // addr, heapSpacePtr->data);
412 static void selfVerificationStoreDoubleword(InterpState* interpState)
414 Thread *self = dvmThreadSelf();
415 ShadowHeap *heapSpacePtr;
416 ShadowSpace *shadowSpace = self->shadowSpace;
417 HeapArgSpace *heapArgSpace = &(interpState->heapArgSpace);
419 int addr, data, data2;
420 int reg0 = heapArgSpace->regMap & 0xF;
421 int reg1 = (heapArgSpace->regMap >> 4) & 0xF;
422 int reg2 = (heapArgSpace->regMap >> 8) & 0xF;
423 selfVerificationStoreDecode(heapArgSpace, &addr, reg0);
424 selfVerificationStoreDecode(heapArgSpace, &data, reg1);
425 selfVerificationStoreDecode(heapArgSpace, &data2, reg2);
428 bool store1 = false, store2 = false;
430 //LOGD("*** HEAP STORE DOUBLEWORD: Addr: 0x%x Data: 0x%x, Data2: 0x%x",
431 // addr, data, data2);
433 for (heapSpacePtr = shadowSpace->heapSpace;
434 heapSpacePtr != shadowSpace->heapSpaceTail; heapSpacePtr++) {
435 if (heapSpacePtr->addr == addr) {
436 heapSpacePtr->data = data;
438 } else if (heapSpacePtr->addr == addr2) {
439 heapSpacePtr->data = data2;
445 shadowSpace->heapSpaceTail->addr = addr;
446 shadowSpace->heapSpaceTail->data = data;
447 shadowSpace->heapSpaceTail++;
450 shadowSpace->heapSpaceTail->addr = addr2;
451 shadowSpace->heapSpaceTail->data = data2;
452 shadowSpace->heapSpaceTail++;
456 /* Common wrapper function for all memory operations */
457 static void selfVerificationMemOpWrapper(CompilationUnit *cUnit, int regMap,
460 int regMask = (1 << r4PC) | (1 << r3) | (1 << r2) | (1 << r1) | (1 << r0);
462 /* r7 <- InterpState->heapArgSpace */
463 loadConstant(cUnit, r4PC, offsetof(InterpState, heapArgSpace));
464 newLIR3(cUnit, THUMB_ADD_RRR, r7, rGLUE, r4PC);
466 /* Save out values to heapArgSpace */
467 loadConstant(cUnit, r4PC, regMap);
468 newLIR2(cUnit, THUMB_STMIA, r7, regMask);
470 /* Pass interpState pointer to function */
471 newLIR2(cUnit, THUMB_MOV_RR, r0, rGLUE);
473 /* Set function pointer and branch */
474 loadConstant(cUnit, r1, (int) funct);
475 newLIR1(cUnit, THUMB_BLX_R, r1);
477 /* r7 <- InterpState->heapArgSpace */
478 loadConstant(cUnit, r4PC, offsetof(InterpState, heapArgSpace));
479 newLIR3(cUnit, THUMB_ADD_RRR, r7, rGLUE, r4PC);
481 /* Restore register state */
482 newLIR2(cUnit, THUMB_LDMIA, r7, regMask);
487 * Mark load/store instructions that access Dalvik registers through rFP +
490 static void annotateDalvikRegAccess(ArmLIR *lir, int regId, bool isLoad)
493 lir->useMask |= ENCODE_DALVIK_REG;
495 lir->defMask |= ENCODE_DALVIK_REG;
499 * Store the Dalvik register id in aliasInfo. Mark he MSB if it is a 64-bit
502 lir->aliasInfo = regId;
503 if (DOUBLEREG(lir->operands[0])) {
504 lir->aliasInfo |= 0x80000000;
509 * Decode the register id and mark the corresponding bit(s).
511 static inline void setupRegMask(u8 *mask, int reg)
515 int regId = reg & 0x1f;
518 * Each double register is equal to a pair of single-precision FP registers
520 seed = DOUBLEREG(reg) ? 3 : 1;
521 /* FP register starts at bit position 16 */
522 shift = FPREG(reg) ? kFPReg0 : 0;
523 /* Expand the double register id into single offset */
525 *mask |= seed << shift;
529 * Set up the proper fields in the resource mask
531 static void setupResourceMasks(ArmLIR *lir)
533 int opCode = lir->opCode;
537 lir->useMask = lir->defMask = 0;
541 flags = EncodingMap[lir->opCode].flags;
543 /* Set up the mask for resources that are updated */
544 if (flags & IS_BRANCH) {
545 lir->defMask |= ENCODE_REG_PC;
546 lir->useMask |= ENCODE_REG_PC;
549 if (flags & REG_DEF0) {
550 setupRegMask(&lir->defMask, lir->operands[0]);
553 if (flags & REG_DEF1) {
554 setupRegMask(&lir->defMask, lir->operands[1]);
557 if (flags & REG_DEF_SP) {
558 lir->defMask |= ENCODE_REG_SP;
561 if (flags & REG_DEF_SP) {
562 lir->defMask |= ENCODE_REG_LR;
565 if (flags & REG_DEF_LIST0) {
566 lir->defMask |= ENCODE_REG_LIST(lir->operands[0]);
569 if (flags & REG_DEF_LIST1) {
570 lir->defMask |= ENCODE_REG_LIST(lir->operands[1]);
573 if (flags & SETS_CCODES) {
574 lir->defMask |= ENCODE_CCODE;
577 /* Conservatively treat the IT block */
579 lir->defMask = ENCODE_ALL;
582 /* Set up the mask for resources that are used */
583 if (flags & IS_BRANCH) {
584 lir->useMask |= ENCODE_REG_PC;
587 if (flags & (REG_USE0 | REG_USE1 | REG_USE2)) {
590 for (i = 0; i < 3; i++) {
591 if (flags & (1 << (kRegUse0 + i))) {
592 setupRegMask(&lir->useMask, lir->operands[i]);
597 if (flags & REG_USE_PC) {
598 lir->useMask |= ENCODE_REG_PC;
601 if (flags & REG_USE_SP) {
602 lir->useMask |= ENCODE_REG_SP;
605 if (flags & REG_USE_LIST0) {
606 lir->useMask |= ENCODE_REG_LIST(lir->operands[0]);
609 if (flags & REG_USE_LIST1) {
610 lir->useMask |= ENCODE_REG_LIST(lir->operands[1]);
613 if (flags & USES_CCODES) {
614 lir->useMask |= ENCODE_CCODE;
619 * The following are building blocks to construct low-level IRs with 0 - 4
622 static ArmLIR *newLIR0(CompilationUnit *cUnit, ArmOpCode opCode)
624 ArmLIR *insn = dvmCompilerNew(sizeof(ArmLIR), true);
625 assert(isPseudoOpCode(opCode) || (EncodingMap[opCode].flags & NO_OPERAND));
626 insn->opCode = opCode;
627 setupResourceMasks(insn);
628 dvmCompilerAppendLIR(cUnit, (LIR *) insn);
632 static ArmLIR *newLIR1(CompilationUnit *cUnit, ArmOpCode opCode,
635 ArmLIR *insn = dvmCompilerNew(sizeof(ArmLIR), true);
636 assert(isPseudoOpCode(opCode) || (EncodingMap[opCode].flags & IS_UNARY_OP));
637 insn->opCode = opCode;
638 insn->operands[0] = dest;
639 setupResourceMasks(insn);
640 dvmCompilerAppendLIR(cUnit, (LIR *) insn);
644 static ArmLIR *newLIR2(CompilationUnit *cUnit, ArmOpCode opCode,
647 ArmLIR *insn = dvmCompilerNew(sizeof(ArmLIR), true);
648 assert(isPseudoOpCode(opCode) ||
649 (EncodingMap[opCode].flags & IS_BINARY_OP));
650 insn->opCode = opCode;
651 insn->operands[0] = dest;
652 insn->operands[1] = src1;
653 setupResourceMasks(insn);
654 dvmCompilerAppendLIR(cUnit, (LIR *) insn);
658 static ArmLIR *newLIR3(CompilationUnit *cUnit, ArmOpCode opCode,
659 int dest, int src1, int src2)
661 ArmLIR *insn = dvmCompilerNew(sizeof(ArmLIR), true);
662 assert(isPseudoOpCode(opCode) ||
663 (EncodingMap[opCode].flags & IS_TERTIARY_OP));
664 insn->opCode = opCode;
665 insn->operands[0] = dest;
666 insn->operands[1] = src1;
667 insn->operands[2] = src2;
668 setupResourceMasks(insn);
669 dvmCompilerAppendLIR(cUnit, (LIR *) insn);
673 static ArmLIR *newLIR4(CompilationUnit *cUnit, ArmOpCode opCode,
674 int dest, int src1, int src2, int info)
676 ArmLIR *insn = dvmCompilerNew(sizeof(ArmLIR), true);
677 assert(isPseudoOpCode(opCode) ||
678 (EncodingMap[opCode].flags & IS_QUAD_OP));
679 insn->opCode = opCode;
680 insn->operands[0] = dest;
681 insn->operands[1] = src1;
682 insn->operands[2] = src2;
683 insn->operands[3] = info;
684 setupResourceMasks(insn);
685 dvmCompilerAppendLIR(cUnit, (LIR *) insn);
690 * If the next instruction is a move-result or move-result-long,
691 * return the target Dalvik instruction and convert the next to a
692 * nop. Otherwise, return -1. Used to optimize method inlining.
694 static int inlinedTarget(MIR *mir)
697 ((mir->next->dalvikInsn.opCode == OP_MOVE_RESULT) ||
698 (mir->next->dalvikInsn.opCode == OP_MOVE_RESULT_OBJECT) ||
699 (mir->next->dalvikInsn.opCode == OP_MOVE_RESULT_WIDE))) {
700 mir->next->dalvikInsn.opCode = OP_NOP;
701 return mir->next->dalvikInsn.vA;
710 * The following are building blocks to insert constants into the pool or
711 * instruction streams.
714 /* Add a 32-bit constant either in the constant pool or mixed with code */
715 static ArmLIR *addWordData(CompilationUnit *cUnit, int value, bool inPlace)
717 /* Add the constant to the literal pool */
719 ArmLIR *newValue = dvmCompilerNew(sizeof(ArmLIR), true);
720 newValue->operands[0] = value;
721 newValue->generic.next = cUnit->wordList;
722 cUnit->wordList = (LIR *) newValue;
725 /* Add the constant in the middle of code stream */
726 newLIR1(cUnit, ARM_16BIT_DATA, (value & 0xffff));
727 newLIR1(cUnit, ARM_16BIT_DATA, (value >> 16));
733 * Search the existing constants in the literal pool for an exact or close match
734 * within specified delta (greater or equal to 0).
736 static ArmLIR *scanLiteralPool(CompilationUnit *cUnit, int value,
739 LIR *dataTarget = cUnit->wordList;
741 if (((unsigned) (value - ((ArmLIR *) dataTarget)->operands[0])) <=
743 return (ArmLIR *) dataTarget;
744 dataTarget = dataTarget->next;
750 * Generate an ARM_PSEUDO_BARRIER marker to indicate the boundary of special
753 static void genBarrier(CompilationUnit *cUnit)
755 ArmLIR *barrier = newLIR0(cUnit, ARM_PSEUDO_BARRIER);
756 /* Mark all resources as being clobbered */
757 barrier->defMask = -1;
760 /* Perform the actual operation for OP_RETURN_* */
761 static void genReturnCommon(CompilationUnit *cUnit, MIR *mir)
763 genDispatchToHandler(cUnit, TEMPLATE_RETURN);
764 #if defined(INVOKE_STATS)
767 int dPC = (int) (cUnit->method->insns + mir->offset);
768 /* Insert branch, but defer setting of target */
769 ArmLIR *branch = genUnconditionalBranch(cUnit, NULL);
770 /* Set up the place holder to reconstruct this Dalvik PC */
771 ArmLIR *pcrLabel = dvmCompilerNew(sizeof(ArmLIR), true);
772 pcrLabel->opCode = ARM_PSEUDO_PC_RECONSTRUCTION_CELL;
773 pcrLabel->operands[0] = dPC;
774 pcrLabel->operands[1] = mir->offset;
775 /* Insert the place holder to the growable list */
776 dvmInsertGrowableList(&cUnit->pcReconstructionList, pcrLabel);
777 /* Branch to the PC reconstruction code */
778 branch->generic.target = (LIR *) pcrLabel;
781 /* Create the PC reconstruction slot if not already done */
782 static inline ArmLIR *genCheckCommon(CompilationUnit *cUnit, int dOffset,
786 /* Set up the place holder to reconstruct this Dalvik PC */
787 if (pcrLabel == NULL) {
788 int dPC = (int) (cUnit->method->insns + dOffset);
789 pcrLabel = dvmCompilerNew(sizeof(ArmLIR), true);
790 pcrLabel->opCode = ARM_PSEUDO_PC_RECONSTRUCTION_CELL;
791 pcrLabel->operands[0] = dPC;
792 pcrLabel->operands[1] = dOffset;
793 /* Insert the place holder to the growable list */
794 dvmInsertGrowableList(&cUnit->pcReconstructionList, pcrLabel);
796 /* Branch to the PC reconstruction code */
797 branch->generic.target = (LIR *) pcrLabel;
803 * Perform a "reg cmp reg" operation and jump to the PCR region if condition
806 static inline ArmLIR *genRegRegCheck(CompilationUnit *cUnit,
807 ArmConditionCode cond,
808 int reg1, int reg2, int dOffset,
812 res = opRegReg(cUnit, OP_CMP, reg1, reg2);
813 ArmLIR *branch = opCondBranch(cUnit, cond);
814 genCheckCommon(cUnit, dOffset, branch, pcrLabel);
819 * Perform null-check on a register. vReg is the Dalvik register being checked,
820 * and mReg is the machine register holding the actual value. If internal state
821 * indicates that vReg has been checked before the check request is ignored.
823 static ArmLIR *genNullCheck(CompilationUnit *cUnit, int vReg, int mReg,
824 int dOffset, ArmLIR *pcrLabel)
826 /* This particular Dalvik register has been null-checked */
827 if (dvmIsBitSet(cUnit->registerScoreboard.nullCheckedRegs, vReg)) {
830 dvmSetBit(cUnit->registerScoreboard.nullCheckedRegs, vReg);
831 return genRegImmCheck(cUnit, ARM_COND_EQ, mReg, 0, dOffset, pcrLabel);
835 * Perform zero-check on a register. Similar to genNullCheck but the value being
836 * checked does not have a corresponding Dalvik register.
838 static ArmLIR *genZeroCheck(CompilationUnit *cUnit, int mReg,
839 int dOffset, ArmLIR *pcrLabel)
841 return genRegImmCheck(cUnit, ARM_COND_EQ, mReg, 0, dOffset, pcrLabel);
844 /* Perform bound check on two registers */
845 static ArmLIR *genBoundsCheck(CompilationUnit *cUnit, int rIndex,
846 int rBound, int dOffset, ArmLIR *pcrLabel)
848 return genRegRegCheck(cUnit, ARM_COND_CS, rIndex, rBound, dOffset,
852 /* Generate a unconditional branch to go to the interpreter */
853 static inline ArmLIR *genTrap(CompilationUnit *cUnit, int dOffset,
856 ArmLIR *branch = opNone(cUnit, OP_UNCOND_BR);
857 return genCheckCommon(cUnit, dOffset, branch, pcrLabel);
860 /* Load a wide field from an object instance */
861 static void genIGetWide(CompilationUnit *cUnit, MIR *mir, int fieldOffset)
863 DecodedInstruction *dInsn = &mir->dalvikInsn;
864 int reg0, reg1, reg2, reg3;
866 /* Allocate reg0..reg3 into physical registers r0..r3 */
868 /* See if vB is in a native register. If so, reuse it. */
869 reg2 = selectFirstRegister(cUnit, dInsn->vB, false);
870 /* Ping reg3 to the other register of the same pair containing reg2 */
873 * Ping reg0 to the first register of the alternate register pair
875 reg0 = (reg2 + 2) & 0xa;
876 reg1 = NEXT_REG(reg0);
878 loadValue(cUnit, dInsn->vB, reg2);
879 loadConstant(cUnit, reg3, fieldOffset);
880 genNullCheck(cUnit, dInsn->vB, reg2, mir->offset, NULL); /* null object? */
881 opRegReg(cUnit, OP_ADD, reg2, reg3);
882 #if !defined(WITH_SELF_VERIFICATION)
883 loadMultiple(cUnit, reg2, (1<<reg0 | 1<<reg1));
885 int regMap = reg1 << 8 | reg0 << 4 | reg2;
886 selfVerificationMemOpWrapper(cUnit, regMap,
887 &selfVerificationLoadDoubleword);
889 storeValuePair(cUnit, reg0, reg1, dInsn->vA, reg3);
892 /* Store a wide field to an object instance */
893 static void genIPutWide(CompilationUnit *cUnit, MIR *mir, int fieldOffset)
895 DecodedInstruction *dInsn = &mir->dalvikInsn;
896 int reg0, reg1, reg2, reg3;
898 /* Allocate reg0..reg3 into physical registers r0..r3 */
900 /* See if vB is in a native register. If so, reuse it. */
901 reg2 = selectFirstRegister(cUnit, dInsn->vB, false);
902 /* Ping reg3 to the other register of the same pair containing reg2 */
905 * Ping reg0 to the first register of the alternate register pair
907 reg0 = (reg2 + 2) & 0xa;
908 reg1 = NEXT_REG(reg0);
911 loadValue(cUnit, dInsn->vB, reg2);
912 loadValuePair(cUnit, dInsn->vA, reg0, reg1);
913 updateLiveRegisterPair(cUnit, dInsn->vA, reg0, reg1);
914 loadConstant(cUnit, reg3, fieldOffset);
915 genNullCheck(cUnit, dInsn->vB, reg2, mir->offset, NULL); /* null object? */
916 opRegReg(cUnit, OP_ADD, reg2, reg3);
917 #if !defined(WITH_SELF_VERIFICATION)
918 storeMultiple(cUnit, reg2, (1<<reg0 | 1<<reg1));
920 int regMap = reg1 << 8 | reg0 << 4 | reg2;
921 selfVerificationMemOpWrapper(cUnit, regMap,
922 &selfVerificationStoreDoubleword);
927 * Load a field from an object instance
930 static void genIGet(CompilationUnit *cUnit, MIR *mir, OpSize size,
933 DecodedInstruction *dInsn = &mir->dalvikInsn;
936 reg0 = selectFirstRegister(cUnit, dInsn->vB, false);
937 reg1 = NEXT_REG(reg0);
938 loadValue(cUnit, dInsn->vB, reg0);
939 #if !defined(WITH_SELF_VERIFICATION)
940 loadBaseDisp(cUnit, mir, reg0, fieldOffset, reg1, size, true, dInsn->vB);
942 genNullCheck(cUnit, dInsn->vB, reg0, mir->offset, NULL); /* null object? */
943 /* Combine address and offset */
944 loadConstant(cUnit, reg1, fieldOffset);
945 opRegReg(cUnit, OP_ADD, reg0, reg1);
947 int regMap = reg1 << 4 | reg0;
948 selfVerificationMemOpWrapper(cUnit, regMap, &selfVerificationLoad);
950 storeValue(cUnit, reg1, dInsn->vA, reg0);
954 * Store a field to an object instance
957 static void genIPut(CompilationUnit *cUnit, MIR *mir, OpSize size,
960 DecodedInstruction *dInsn = &mir->dalvikInsn;
961 int reg0, reg1, reg2;
963 reg0 = selectFirstRegister(cUnit, dInsn->vB, false);
964 reg1 = NEXT_REG(reg0);
965 reg2 = NEXT_REG(reg1);
967 loadValue(cUnit, dInsn->vB, reg0);
968 loadValue(cUnit, dInsn->vA, reg2);
969 updateLiveRegister(cUnit, dInsn->vA, reg2);
970 genNullCheck(cUnit, dInsn->vB, reg0, mir->offset, NULL); /* null object? */
971 #if !defined(WITH_SELF_VERIFICATION)
972 storeBaseDisp(cUnit, reg0, fieldOffset, reg2, size, reg1);
974 /* Combine address and offset */
975 loadConstant(cUnit, reg1, fieldOffset);
976 opRegReg(cUnit, OP_ADD, reg0, reg1);
978 int regMap = reg2 << 4 | reg0;
979 selfVerificationMemOpWrapper(cUnit, regMap, &selfVerificationStore);
981 opRegReg(cUnit, OP_SUB, reg0, reg1);
987 * Generate array load
990 static void genArrayGet(CompilationUnit *cUnit, MIR *mir, OpSize size,
991 int vArray, int vIndex, int vDest, int scale)
993 int lenOffset = offsetof(ArrayObject, length);
994 int dataOffset = offsetof(ArrayObject, contents);
995 int reg0, reg1, reg2, reg3;
997 reg0 = selectFirstRegister(cUnit, vArray,
998 (size == LONG) || (size == DOUBLE));
999 reg1 = NEXT_REG(reg0);
1000 reg2 = NEXT_REG(reg1);
1001 reg3 = NEXT_REG(reg2);
1003 loadValue(cUnit, vArray, reg2);
1004 loadValue(cUnit, vIndex, reg3);
1007 ArmLIR * pcrLabel = NULL;
1009 if (!(mir->OptimizationFlags & MIR_IGNORE_NULL_CHECK)) {
1010 pcrLabel = genNullCheck(cUnit, vArray, reg2, mir->offset, NULL);
1013 if (!(mir->OptimizationFlags & MIR_IGNORE_RANGE_CHECK)) {
1015 loadWordDisp(cUnit, reg2, lenOffset, reg0);
1016 /* reg2 -> array data */
1017 opRegImm(cUnit, OP_ADD, reg2, dataOffset, rNone);
1018 genBoundsCheck(cUnit, reg3, reg0, mir->offset, pcrLabel);
1020 /* reg2 -> array data */
1021 opRegImm(cUnit, OP_ADD, reg2, dataOffset, rNone);
1023 #if !defined(WITH_SELF_VERIFICATION)
1024 if ((size == LONG) || (size == DOUBLE)) {
1025 //TUNING: redo. Make specific wide routine, perhaps use ldmia/fp regs
1026 opRegRegImm(cUnit, OP_LSL, reg3, reg3, scale, rNone);
1027 loadBaseIndexed(cUnit, reg2, reg3, reg0, 0, WORD);
1028 opRegImm(cUnit, OP_ADD, reg2, 4, rNone);
1029 loadBaseIndexed(cUnit, reg2, reg3, reg1, 0, WORD);
1030 storeValuePair(cUnit, reg0, reg1, vDest, reg3);
1032 loadBaseIndexed(cUnit, reg2, reg3, reg0, scale, size);
1033 storeValue(cUnit, reg0, vDest, reg3);
1036 //TODO: probably want to move this into loadBaseIndexed
1041 funct = (void*) &selfVerificationLoadDoubleword;
1044 funct = (void*) &selfVerificationLoad;
1047 funct = (void*) &selfVerificationLoadHalfword;
1050 funct = (void*) &selfVerificationLoadSignedHalfword;
1053 funct = (void*) &selfVerificationLoadByte;
1056 funct = (void*) &selfVerificationLoadSignedByte;
1062 /* Combine address and index */
1064 opRegRegImm(cUnit, OP_LSL, reg3, reg3, scale, rNone);
1065 opRegReg(cUnit, OP_ADD, reg2, reg3);
1067 int regMap = reg1 << 8 | reg0 << 4 | reg2;
1068 selfVerificationMemOpWrapper(cUnit, regMap, funct);
1070 opRegReg(cUnit, OP_SUB, reg2, reg3);
1072 if ((size == LONG) || (size == DOUBLE))
1073 storeValuePair(cUnit, reg0, reg1, vDest, reg3);
1075 storeValue(cUnit, reg0, vDest, reg3);
1080 * Generate array store
1083 static void genArrayPut(CompilationUnit *cUnit, MIR *mir, OpSize size,
1084 int vArray, int vIndex, int vSrc, int scale)
1086 int lenOffset = offsetof(ArrayObject, length);
1087 int dataOffset = offsetof(ArrayObject, contents);
1088 int reg0, reg1, reg2, reg3;
1090 reg0 = selectFirstRegister(cUnit, vArray,
1091 (size == LONG) || (size == DOUBLE));
1092 reg1 = NEXT_REG(reg0);
1093 reg2 = NEXT_REG(reg1);
1094 reg3 = NEXT_REG(reg2);
1096 loadValue(cUnit, vArray, reg2);
1097 loadValue(cUnit, vIndex, reg3);
1100 ArmLIR * pcrLabel = NULL;
1102 if (!(mir->OptimizationFlags & MIR_IGNORE_NULL_CHECK)) {
1103 pcrLabel = genNullCheck(cUnit, vArray, reg2, mir->offset, NULL);
1106 if (!(mir->OptimizationFlags & MIR_IGNORE_RANGE_CHECK)) {
1108 loadWordDisp(cUnit, reg2, lenOffset, reg0);
1109 /* reg2 -> array data */
1110 opRegImm(cUnit, OP_ADD, reg2, dataOffset, rNone);
1111 genBoundsCheck(cUnit, reg3, reg0, mir->offset, pcrLabel);
1113 /* reg2 -> array data */
1114 opRegImm(cUnit, OP_ADD, reg2, dataOffset, rNone);
1117 /* at this point, reg2 points to array, reg3 is unscaled index */
1118 #if !defined(WITH_SELF_VERIFICATION)
1119 if ((size == LONG) || (size == DOUBLE)) {
1120 //TUNING: redo. Make specific wide routine, perhaps use ldmia/fp regs
1121 loadValuePair(cUnit, vSrc, reg0, reg1);
1122 updateLiveRegisterPair(cUnit, vSrc, reg0, reg1);
1124 opRegRegImm(cUnit, OP_LSL, reg3, reg3, scale, rNone);
1125 storeBaseIndexed(cUnit, reg2, reg3, reg0, 0, WORD);
1126 opRegImm(cUnit, OP_ADD, reg2, 4, rNone);
1127 storeBaseIndexed(cUnit, reg2, reg3, reg1, 0, WORD);
1129 loadValue(cUnit, vSrc, reg0);
1130 updateLiveRegister(cUnit, vSrc, reg0);
1131 storeBaseIndexed(cUnit, reg2, reg3, reg0, scale, size);
1134 //TODO: probably want to move this into storeBaseIndexed
1139 funct = (void*) &selfVerificationStoreDoubleword;
1142 funct = (void*) &selfVerificationStore;
1146 funct = (void*) &selfVerificationStoreHalfword;
1150 funct = (void*) &selfVerificationStoreByte;
1157 /* Combine address and index */
1158 if ((size == LONG) || (size == DOUBLE)) {
1159 loadValuePair(cUnit, vSrc, reg0, reg1);
1160 updateLiveRegisterPair(cUnit, vSrc, reg0, reg1);
1162 loadValue(cUnit, vSrc, reg0);
1163 updateLiveRegister(cUnit, vSrc, reg0);
1166 opRegRegImm(cUnit, OP_LSL, reg3, reg3, scale, rNone);
1167 opRegReg(cUnit, OP_ADD, reg2, reg3);
1169 int regMap = reg1 << 8 | reg0 << 4 | reg2;
1170 selfVerificationMemOpWrapper(cUnit, regMap, funct);
1172 opRegReg(cUnit, OP_SUB, reg2, reg3);
1176 static bool genShiftOpLong(CompilationUnit *cUnit, MIR *mir, int vDest,
1177 int vSrc1, int vShift)
1180 * Don't mess with the regsiters here as there is a particular calling
1181 * convention to the out-of-line handler.
1183 loadValue(cUnit, vShift, r2);
1184 loadValuePair(cUnit, vSrc1, r0, r1);
1185 switch( mir->dalvikInsn.opCode) {
1187 case OP_SHL_LONG_2ADDR:
1188 genDispatchToHandler(cUnit, TEMPLATE_SHL_LONG);
1191 case OP_SHR_LONG_2ADDR:
1192 genDispatchToHandler(cUnit, TEMPLATE_SHR_LONG);
1195 case OP_USHR_LONG_2ADDR:
1196 genDispatchToHandler(cUnit, TEMPLATE_USHR_LONG);
1201 storeValuePair(cUnit, r0, r1, vDest, r2);
1204 bool genArithOpFloatPortable(CompilationUnit *cUnit, MIR *mir,
1205 int vDest, int vSrc1, int vSrc2)
1208 * Don't optimize the regsiter usage here as they are governed by the EABI
1209 * calling convention.
1214 /* TODO: use a proper include file to define these */
1215 float __aeabi_fadd(float a, float b);
1216 float __aeabi_fsub(float a, float b);
1217 float __aeabi_fdiv(float a, float b);
1218 float __aeabi_fmul(float a, float b);
1219 float fmodf(float a, float b);
1221 reg0 = selectFirstRegister(cUnit, vSrc2, false);
1222 reg1 = NEXT_REG(reg0);
1224 switch (mir->dalvikInsn.opCode) {
1225 case OP_ADD_FLOAT_2ADDR:
1227 funct = (void*) __aeabi_fadd;
1229 case OP_SUB_FLOAT_2ADDR:
1231 funct = (void*) __aeabi_fsub;
1233 case OP_DIV_FLOAT_2ADDR:
1235 funct = (void*) __aeabi_fdiv;
1237 case OP_MUL_FLOAT_2ADDR:
1239 funct = (void*) __aeabi_fmul;
1241 case OP_REM_FLOAT_2ADDR:
1243 funct = (void*) fmodf;
1245 case OP_NEG_FLOAT: {
1246 loadValue(cUnit, vSrc2, reg0);
1247 opRegImm(cUnit, OP_ADD, reg0, 0x80000000, reg1);
1248 storeValue(cUnit, reg0, vDest, reg1);
1254 loadConstant(cUnit, r2, (int)funct);
1255 loadValue(cUnit, vSrc1, r0);
1256 loadValue(cUnit, vSrc2, r1);
1257 opReg(cUnit, OP_BLX, r2);
1258 storeValue(cUnit, r0, vDest, r1);
1262 bool genArithOpDoublePortable(CompilationUnit *cUnit, MIR *mir,
1263 int vDest, int vSrc1, int vSrc2)
1266 int reg0, reg1, reg2;
1268 /* TODO: use a proper include file to define these */
1269 double __aeabi_dadd(double a, double b);
1270 double __aeabi_dsub(double a, double b);
1271 double __aeabi_ddiv(double a, double b);
1272 double __aeabi_dmul(double a, double b);
1273 double fmod(double a, double b);
1275 reg0 = selectFirstRegister(cUnit, vSrc2, true);
1276 reg1 = NEXT_REG(reg0);
1277 reg2 = NEXT_REG(reg1);
1279 switch (mir->dalvikInsn.opCode) {
1280 case OP_ADD_DOUBLE_2ADDR:
1282 funct = (void*) __aeabi_dadd;
1284 case OP_SUB_DOUBLE_2ADDR:
1286 funct = (void*) __aeabi_dsub;
1288 case OP_DIV_DOUBLE_2ADDR:
1290 funct = (void*) __aeabi_ddiv;
1292 case OP_MUL_DOUBLE_2ADDR:
1294 funct = (void*) __aeabi_dmul;
1296 case OP_REM_DOUBLE_2ADDR:
1298 funct = (void*) fmod;
1300 case OP_NEG_DOUBLE: {
1301 loadValuePair(cUnit, vSrc2, reg0, reg1);
1302 opRegImm(cUnit, OP_ADD, reg1, 0x80000000, reg2);
1303 storeValuePair(cUnit, reg0, reg1, vDest, reg2);
1310 * Don't optimize the regsiter usage here as they are governed by the EABI
1311 * calling convention.
1313 loadConstant(cUnit, r4PC, (int)funct);
1314 loadValuePair(cUnit, vSrc1, r0, r1);
1315 loadValuePair(cUnit, vSrc2, r2, r3);
1316 opReg(cUnit, OP_BLX, r4PC);
1317 storeValuePair(cUnit, r0, r1, vDest, r2);
/*
 * Generate code for 64-bit integer arithmetic.
 *
 * Simple ops (add/sub/and/or/xor) are emitted inline as a lo-word op
 * followed by a hi-word op (firstOp/secondOp). MUL goes through an
 * out-of-line template; DIV/REM call out to __aeabi_ldivmod; NEG is
 * emitted inline as a two-word negate.
 *
 * NOTE: many structural lines (case labels, break/return statements,
 * braces) were elided from this listing - code below is kept verbatim.
 */
1321 static bool genArithOpLong(CompilationUnit *cUnit, MIR *mir, int vDest,
1322 int vSrc1, int vSrc2)
1324 OpKind firstOp = OP_BKPT;
1325 OpKind secondOp = OP_BKPT;
1326 bool callOut = false;
1329 int reg0, reg1, reg2, reg3;
1330 /* TODO - find proper .h file to declare these */
1331 long long __aeabi_ldivmod(long long op1, long long op2);
1333 switch (mir->dalvikInsn.opCode) {
1339 case OP_ADD_LONG_2ADDR:
1344 case OP_SUB_LONG_2ADDR:
/* MUL uses a template with a fixed r0..r3 calling convention */
1349 case OP_MUL_LONG_2ADDR:
1350 loadValuePair(cUnit, vSrc1, r0, r1);
1351 loadValuePair(cUnit, vSrc2, r2, r3);
1352 genDispatchToHandler(cUnit, TEMPLATE_MUL_LONG);
1353 storeValuePair(cUnit, r0, r1, vDest, r2);
1357 case OP_DIV_LONG_2ADDR:
1360 callTgt = (void*)__aeabi_ldivmod;
1362 /* NOTE - result is in r2/r3 instead of r0/r1 */
1364 case OP_REM_LONG_2ADDR:
1366 callTgt = (void*)__aeabi_ldivmod;
1370 case OP_AND_LONG_2ADDR:
1375 case OP_OR_LONG_2ADDR:
1380 case OP_XOR_LONG_2ADDR:
/* NEG_LONG: result = 0 - operand, done as SUB on lo word, SBC on hi */
1385 reg0 = selectFirstRegister(cUnit, vSrc2, true);
1386 reg1 = NEXT_REG(reg0);
1387 reg2 = NEXT_REG(reg1);
1388 reg3 = NEXT_REG(reg2);
1390 loadValuePair(cUnit, vSrc2, reg0, reg1);
1391 loadConstant(cUnit, reg3, 0);
1392 opRegRegReg(cUnit, OP_SUB, reg2, reg3, reg0);
1393 opRegReg(cUnit, OP_SBC, reg3, reg1);
1394 storeValuePair(cUnit, reg2, reg3, vDest, reg0);
1398 LOGE("Invalid long arith op");
/* Inline path: apply firstOp to the low words, secondOp to the high */
1402 reg0 = selectFirstRegister(cUnit, vSrc1, true);
1403 reg1 = NEXT_REG(reg0);
1404 reg2 = NEXT_REG(reg1);
1405 reg3 = NEXT_REG(reg2);
1407 loadValuePair(cUnit, vSrc1, reg0, reg1);
1408 loadValuePair(cUnit, vSrc2, reg2, reg3);
1409 opRegReg(cUnit, firstOp, reg0, reg2);
1410 opRegReg(cUnit, secondOp, reg1, reg3);
1411 storeValuePair(cUnit, reg0, reg1, vDest, reg2);
1413 * Don't optimize the register usage here as they are governed by the EABI
1414 * calling convention.
/* Call-out path: retReg selects r0 or r2 depending on the helper */
1417 loadValuePair(cUnit, vSrc2, r2, r3);
1418 loadConstant(cUnit, r4PC, (int) callTgt);
1419 loadValuePair(cUnit, vSrc1, r0, r1);
1420 opReg(cUnit, OP_BLX, r4PC);
1421 storeValuePair(cUnit, retReg, retReg+1, vDest, r4PC);
/*
 * Generate code for 32-bit integer arithmetic.
 *
 * Most ops are emitted inline (two- or three-operand form depending on
 * the opcode and on which source operand is cached in a native reg).
 * DIV/REM call out to the EABI helpers and check the divisor for zero.
 *
 * NOTE: many structural lines (case labels, break/return statements,
 * braces) were elided from this listing - code below is kept verbatim.
 */
1426 static bool genArithOpInt(CompilationUnit *cUnit, MIR *mir, int vDest,
1427 int vSrc1, int vSrc2)
1429 OpKind op = OP_BKPT;
1430 bool callOut = false;
1431 bool checkZero = false;
1432 bool threeOperand = false;
1435 int reg0, reg1, regDest;
1437 /* TODO - find proper .h file to declare these */
1438 int __aeabi_idivmod(int op1, int op2);
1439 int __aeabi_idiv(int op1, int op2);
1441 switch (mir->dalvikInsn.opCode) {
1449 case OP_ADD_INT_2ADDR:
1451 threeOperand = true;
1454 case OP_SUB_INT_2ADDR:
1456 threeOperand = true;
1459 case OP_MUL_INT_2ADDR:
1463 case OP_DIV_INT_2ADDR:
1466 callTgt = __aeabi_idiv;
1469 /* NOTE: returns in r1 */
1471 case OP_REM_INT_2ADDR:
1474 callTgt = __aeabi_idivmod;
1478 case OP_AND_INT_2ADDR:
1482 case OP_OR_INT_2ADDR:
1486 case OP_XOR_INT_2ADDR:
1490 case OP_SHL_INT_2ADDR:
1494 case OP_SHR_INT_2ADDR:
1498 case OP_USHR_INT_2ADDR:
1502 LOGE("Invalid word arith op: 0x%x(%d)",
1503 mir->dalvikInsn.opCode, mir->dalvikInsn.opCode);
/* Inline path (no call-out) */
1507 /* Try to allocate reg0 to the currently cached source operand */
1508 if (cUnit->registerScoreboard.liveDalvikReg == vSrc1) {
1509 reg0 = selectFirstRegister(cUnit, vSrc1, false);
1510 reg1 = NEXT_REG(reg0);
1511 regDest = NEXT_REG(reg1);
1513 loadValue(cUnit, vSrc1, reg0); /* Should be optimized away */
1514 loadValue(cUnit, vSrc2, reg1);
1516 opRegRegReg(cUnit, op, regDest, reg0, reg1);
1517 storeValue(cUnit, regDest, vDest, reg1);
1519 opRegReg(cUnit, op, reg0, reg1);
1520 storeValue(cUnit, reg0, vDest, reg1);
/* vSrc2 (not vSrc1) is the cached operand here, so load order flips */
1523 reg0 = selectFirstRegister(cUnit, vSrc2, false);
1524 reg1 = NEXT_REG(reg0);
1525 regDest = NEXT_REG(reg1);
1527 loadValue(cUnit, vSrc1, reg1); /* Load this value first */
1528 loadValue(cUnit, vSrc2, reg0); /* May be optimized away */
1530 opRegRegReg(cUnit, op, regDest, reg1, reg0);
1531 storeValue(cUnit, regDest, vDest, reg1);
1533 opRegReg(cUnit, op, reg1, reg0);
1534 storeValue(cUnit, reg1, vDest, reg0);
/* Call-out path: args in r0/r1 per EABI, target preloaded into r2 */
1539 * Load the callout target first since it will never be eliminated
1540 * and its value will be used first.
1542 loadConstant(cUnit, r2, (int) callTgt);
1544 * Load vSrc2 first if it is not cached in a native register or it
1545 * is in r0 which will be clobbered if vSrc1 is loaded first.
1547 if (cUnit->registerScoreboard.liveDalvikReg != vSrc2 ||
1548 cUnit->registerScoreboard.nativeReg == r0) {
1549 /* Cannot be optimized and won't clobber r0 */
1550 loadValue(cUnit, vSrc2, r1);
1551 /* May be optimized if vSrc1 is cached */
1552 loadValue(cUnit, vSrc1, r0);
1554 loadValue(cUnit, vSrc1, r0);
1555 loadValue(cUnit, vSrc2, r1);
/* Divide-by-zero check on the divisor (guarded by checkZero upstream) */
1558 genNullCheck(cUnit, vSrc2, r1, mir->offset, NULL);
1560 opReg(cUnit, OP_BLX, r2);
1561 storeValue(cUnit, retReg, vDest, r2);
1566 static bool genArithOp(CompilationUnit *cUnit, MIR *mir)
1568 OpCode opCode = mir->dalvikInsn.opCode;
1569 int vA = mir->dalvikInsn.vA;
1570 int vB = mir->dalvikInsn.vB;
1571 int vC = mir->dalvikInsn.vC;
1573 if ((opCode >= OP_ADD_LONG_2ADDR) && (opCode <= OP_XOR_LONG_2ADDR)) {
1574 return genArithOpLong(cUnit,mir, vA, vA, vB);
1576 if ((opCode >= OP_ADD_LONG) && (opCode <= OP_XOR_LONG)) {
1577 return genArithOpLong(cUnit,mir, vA, vB, vC);
1579 if ((opCode >= OP_SHL_LONG_2ADDR) && (opCode <= OP_USHR_LONG_2ADDR)) {
1580 return genShiftOpLong(cUnit,mir, vA, vA, vB);
1582 if ((opCode >= OP_SHL_LONG) && (opCode <= OP_USHR_LONG)) {
1583 return genShiftOpLong(cUnit,mir, vA, vB, vC);
1585 if ((opCode >= OP_ADD_INT_2ADDR) && (opCode <= OP_USHR_INT_2ADDR)) {
1586 return genArithOpInt(cUnit,mir, vA, vA, vB);
1588 if ((opCode >= OP_ADD_INT) && (opCode <= OP_USHR_INT)) {
1589 return genArithOpInt(cUnit,mir, vA, vB, vC);
1591 if ((opCode >= OP_ADD_FLOAT_2ADDR) && (opCode <= OP_REM_FLOAT_2ADDR)) {
1592 return genArithOpFloat(cUnit,mir, vA, vA, vB);
1594 if ((opCode >= OP_ADD_FLOAT) && (opCode <= OP_REM_FLOAT)) {
1595 return genArithOpFloat(cUnit, mir, vA, vB, vC);
1597 if ((opCode >= OP_ADD_DOUBLE_2ADDR) && (opCode <= OP_REM_DOUBLE_2ADDR)) {
1598 return genArithOpDouble(cUnit,mir, vA, vA, vB);
1600 if ((opCode >= OP_ADD_DOUBLE) && (opCode <= OP_REM_DOUBLE)) {
1601 return genArithOpDouble(cUnit,mir, vA, vB, vC);
1606 static bool genConversionCall(CompilationUnit *cUnit, MIR *mir, void *funct,
1607 int srcSize, int tgtSize)
1610 * Don't optimize the register usage since it calls out to template
1613 loadConstant(cUnit, r2, (int)funct);
1615 loadValue(cUnit, mir->dalvikInsn.vB, r0);
1617 loadValuePair(cUnit, mir->dalvikInsn.vB, r0, r1);
1619 opReg(cUnit, OP_BLX, r2);
1621 storeValue(cUnit, r0, mir->dalvikInsn.vA, r1);
1623 storeValuePair(cUnit, r0, r1, mir->dalvikInsn.vA, r2);
/*
 * Set up the outgoing argument area for a non-range invoke (up to 5 args).
 * Args are loaded into r0..r4 then stored as a block just below the
 * callee's StackSaveArea; a null check on "this" (arg[0]) is emitted into
 * *pcrLabel when requested.
 *
 * NOTE: loop body lines (regMask accumulation, braces) were elided from
 * this listing - code below is kept verbatim.
 */
1628 static void genProcessArgsNoRange(CompilationUnit *cUnit, MIR *mir,
1629 DecodedInstruction *dInsn,
1633 unsigned int regMask = 0;
1635 /* Load arguments to r0..r4 */
1636 for (i = 0; i < dInsn->vA; i++) {
1638 loadValue(cUnit, dInsn->arg[i], i);
1641 /* Up to 5 args are pushed on top of FP - sizeofStackSaveArea */
1642 opRegRegImm(cUnit, OP_SUB, r7, rFP,
1643 sizeof(StackSaveArea) + (dInsn->vA << 2), rNone);
1644 /* generate null check */
1646 *pcrLabel = genNullCheck(cUnit, dInsn->arg[0], r0, mir->offset,
1649 storeMultiple(cUnit, r7, regMask);
/*
 * Set up the outgoing argument area for a range invoke (invoke-*\/range).
 * Copies numArgs words from rFP[vC..] to just below the callee's
 * StackSaveArea using ldm/stm in batches of 4; for long argument lists a
 * counted loop is generated. r0 ("this") is preserved across the copy
 * loop via a push/pop.
 *
 * NOTE: loop-control lines and memory-barrier calls were elided from this
 * listing - code below is kept verbatim.
 */
1653 static void genProcessArgsRange(CompilationUnit *cUnit, MIR *mir,
1654 DecodedInstruction *dInsn,
1657 int srcOffset = dInsn->vC << 2;
1658 int numArgs = dInsn->vA;
/* r4PC points at the first source argument on the Dalvik frame */
1664 opRegRegImm(cUnit, OP_ADD, r4PC, rFP, srcOffset, rNone);
1665 /* load [r0 .. min(numArgs,4)] */
1666 regMask = (1 << ((numArgs < 4) ? numArgs : 4)) - 1;
1668 * Protect the loadMultiple instruction from being reordered with other
1669 * Dalvik stack accesses.
1672 loadMultiple(cUnit, r4PC, regMask);
1675 opRegRegImm(cUnit, OP_SUB, r7, rFP,
1676 sizeof(StackSaveArea) + (numArgs << 2), rNone);
1677 /* generate null check */
1679 *pcrLabel = genNullCheck(cUnit, dInsn->vC, r0, mir->offset, NULL);
1683 * Handle remaining 4n arguments:
1684 * store previously loaded 4 values and load the next 4 values
1687 ArmLIR *loopLabel = NULL;
1689 * r0 contains "this" and it will be used later, so push it to the stack
1690 * first. Pushing r5 (rFP) is just for stack alignment purposes.
1692 opImm(cUnit, OP_PUSH, (1 << r0 | 1 << rFP));
1693 /* No need to generate the loop structure if numArgs <= 11 */
1695 loadConstant(cUnit, 5, ((numArgs - 4) >> 2) << 2);
1696 loopLabel = newLIR0(cUnit, ARM_PSEUDO_TARGET_LABEL);
1697 loopLabel->defMask = ENCODE_ALL;
1699 storeMultiple(cUnit, r7, regMask);
1701 * Protect the loadMultiple instruction from being reordered with other
1702 * Dalvik stack accesses.
1705 loadMultiple(cUnit, r4PC, regMask);
1707 /* No need to generate the loop structure if numArgs <= 11 */
1709 opRegImm(cUnit, OP_SUB, rFP, 4, rNone);
1710 genConditionalBranch(cUnit, ARM_COND_NE, loopLabel);
1714 /* Save the last batch of loaded values */
1715 storeMultiple(cUnit, r7, regMask);
1717 /* Generate the loop epilogue - don't use r0 */
1718 if ((numArgs > 4) && (numArgs % 4)) {
1719 regMask = ((1 << (numArgs & 0x3)) - 1) << 1;
1721 * Protect the loadMultiple instruction from being reordered with other
1722 * Dalvik stack accesses.
1725 loadMultiple(cUnit, r4PC, regMask);
/* Restore "this" (r0) and the alignment word */
1729 opImm(cUnit, OP_POP, (1 << r0 | 1 << rFP));
1731 /* Save the modulo 4 arguments */
1732 if ((numArgs > 4) && (numArgs % 4)) {
1733 storeMultiple(cUnit, r7, regMask);
1738 * Generate code to setup the call stack then jump to the chaining cell if it
1739 * is not a native method.
/*
 * Common tail for invokes whose callee is a known, unique method.
 * Loads the return-chaining-cell address (r1) and the Dalvik call-site PC
 * (r4PC), then dispatches to the native-invoke template or the chained
 * method-invoke template depending on whether the callee is native.
 * Falls through to the exception trap.
 *
 * NOTE: #endif/brace lines were elided from this listing - code below is
 * kept verbatim.
 */
1741 static void genInvokeSingletonCommon(CompilationUnit *cUnit, MIR *mir,
1742 BasicBlock *bb, ArmLIR *labelList,
1744 const Method *calleeMethod)
1746 ArmLIR *retChainingCell = &labelList[bb->fallThrough->id];
1748 /* r1 = &retChainingCell */
1749 ArmLIR *addrRetChain = opRegRegImm(cUnit, OP_ADD, r1, rpc, 0, rNone);
1750 /* r4PC = dalvikCallsite */
1751 loadConstant(cUnit, r4PC,
1752 (int) (cUnit->method->insns + mir->offset));
1753 addrRetChain->generic.target = (LIR *) retChainingCell;
1755 * r0 = calleeMethod (loaded upon calling genInvokeSingletonCommon)
1756 * r1 = &ChainingCell
1757 * r4PC = callsiteDPC
1759 if (dvmIsNativeMethod(calleeMethod)) {
1760 genDispatchToHandler(cUnit, TEMPLATE_INVOKE_METHOD_NATIVE);
1761 #if defined(INVOKE_STATS)
1762 gDvmJit.invokeNative++;
1765 genDispatchToHandler(cUnit, TEMPLATE_INVOKE_METHOD_CHAIN);
1766 #if defined(INVOKE_STATS)
1767 gDvmJit.invokeChain++;
1769 /* Branch to the chaining cell */
1770 genUnconditionalBranch(cUnit, &labelList[bb->taken->id]);
1772 /* Handle exceptions using the interpreter */
1773 genTrap(cUnit, mir->offset, pcrLabel);
1777 * Generate code to check the validity of a predicted chain and take actions
1778 * based on the result.
1780 * 0x426a99aa : ldr r4, [pc, #72] --> r4 <- dalvikPC of this invoke
1781 * 0x426a99ac : add r1, pc, #32 --> r1 <- &retChainingCell
1782 * 0x426a99ae : add r2, pc, #40 --> r2 <- &predictedChainingCell
1783 * 0x426a99b0 : blx_1 0x426a918c --+ TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN
1784 * 0x426a99b2 : blx_2 see above --+
1785 * 0x426a99b4 : b 0x426a99d8 --> off to the predicted chain
1786 * 0x426a99b6 : b 0x426a99c8 --> punt to the interpreter
1787 * 0x426a99b8 : ldr r0, [r7, #44] --> r0 <- this->class->vtable[methodIdx]
1788 * 0x426a99ba : cmp r1, #0 --> compare r1 (rechain count) against 0
1789 * 0x426a99bc : bgt 0x426a99c2 --> >=0? don't rechain
1790 * 0x426a99be : ldr r7, [r6, #96] --+ dvmJitToPatchPredictedChain
1791 * 0x426a99c0 : blx r7 --+
1792 * 0x426a99c2 : add r1, pc, #12 --> r1 <- &retChainingCell
1793 * 0x426a99c4 : blx_1 0x426a9098 --+ TEMPLATE_INVOKE_METHOD_NO_OPT
1794 * 0x426a99c6 : blx_2 see above --+
/*
 * Common tail for virtual invokes using predicted chaining (see the
 * annotated disassembly in the comment above). Dispatches through
 * TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN; the template returns to one of
 * three offsets from lr: chained fast path, interpreter punt, or full
 * resolution with optional re-patching of the predicted cell.
 *
 * NOTE: several structural lines (braces, #endif) were elided from this
 * listing - code below is kept verbatim.
 */
1796 static void genInvokeVirtualCommon(CompilationUnit *cUnit, MIR *mir,
1798 ArmLIR *retChainingCell,
1799 ArmLIR *predChainingCell,
1802 /* "this" is already left in r0 by genProcessArgs* */
1804 /* r4PC = dalvikCallsite */
1805 loadConstant(cUnit, r4PC,
1806 (int) (cUnit->method->insns + mir->offset));
1808 /* r1 = &retChainingCell */
1809 ArmLIR *addrRetChain = opRegRegImm(cUnit, OP_ADD, r1, rpc, 0, rNone);
1810 addrRetChain->generic.target = (LIR *) retChainingCell;
1812 /* r2 = &predictedChainingCell */
1813 ArmLIR *predictedChainingCell = opRegRegImm(cUnit, OP_ADD, r2, rpc, 0,
1815 predictedChainingCell->generic.target = (LIR *) predChainingCell;
1817 genDispatchToHandler(cUnit, TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN);
1819 /* return through lr - jump to the chaining cell */
1820 genUnconditionalBranch(cUnit, predChainingCell);
1823 * null-check on "this" may have been eliminated, but we still need a PC-
1824 * reconstruction label for stack overflow bailout.
1826 if (pcrLabel == NULL) {
1827 int dPC = (int) (cUnit->method->insns + mir->offset);
1828 pcrLabel = dvmCompilerNew(sizeof(ArmLIR), true);
1829 pcrLabel->opCode = ARM_PSEUDO_PC_RECONSTRUCTION_CELL;
1830 pcrLabel->operands[0] = dPC;
1831 pcrLabel->operands[1] = mir->offset;
1832 /* Insert the place holder to the growable list */
1833 dvmInsertGrowableList(&cUnit->pcReconstructionList, pcrLabel);
1836 /* return through lr+2 - punt to the interpreter */
1837 genUnconditionalBranch(cUnit, pcrLabel);
1840 * return through lr+4 - fully resolve the callee method.
1842 * r2 <- &predictedChainCell
1845 * r7 <- this->class->vtable
1848 /* r0 <- calleeMethod */
1849 loadWordDisp(cUnit, r7, methodIndex * 4, r0);
1851 /* Check if rechain limit is reached */
1852 opRegImm(cUnit, OP_CMP, r1, 0, rNone);
1854 ArmLIR *bypassRechaining = opCondBranch(cUnit, ARM_COND_GT);
1856 loadWordDisp(cUnit, rGLUE, offsetof(InterpState,
1857 jitToInterpEntries.dvmJitToPatchPredictedChain), r7);
1861 * r2 = &predictedChainingCell
1864 * &returnChainingCell has been loaded into r1 but is not needed
1865 * when patching the chaining cell and will be clobbered upon
1866 * returning so it will be reconstructed again.
1868 opReg(cUnit, OP_BLX, r7);
1870 /* r1 = &retChainingCell */
1871 addrRetChain = opRegRegImm(cUnit, OP_ADD, r1, rpc, 0, rNone);
1872 addrRetChain->generic.target = (LIR *) retChainingCell;
1874 bypassRechaining->generic.target = (LIR *) addrRetChain;
1876 * r0 = calleeMethod,
1877 * r1 = &ChainingCell,
1878 * r4PC = callsiteDPC,
1880 genDispatchToHandler(cUnit, TEMPLATE_INVOKE_METHOD_NO_OPT);
1881 #if defined(INVOKE_STATS)
1882 gDvmJit.invokePredictedChain++;
1884 /* Handle exceptions using the interpreter */
1885 genTrap(cUnit, mir->offset, pcrLabel);
1889 * Upon calling this function, "this" is stored in r0. The actual class will be
1890 * chased down off r0 and the predicted one will be retrieved through
1891 * predictedChainingCell then a comparison is performed to see whether the
1892 * previously established chaining is still valid.
1894 * The return LIR is a branch based on the comparison result. The actual branch
1895 * target will be setup in the caller.
/*
 * Emit the predicted-chain validity check: load the actual class of
 * "this" (r3) and the cached class/method/counter from the predicted
 * chaining cell (r2/r0/r7), set up r1 and r4PC for the invoke templates,
 * then compare predicted vs actual class. Returns the EQ branch whose
 * target the caller wires up.
 *
 * NOTE: brace lines were elided from this listing - code below is kept
 * verbatim.
 */
1897 static ArmLIR *genCheckPredictedChain(CompilationUnit *cUnit,
1898 ArmLIR *predChainingCell,
1899 ArmLIR *retChainingCell,
1902 /* r3 now contains this->clazz */
1903 loadWordDisp(cUnit, r0, offsetof(Object, clazz), r3);
1906 * r2 now contains predicted class. The starting offset of the
1907 * cached value is 4 bytes into the chaining cell.
1909 ArmLIR *getPredictedClass =
1910 loadWordDisp(cUnit, rpc, offsetof(PredictedChainingCell, clazz), r2);
1911 getPredictedClass->generic.target = (LIR *) predChainingCell;
1914 * r0 now contains predicted method. The starting offset of the
1915 * cached value is 8 bytes into the chaining cell.
1917 ArmLIR *getPredictedMethod =
1918 loadWordDisp(cUnit, rpc, offsetof(PredictedChainingCell, method), r0);
1919 getPredictedMethod->generic.target = (LIR *) predChainingCell;
1921 /* Load the stats counter to see if it is time to unchain and refresh */
1922 ArmLIR *getRechainingRequestCount =
1923 loadWordDisp(cUnit, rpc, offsetof(PredictedChainingCell, counter), r7);
1924 getRechainingRequestCount->generic.target =
1925 (LIR *) predChainingCell;
1927 /* r4PC = dalvikCallsite */
1928 loadConstant(cUnit, r4PC,
1929 (int) (cUnit->method->insns + mir->offset));
1931 /* r1 = &retChainingCell */
1932 ArmLIR *addrRetChain = opRegRegImm(cUnit, OP_ADD, r1, rpc, 0, rNone);
1933 addrRetChain->generic.target = (LIR *) retChainingCell;
1935 /* Check if r2 (predicted class) == r3 (actual class) */
1936 opRegReg(cUnit, OP_CMP, r2, r3);
1938 return opCondBranch(cUnit, ARM_COND_EQ);
1941 /* Geneate a branch to go back to the interpreter */
1942 static void genPuntToInterp(CompilationUnit *cUnit, unsigned int offset)
1944 /* r0 = dalvik pc */
1945 loadConstant(cUnit, r0, (int) (cUnit->method->insns + offset));
1946 loadWordDisp(cUnit, r0, offsetof(Object, clazz), r3);
1947 loadWordDisp(cUnit, rGLUE, offsetof(InterpState,
1948 jitToInterpEntries.dvmJitToInterpPunt), r1);
1949 opReg(cUnit, OP_BLX, r1);
1953 * Attempt to single step one instruction using the interpreter and return
1954 * to the compiled code for the next Dalvik instruction
1956 static void genInterpSingleStep(CompilationUnit *cUnit, MIR *mir)
1958 int flags = dexGetInstrFlags(gDvm.instrFlags, mir->dalvikInsn.opCode);
1959 int flagsToCheck = kInstrCanBranch | kInstrCanSwitch | kInstrCanReturn |
1961 if ((mir->next == NULL) || (flags & flagsToCheck)) {
1962 genPuntToInterp(cUnit, mir->offset);
1965 int entryAddr = offsetof(InterpState,
1966 jitToInterpEntries.dvmJitToInterpSingleStep);
1967 loadWordDisp(cUnit, rGLUE, entryAddr, r2);
1968 /* r0 = dalvik pc */
1969 loadConstant(cUnit, r0, (int) (cUnit->method->insns + mir->offset));
1970 /* r1 = dalvik pc of following instruction */
1971 loadConstant(cUnit, r1, (int) (cUnit->method->insns + mir->next->offset));
1972 opReg(cUnit, OP_BLX, r2);
1975 /* Generate conditional branch instructions */
1976 static ArmLIR *genConditionalBranch(CompilationUnit *cUnit,
1977 ArmConditionCode cond,
1980 ArmLIR *branch = opCondBranch(cUnit, cond);
1981 branch->generic.target = (LIR *) target;
1985 /* Generate unconditional branch instructions */
1986 static ArmLIR *genUnconditionalBranch(CompilationUnit *cUnit, ArmLIR *target)
1988 ArmLIR *branch = opNone(cUnit, OP_UNCOND_BR);
1989 branch->generic.target = (LIR *) target;
1993 /* Load the address of a Dalvik register on the frame */
1994 static ArmLIR *loadValueAddress(CompilationUnit *cUnit, int vSrc, int rDest)
1996 return opRegRegImm(cUnit, OP_ADD, rDest, rFP, vSrc*4, rNone);
1999 /* Load a single value from rFP[src] and store them into rDest */
2000 static ArmLIR *loadValue(CompilationUnit *cUnit, int vSrc, int rDest)
2002 return loadBaseDisp(cUnit, NULL, rFP, vSrc * 4, rDest, WORD, false, -1);
2005 /* Load a word at base + displacement. Displacement must be word multiple */
2006 static ArmLIR *loadWordDisp(CompilationUnit *cUnit, int rBase, int displacement,
2009 return loadBaseDisp(cUnit, NULL, rBase, displacement, rDest, WORD, false,
2013 static ArmLIR *storeWordDisp(CompilationUnit *cUnit, int rBase,
2014 int displacement, int rSrc, int rScratch)
2016 return storeBaseDisp(cUnit, rBase, displacement, rSrc, WORD, rScratch);
2019 /* Store a value from rSrc to vDest */
2020 static ArmLIR *storeValue(CompilationUnit *cUnit, int rSrc, int vDest,
2023 killNullCheckedRegister(cUnit, vDest);
2024 updateLiveRegister(cUnit, vDest, rSrc);
2025 return storeBaseDisp(cUnit, rFP, vDest * 4, rSrc, WORD, rScratch);
2028 * Load a pair of values of rFP[src..src+1] and store them into rDestLo and
/*
 * Load the 64-bit pair rFP[vSrc..vSrc+1] into rDestLo/rDestHi. Uses two
 * immediate-offset loads when the offset fits the encoding, otherwise
 * computes the address and uses a load-multiple (which requires
 * rDestLo < rDestHi).
 *
 * NOTE: the branch condition, barrier calls, and braces were elided from
 * this listing - code below is kept verbatim.
 */
2031 static ArmLIR *loadValuePair(CompilationUnit *cUnit, int vSrc, int rDestLo,
2035 /* Use reg + imm5*4 to load the values if possible */
2037 res = loadWordDisp(cUnit, rFP, vSrc*4, rDestLo);
2038 loadWordDisp(cUnit, rFP, (vSrc+1)*4, rDestHi);
2040 assert(rDestLo < rDestHi);
2041 res = loadValueAddress(cUnit, vSrc, rDestLo);
2043 * Protect the loadMultiple instruction from being reordered with other
2044 * Dalvik stack accesses.
2047 loadMultiple(cUnit, rDestLo, (1<<rDestLo) | (1<<rDestHi));
2054 * Store the pair of values held in rSrcLo and rSrcHi into vDest and
/*
 * Store the 64-bit pair rSrcLo/rSrcHi into rFP[vDest..vDest+1], updating
 * the null-check and live-register tracking for both halves. Uses two
 * immediate-offset stores when the offset fits, otherwise computes the
 * address into rScratch and uses a store-multiple (requires
 * rSrcLo < rSrcHi).
 *
 * NOTE: the branch condition, barrier calls, and braces were elided from
 * this listing - code below is kept verbatim.
 */
2057 static ArmLIR *storeValuePair(CompilationUnit *cUnit, int rSrcLo, int rSrcHi,
2058 int vDest, int rScratch)
2061 killNullCheckedRegister(cUnit, vDest);
2062 killNullCheckedRegister(cUnit, vDest+1);
2063 updateLiveRegisterPair(cUnit, vDest, rSrcLo, rSrcHi);
2065 /* Use reg + imm5*4 to store the values if possible */
2067 res = storeWordDisp(cUnit, rFP, vDest*4, rSrcLo, rScratch);
2068 storeWordDisp(cUnit, rFP, (vDest+1)*4, rSrcHi, rScratch);
2070 assert(rSrcLo < rSrcHi);
2071 res = loadValueAddress(cUnit, vDest, rScratch);
2073 * Protect the storeMultiple instruction from being reordered with
2074 * other Dalvik stack accesses.
2077 storeMultiple(cUnit, rScratch, (1<<rSrcLo) | (1 << rSrcHi));
2083 static ArmLIR *genRegCopy(CompilationUnit *cUnit, int rDest, int rSrc)
2085 ArmLIR *res = dvmCompilerRegCopy(cUnit, rDest, rSrc);
2086 dvmCompilerAppendLIR(cUnit, (LIR*)res);
2091 * The following are the first-level codegen routines that analyze the format
2092 * of each bytecode then either dispatch special purpose codegen routines
2093 * or produce corresponding Thumb instructions directly.
2096 static bool handleFmt10t_Fmt20t_Fmt30t(CompilationUnit *cUnit, MIR *mir,
2097 BasicBlock *bb, ArmLIR *labelList)
2099 /* For OP_GOTO, OP_GOTO_16, and OP_GOTO_32 */
2100 genUnconditionalBranch(cUnit, &labelList[bb->taken->id]);
2104 static bool handleFmt10x(CompilationUnit *cUnit, MIR *mir)
2106 OpCode dalvikOpCode = mir->dalvikInsn.opCode;
2107 if (((dalvikOpCode >= OP_UNUSED_3E) && (dalvikOpCode <= OP_UNUSED_43)) ||
2108 ((dalvikOpCode >= OP_UNUSED_E3) && (dalvikOpCode <= OP_UNUSED_EC))) {
2109 LOGE("Codegen: got unused opcode 0x%x\n",dalvikOpCode);
2112 switch (dalvikOpCode) {
2113 case OP_RETURN_VOID:
2114 genReturnCommon(cUnit,mir);
2119 LOGE("Codegen: got unused opcode 0x%x\n",dalvikOpCode);
2129 static bool handleFmt11n_Fmt31i(CompilationUnit *cUnit, MIR *mir)
2131 int reg0, reg1, reg2;
2133 switch (mir->dalvikInsn.opCode) {
2136 /* Avoid using the previously used register */
2137 reg0 = selectFirstRegister(cUnit, vNone, false);
2138 reg1 = NEXT_REG(reg0);
2139 loadConstant(cUnit, reg0, mir->dalvikInsn.vB);
2140 storeValue(cUnit, reg0, mir->dalvikInsn.vA, reg1);
2143 case OP_CONST_WIDE_32: {
2144 /* Avoid using the previously used register */
2145 reg0 = selectFirstRegister(cUnit, vNone, true);
2146 reg1 = NEXT_REG(reg0);
2147 reg2 = NEXT_REG(reg1);
2148 loadConstant(cUnit, reg0, mir->dalvikInsn.vB);
2149 opRegRegImm(cUnit, OP_ASR, reg1, reg0, 31, rNone);
2150 storeValuePair(cUnit, reg0, reg1, mir->dalvikInsn.vA, reg2);
2159 static bool handleFmt21h(CompilationUnit *cUnit, MIR *mir)
2161 int reg0, reg1, reg2;
2163 /* Avoid using the previously used register */
2164 switch (mir->dalvikInsn.opCode) {
2165 case OP_CONST_HIGH16: {
2166 reg0 = selectFirstRegister(cUnit, vNone, false);
2167 reg1 = NEXT_REG(reg0);
2168 loadConstant(cUnit, reg0, mir->dalvikInsn.vB << 16);
2169 storeValue(cUnit, reg0, mir->dalvikInsn.vA, reg1);
2172 case OP_CONST_WIDE_HIGH16: {
2173 reg0 = selectFirstRegister(cUnit, vNone, true);
2174 reg1 = NEXT_REG(reg0);
2175 reg2 = NEXT_REG(reg1);
2176 loadConstant(cUnit, reg1, mir->dalvikInsn.vB << 16);
2177 loadConstant(cUnit, reg0, 0);
2178 storeValuePair(cUnit, reg0, reg1, mir->dalvikInsn.vA, reg2);
2187 static bool handleFmt20bc(CompilationUnit *cUnit, MIR *mir)
2189 /* For OP_THROW_VERIFICATION_ERROR */
2190 genInterpSingleStep(cUnit, mir);
/*
 * Handle format 21c/31c instructions: const-string/class, static field
 * get/put (32- and 64-bit), new-instance, and check-cast. Resolved
 * pointers are baked into the generated code as immediates; sget/sput
 * have a WITH_SELF_VERIFICATION variant that routes memory traffic
 * through the self-verification wrappers.
 *
 * NOTE: many structural lines (case labels, break statements, #else/
 * #endif, braces) were elided from this listing - code below is kept
 * verbatim.
 */
2194 static bool handleFmt21c_Fmt31c(CompilationUnit *cUnit, MIR *mir)
2196 /* Native register to use if the interested value is vA */
2197 int regvA = selectFirstRegister(cUnit, mir->dalvikInsn.vA, false);
2198 /* Native register to use if source is not from Dalvik registers */
2199 int regvNone = selectFirstRegister(cUnit, vNone, false);
2200 /* Similar to regvA but for 64-bit values */
2201 int regvAWide = selectFirstRegister(cUnit, mir->dalvikInsn.vA, true);
2202 /* Similar to regvNone but for 64-bit values */
2203 int regvNoneWide = selectFirstRegister(cUnit, vNone, true);
2205 switch (mir->dalvikInsn.opCode) {
2206 case OP_CONST_STRING_JUMBO:
2207 case OP_CONST_STRING: {
2208 void *strPtr = (void*)
2209 (cUnit->method->clazz->pDvmDex->pResStrings[mir->dalvikInsn.vB]);
2210 assert(strPtr != NULL);
2211 loadConstant(cUnit, regvNone, (int) strPtr );
2212 storeValue(cUnit, regvNone, mir->dalvikInsn.vA, NEXT_REG(regvNone));
2215 case OP_CONST_CLASS: {
2216 void *classPtr = (void*)
2217 (cUnit->method->clazz->pDvmDex->pResClasses[mir->dalvikInsn.vB]);
2218 assert(classPtr != NULL);
2219 loadConstant(cUnit, regvNone, (int) classPtr );
2220 storeValue(cUnit, regvNone, mir->dalvikInsn.vA, NEXT_REG(regvNone));
2223 case OP_SGET_OBJECT:
2224 case OP_SGET_BOOLEAN:
2229 int valOffset = offsetof(StaticField, value);
2230 void *fieldPtr = (void*)
2231 (cUnit->method->clazz->pDvmDex->pResFields[mir->dalvikInsn.vB]);
2232 assert(fieldPtr != NULL);
2233 loadConstant(cUnit, regvNone, (int) fieldPtr + valOffset);
2234 #if !defined(WITH_SELF_VERIFICATION)
2235 loadWordDisp(cUnit, regvNone, 0, regvNone);
2237 int regMap = regvNone << 4 | regvNone;
2238 selfVerificationMemOpWrapper(cUnit, regMap, &selfVerificationLoad);
2241 storeValue(cUnit, regvNone, mir->dalvikInsn.vA, NEXT_REG(regvNone));
2244 case OP_SGET_WIDE: {
2245 int valOffset = offsetof(StaticField, value);
2246 void *fieldPtr = (void*)
2247 (cUnit->method->clazz->pDvmDex->pResFields[mir->dalvikInsn.vB]);
2248 int reg0, reg1, reg2;
2250 assert(fieldPtr != NULL);
2251 reg0 = regvNoneWide;
2252 reg1 = NEXT_REG(reg0);
2253 reg2 = NEXT_REG(reg1);
2254 loadConstant(cUnit, reg2, (int) fieldPtr + valOffset);
2255 #if !defined(WITH_SELF_VERIFICATION)
2256 loadMultiple(cUnit, reg2, (1<<reg0 | 1<<reg1));
2258 int regMap = reg1 << 8 | reg0 << 4 | reg2;
2259 selfVerificationMemOpWrapper(cUnit, regMap,
2260 &selfVerificationLoadDoubleword);
2263 storeValuePair(cUnit, reg0, reg1, mir->dalvikInsn.vA, reg2);
2266 case OP_SPUT_OBJECT:
2267 case OP_SPUT_BOOLEAN:
2272 int valOffset = offsetof(StaticField, value);
2273 void *fieldPtr = (void*)
2274 (cUnit->method->clazz->pDvmDex->pResFields[mir->dalvikInsn.vB]);
2276 assert(fieldPtr != NULL);
2277 loadValue(cUnit, mir->dalvikInsn.vA, regvA);
2278 updateLiveRegister(cUnit, mir->dalvikInsn.vA, regvA);
2279 loadConstant(cUnit, NEXT_REG(regvA), (int) fieldPtr + valOffset);
2280 #if !defined(WITH_SELF_VERIFICATION)
2281 storeWordDisp(cUnit, NEXT_REG(regvA), 0 , regvA, -1);
2283 int regMap = regvA << 4 | NEXT_REG(regvA);
2284 selfVerificationMemOpWrapper(cUnit, regMap, &selfVerificationStore);
2288 case OP_SPUT_WIDE: {
2289 int reg0, reg1, reg2;
2290 int valOffset = offsetof(StaticField, value);
2291 void *fieldPtr = (void*)
2292 (cUnit->method->clazz->pDvmDex->pResFields[mir->dalvikInsn.vB]);
2294 assert(fieldPtr != NULL);
2296 reg1 = NEXT_REG(reg0);
2297 reg2 = NEXT_REG(reg1);
2298 loadValuePair(cUnit, mir->dalvikInsn.vA, reg0, reg1);
2299 updateLiveRegisterPair(cUnit, mir->dalvikInsn.vA, reg0, reg1);
2300 loadConstant(cUnit, reg2, (int) fieldPtr + valOffset);
2301 #if !defined(WITH_SELF_VERIFICATION)
2302 storeMultiple(cUnit, reg2, (1<<reg0 | 1<<reg1));
2304 int regMap = reg1 << 8 | reg0 << 4 | reg2;
2305 selfVerificationMemOpWrapper(cUnit, regMap,
2306 &selfVerificationStoreDoubleword);
2310 case OP_NEW_INSTANCE: {
2312 * Obey the calling convention and don't mess with the register
2315 ClassObject *classPtr = (void*)
2316 (cUnit->method->clazz->pDvmDex->pResClasses[mir->dalvikInsn.vB]);
2317 assert(classPtr != NULL);
2318 assert(classPtr->status & CLASS_INITIALIZED);
2320 * If it is going to throw, it should not make to the trace to begin
2323 assert((classPtr->accessFlags & (ACC_INTERFACE|ACC_ABSTRACT)) == 0);
2324 loadConstant(cUnit, r4PC, (int)dvmAllocObject);
2325 loadConstant(cUnit, r0, (int) classPtr);
2326 genExportPC(cUnit, mir, r2, r3 );
2327 loadConstant(cUnit, r1, ALLOC_DONT_TRACK);
2328 opReg(cUnit, OP_BLX, r4PC);
2329 /* generate a branch over if allocation is successful */
2330 opRegImm(cUnit, OP_CMP, r0, 0, rNone); /* NULL? */
2331 ArmLIR *branchOver = opCondBranch(cUnit, ARM_COND_NE);
2333 * OOM exception needs to be thrown here and cannot re-execute
2335 loadConstant(cUnit, r0,
2336 (int) (cUnit->method->insns + mir->offset));
2337 genDispatchToHandler(cUnit, TEMPLATE_THROW_EXCEPTION_COMMON);
2340 ArmLIR *target = newLIR0(cUnit, ARM_PSEUDO_TARGET_LABEL);
2341 target->defMask = ENCODE_ALL;
2342 branchOver->generic.target = (LIR *) target;
2343 storeValue(cUnit, r0, mir->dalvikInsn.vA, r1);
2346 case OP_CHECK_CAST: {
2348 * Obey the calling convention and don't mess with the register
2351 ClassObject *classPtr =
2352 (cUnit->method->clazz->pDvmDex->pResClasses[mir->dalvikInsn.vB]);
2353 loadConstant(cUnit, r1, (int) classPtr );
2354 loadValue(cUnit, mir->dalvikInsn.vA, r0); /* Ref */
2355 opRegImm(cUnit, OP_CMP, r0, 0, rNone); /* Null? */
2356 ArmLIR *branch1 = opCondBranch(cUnit, ARM_COND_EQ);
2357 /* r0 now contains object->clazz */
2358 loadWordDisp(cUnit, r0, offsetof(Object, clazz), r0);
2359 loadConstant(cUnit, r4PC, (int)dvmInstanceofNonTrivial);
2360 opRegReg(cUnit, OP_CMP, r0, r1);
2361 ArmLIR *branch2 = opCondBranch(cUnit, ARM_COND_EQ);
2362 opReg(cUnit, OP_BLX, r4PC);
2363 /* check cast failed - punt to the interpreter */
2364 genZeroCheck(cUnit, r0, mir->offset, NULL);
2365 /* check cast passed - branch target here */
2366 ArmLIR *target = newLIR0(cUnit, ARM_PSEUDO_TARGET_LABEL);
2367 target->defMask = ENCODE_ALL;
2368 branch1->generic.target = (LIR *)target;
2369 branch2->generic.target = (LIR *)target;
/*
 * Emit code for Dalvik format 11x instructions (single vAA operand):
 * move-exception, the move-result variants, wide/object returns, and
 * monitor-enter/exit.  Unhandled opcodes fall through to a single-step
 * in the interpreter.
 * NOTE(review): this chunk is an extraction with original line numbers
 * embedded in each line and some lines (break/brace lines) dropped;
 * code is left byte-identical.
 */
2378 static bool handleFmt11x(CompilationUnit *cUnit, MIR *mir)
2380 OpCode dalvikOpCode = mir->dalvikInsn.opCode;
2381 switch (dalvikOpCode) {
/* vAA <- self->exception (self fetched from the glue/InterpState) */
2382 case OP_MOVE_EXCEPTION: {
2383 int offset = offsetof(InterpState, self);
2384 int exOffset = offsetof(Thread, exception);
2385 loadWordDisp(cUnit, rGLUE, offset, r1);
2386 loadWordDisp(cUnit, r1, exOffset, r0);
2387 storeValue(cUnit, r0, mir->dalvikInsn.vA, r1);
/* vAA <- glue->retval (single word) */
2390 case OP_MOVE_RESULT:
2391 case OP_MOVE_RESULT_OBJECT: {
2392 int offset = offsetof(InterpState, retval);
2393 loadWordDisp(cUnit, rGLUE, offset, r0);
2394 storeValue(cUnit, r0, mir->dalvikInsn.vA, r1);
/* vAA/vAA+1 <- glue->retval (64-bit pair) */
2397 case OP_MOVE_RESULT_WIDE: {
2398 int offset = offsetof(InterpState, retval);
2399 loadWordDisp(cUnit, rGLUE, offset, r0);
2400 loadWordDisp(cUnit, rGLUE, offset+4, r1);
2401 storeValuePair(cUnit, r0, r1, mir->dalvikInsn.vA, r2);
/* glue->retval <- vAA pair, then common return sequence */
2404 case OP_RETURN_WIDE: {
2405 int vSrc = mir->dalvikInsn.vA;
2406 int reg0 = selectFirstRegister(cUnit, vSrc, true);
2407 int reg1 = NEXT_REG(reg0);
2408 int rScratch = NEXT_REG(reg1);
2409 int offset = offsetof(InterpState, retval);
2410 loadValuePair(cUnit, vSrc, reg0, reg1);
2411 storeWordDisp(cUnit, rGLUE, offset, reg0, rScratch);
2412 storeWordDisp(cUnit, rGLUE, offset + 4, reg1, rScratch);
2413 genReturnCommon(cUnit,mir);
/* glue->retval <- vAA (single word), then common return sequence */
2417 case OP_RETURN_OBJECT: {
2418 int vSrc = mir->dalvikInsn.vA;
2419 int reg0 = selectFirstRegister(cUnit, vSrc, false);
2420 int rScratch = NEXT_REG(reg0);
2421 loadValue(cUnit, vSrc, reg0);
2422 storeWordDisp(cUnit, rGLUE, offsetof(InterpState, retval),
2424 genReturnCommon(cUnit,mir);
/* call dvmLockObject/dvmUnlockObject(self, obj) after the null check */
2427 case OP_MONITOR_ENTER:
2428 case OP_MONITOR_EXIT: {
2429 int offset = offsetof(InterpState, self);
2430 loadValue(cUnit, mir->dalvikInsn.vA, r1);
2431 loadWordDisp(cUnit, rGLUE, offset, r0);
2432 if (dalvikOpCode == OP_MONITOR_ENTER) {
2433 loadConstant(cUnit, r2, (int)dvmLockObject);
2435 loadConstant(cUnit, r2, (int)dvmUnlockObject);
2437 genNullCheck(cUnit, mir->dalvikInsn.vA, r1, mir->offset, NULL);
2439 opReg(cUnit, OP_BLX, r2);
/* default: punt one instruction to the interpreter */
2443 genInterpSingleStep(cUnit, mir);
/*
 * Emit primitive-type conversions by calling out to the EABI soft-float
 * helpers (or dvmJit helpers for float/double -> long).  The local
 * prototypes below declare the runtime routines so their addresses can
 * be taken.  The two trailing integer arguments to genConversionCall
 * appear to be source/result word counts (1 = 32-bit, 2 = 64-bit) —
 * TODO(review): confirm against genConversionCall's definition.
 */
2452 static bool genConversionPortable(CompilationUnit *cUnit, MIR *mir)
2454 OpCode opCode = mir->dalvikInsn.opCode;
2456 float __aeabi_i2f( int op1 );
2457 int __aeabi_f2iz( float op1 );
2458 float __aeabi_d2f( double op1 );
2459 double __aeabi_f2d( float op1 );
2460 double __aeabi_i2d( int op1 );
2461 int __aeabi_d2iz( double op1 );
2462 float __aeabi_l2f( long op1 );
2463 double __aeabi_l2d( long op1 );
2464 s8 dvmJitf2l( float op1 );
2465 s8 dvmJitd2l( double op1 );
2468 case OP_INT_TO_FLOAT:
2469 return genConversionCall(cUnit, mir, (void*)__aeabi_i2f, 1, 1);
2470 case OP_FLOAT_TO_INT:
2471 return genConversionCall(cUnit, mir, (void*)__aeabi_f2iz, 1, 1);
2472 case OP_DOUBLE_TO_FLOAT:
2473 return genConversionCall(cUnit, mir, (void*)__aeabi_d2f, 2, 1);
2474 case OP_FLOAT_TO_DOUBLE:
2475 return genConversionCall(cUnit, mir, (void*)__aeabi_f2d, 1, 2);
2476 case OP_INT_TO_DOUBLE:
2477 return genConversionCall(cUnit, mir, (void*)__aeabi_i2d, 1, 2);
2478 case OP_DOUBLE_TO_INT:
2479 return genConversionCall(cUnit, mir, (void*)__aeabi_d2iz, 2, 1);
2480 case OP_FLOAT_TO_LONG:
2481 return genConversionCall(cUnit, mir, (void*)dvmJitf2l, 1, 2);
2482 case OP_LONG_TO_FLOAT:
2483 return genConversionCall(cUnit, mir, (void*)__aeabi_l2f, 2, 1);
2484 case OP_DOUBLE_TO_LONG:
2485 return genConversionCall(cUnit, mir, (void*)dvmJitd2l, 2, 2);
2486 case OP_LONG_TO_DOUBLE:
2487 return genConversionCall(cUnit, mir, (void*)__aeabi_l2d, 2, 2);
/*
 * Emit code for Dalvik format 12x instructions (vA, vB): 2-addr
 * arithmetic (delegated to genArithOp), type conversions, moves,
 * widening/narrowing, and array-length.
 */
2494 static bool handleFmt12x(CompilationUnit *cUnit, MIR *mir)
2496 OpCode opCode = mir->dalvikInsn.opCode;
2497 int vSrc1Dest = mir->dalvikInsn.vA;
2498 int vSrc2 = mir->dalvikInsn.vB;
2499 int reg0, reg1, reg2;
/* All 2-addr arithmetic goes through the common arith-op generator */
2501 if ( (opCode >= OP_ADD_INT_2ADDR) && (opCode <= OP_REM_DOUBLE_2ADDR)) {
2502 return genArithOp( cUnit, mir );
2506 * If data type is 64-bit, re-calculate the register numbers in the
2507 * corresponding cases.
2509 reg0 = selectFirstRegister(cUnit, vSrc2, false);
2510 reg1 = NEXT_REG(reg0);
2511 reg2 = NEXT_REG(reg1);
2514 case OP_INT_TO_FLOAT:
2515 case OP_FLOAT_TO_INT:
2516 case OP_DOUBLE_TO_FLOAT:
2517 case OP_FLOAT_TO_DOUBLE:
2518 case OP_INT_TO_DOUBLE:
2519 case OP_DOUBLE_TO_INT:
2520 case OP_FLOAT_TO_LONG:
2521 case OP_LONG_TO_FLOAT:
2522 case OP_DOUBLE_TO_LONG:
2523 case OP_LONG_TO_DOUBLE:
2524 return genConversion(cUnit, mir);
2527 return genArithOpInt(cUnit, mir, vSrc1Dest, vSrc1Dest, vSrc2);
2530 return genArithOpLong(cUnit,mir, vSrc1Dest, vSrc1Dest, vSrc2);
2532 return genArithOpFloat(cUnit, mir, vSrc1Dest, vSrc1Dest, vSrc2);
2534 return genArithOpDouble(cUnit, mir, vSrc1Dest, vSrc1Dest, vSrc2);
/* 64-bit move: re-pick registers with the wide flag set */
2535 case OP_MOVE_WIDE: {
2536 reg0 = selectFirstRegister(cUnit, vSrc2, true);
2537 reg1 = NEXT_REG(reg0);
2538 reg2 = NEXT_REG(reg1);
2540 loadValuePair(cUnit, vSrc2, reg0, reg1);
2541 storeValuePair(cUnit, reg0, reg1, vSrc1Dest, reg2);
/* sign-extend vB's word into a 64-bit pair via ASR #31 */
2544 case OP_INT_TO_LONG: {
2545 reg0 = selectFirstRegister(cUnit, vSrc2, true);
2546 reg1 = NEXT_REG(reg0);
2547 reg2 = NEXT_REG(reg1);
2549 loadValue(cUnit, vSrc2, reg0);
2550 opRegRegImm(cUnit, OP_ASR, reg1, reg0, 31, rNone);
2551 storeValuePair(cUnit, reg0, reg1, vSrc1Dest, reg2);
2555 case OP_MOVE_OBJECT:
2556 case OP_LONG_TO_INT:
2557 loadValue(cUnit, vSrc2, reg0);
2558 storeValue(cUnit, reg0, vSrc1Dest, reg1);
2560 case OP_INT_TO_BYTE:
2561 loadValue(cUnit, vSrc2, reg0);
2562 opRegReg(cUnit, OP_2BYTE, reg1, reg0);
2563 storeValue(cUnit, reg1, vSrc1Dest, reg2);
2565 case OP_INT_TO_SHORT:
2566 loadValue(cUnit, vSrc2, reg0);
2567 opRegReg(cUnit, OP_2SHORT, reg1, reg0);
2568 storeValue(cUnit, reg1, vSrc1Dest, reg2);
2570 case OP_INT_TO_CHAR:
2571 loadValue(cUnit, vSrc2, reg0);
2572 opRegReg(cUnit, OP_2CHAR, reg1, reg0);
2573 storeValue(cUnit, reg1, vSrc1Dest, reg2);
/* null-check the array ref, then load its length field */
2575 case OP_ARRAY_LENGTH: {
2576 int lenOffset = offsetof(ArrayObject, length);
2577 loadValue(cUnit, vSrc2, reg1);
2578 genNullCheck(cUnit, vSrc2, reg1, mir->offset, NULL);
2579 loadWordDisp(cUnit, reg1, lenOffset, reg0);
2580 storeValue(cUnit, reg0, vSrc1Dest, reg1);
/*
 * Emit code for format 21s instructions: const/16 and const-wide/16
 * (vAA <- sign-extended 16-bit literal BBBB).
 */
2589 static bool handleFmt21s(CompilationUnit *cUnit, MIR *mir)
2591 OpCode dalvikOpCode = mir->dalvikInsn.opCode;
2592 int reg0, reg1, reg2;
2594 /* It takes a few instructions to handle OP_CONST_WIDE_16 inline */
2595 if (dalvikOpCode == OP_CONST_WIDE_16) {
2596 int vDest = mir->dalvikInsn.vA;
2597 int BBBB = mir->dalvikInsn.vB;
2599 reg0 = selectFirstRegister(cUnit, vNone, true);
2600 reg1 = NEXT_REG(reg0);
2601 reg2 = NEXT_REG(reg1);
2603 loadConstant(cUnit, reg0, BBBB);
/* high word = sign extension of the low word */
2604 opRegRegImm(cUnit, OP_ASR, reg1, reg0, 31, rNone);
2606 /* Save the long values to the specified Dalvik register pair */
2607 storeValuePair(cUnit, reg0, reg1, vDest, reg2);
2608 } else if (dalvikOpCode == OP_CONST_16) {
2609 int vDest = mir->dalvikInsn.vA;
2610 int BBBB = mir->dalvikInsn.vB;
2612 reg0 = selectFirstRegister(cUnit, vNone, false);
2613 reg1 = NEXT_REG(reg0);
2615 loadConstant(cUnit, reg0, BBBB);
2616 storeValue(cUnit, reg0, vDest, reg1);
2623 /* Compare against zero */
/*
 * Emit code for format 21t instructions (if-eqz/nez/ltz/gez/gtz/lez):
 * compare vAA against zero and emit a conditional branch to the taken
 * block plus an unconditional branch to the fall-through block.
 * NOTE(review): the case bodies that set 'cond' (lines 2636-2655 in the
 * original) are missing from this extraction.
 */
2624 static bool handleFmt21t(CompilationUnit *cUnit, MIR *mir, BasicBlock *bb,
2627 OpCode dalvikOpCode = mir->dalvikInsn.opCode;
2628 ArmConditionCode cond;
2629 int reg0 = selectFirstRegister(cUnit, mir->dalvikInsn.vA, false);
2631 loadValue(cUnit, mir->dalvikInsn.vA, reg0);
2632 opRegImm(cUnit, OP_CMP, reg0, 0, rNone);
2634 //TUNING: break this out to allow use of Thumb2 CB[N]Z
2635 switch (dalvikOpCode) {
2656 LOGE("Unexpected opcode (%d) for Fmt21t\n", dalvikOpCode);
2659 genConditionalBranch(cUnit, cond, &labelList[bb->taken->id]);
2660 /* This most likely will be optimized away in a later phase */
2661 genUnconditionalBranch(cUnit, &labelList[bb->fallThrough->id]);
/*
 * Emit code for formats 22b/22s (binop with 8/16-bit literal):
 * add/rsub/mul/and/or/xor/shl/shr/ushr/div/rem against literal 'lit'.
 * Division and remainder call the EABI integer-divide helpers and
 * follow the ARM calling convention (args in r0/r1, result in r0/r1).
 */
2665 static bool handleFmt22b_Fmt22s(CompilationUnit *cUnit, MIR *mir)
2667 OpCode dalvikOpCode = mir->dalvikInsn.opCode;
2668 int vSrc = mir->dalvikInsn.vB;
2669 int vDest = mir->dalvikInsn.vA;
2670 int lit = mir->dalvikInsn.vC;
2671 OpKind op = 0; /* Make gcc happy */
2672 int reg0, reg1, regDest;
2674 reg0 = selectFirstRegister(cUnit, vSrc, false);
2675 reg1 = NEXT_REG(reg0);
2676 regDest = NEXT_REG(reg1);
/* Local prototypes so the helper addresses can be materialized */
2678 int __aeabi_idivmod(int op1, int op2);
2679 int __aeabi_idiv(int op1, int op2);
2681 switch (dalvikOpCode) {
2682 case OP_ADD_INT_LIT8:
2683 case OP_ADD_INT_LIT16:
2684 loadValue(cUnit, vSrc, reg0);
2685 opRegImm(cUnit, OP_ADD, reg0, lit, reg1);
2686 storeValue(cUnit, reg0, vDest, reg1);
/* reverse subtract: vDest = lit - vSrc */
2689 case OP_RSUB_INT_LIT8:
2691 loadValue(cUnit, vSrc, reg1);
2692 loadConstant(cUnit, reg0, lit);
2693 opRegRegReg(cUnit, OP_SUB, regDest, reg0, reg1);
2694 storeValue(cUnit, regDest, vDest, reg1);
2697 case OP_MUL_INT_LIT8:
2698 case OP_MUL_INT_LIT16:
2699 case OP_AND_INT_LIT8:
2700 case OP_AND_INT_LIT16:
2701 case OP_OR_INT_LIT8:
2702 case OP_OR_INT_LIT16:
2703 case OP_XOR_INT_LIT8:
2704 case OP_XOR_INT_LIT16:
2705 loadValue(cUnit, vSrc, reg0);
/* inner switch selects the OpKind; op assignments are missing from
 * this extraction (original lines 2709-2725) */
2706 switch (dalvikOpCode) {
2707 case OP_MUL_INT_LIT8:
2708 case OP_MUL_INT_LIT16:
2711 case OP_AND_INT_LIT8:
2712 case OP_AND_INT_LIT16:
2715 case OP_OR_INT_LIT8:
2716 case OP_OR_INT_LIT16:
2719 case OP_XOR_INT_LIT8:
2720 case OP_XOR_INT_LIT16:
2726 opRegRegImm(cUnit, op, regDest, reg0, lit, reg1);
2727 storeValue(cUnit, regDest, vDest, reg1);
2730 case OP_SHL_INT_LIT8:
2731 case OP_SHR_INT_LIT8:
2732 case OP_USHR_INT_LIT8:
2733 loadValue(cUnit, vSrc, reg0);
2734 switch (dalvikOpCode) {
2735 case OP_SHL_INT_LIT8:
2738 case OP_SHR_INT_LIT8:
2741 case OP_USHR_INT_LIT8:
2744 default: dvmAbort();
2747 opRegRegImm(cUnit, op, regDest, reg0, lit, reg1);
2748 storeValue(cUnit, regDest, vDest, reg1);
2750 storeValue(cUnit, reg0, vDest, reg1);
2754 case OP_DIV_INT_LIT8:
2755 case OP_DIV_INT_LIT16:
2756 /* Register usage based on the calling convention */
2758 /* Let the interpreter deal with div by 0 */
2759 genInterpSingleStep(cUnit, mir);
2762 loadConstant(cUnit, r2, (int)__aeabi_idiv);
2763 loadConstant(cUnit, r1, lit);
2764 loadValue(cUnit, vSrc, r0);
2765 opReg(cUnit, OP_BLX, r2);
/* quotient comes back in r0 */
2766 storeValue(cUnit, r0, vDest, r2);
2769 case OP_REM_INT_LIT8:
2770 case OP_REM_INT_LIT16:
2771 /* Register usage based on the calling convention */
2773 /* Let the interpreter deal with div by 0 */
2774 genInterpSingleStep(cUnit, mir);
2777 loadConstant(cUnit, r2, (int)__aeabi_idivmod);
2778 loadConstant(cUnit, r1, lit);
2779 loadValue(cUnit, vSrc, r0);
2780 opReg(cUnit, OP_BLX, r2);
/* remainder comes back in r1 from __aeabi_idivmod */
2781 storeValue(cUnit, r1, vDest, r2);
/*
 * Emit code for format 22c instructions (vA, vB, thing@CCCC):
 * new-array, instance-of, and the iget/iput family (byte offset taken
 * from the resolved InstField).
 */
2789 static bool handleFmt22c(CompilationUnit *cUnit, MIR *mir)
2791 OpCode dalvikOpCode = mir->dalvikInsn.opCode;
/* Resolve the instance field up front for all iget/iput variants */
2794 if (dalvikOpCode >= OP_IGET && dalvikOpCode <= OP_IPUT_SHORT) {
2795 InstField *pInstField = (InstField *)
2796 cUnit->method->clazz->pDvmDex->pResFields[mir->dalvikInsn.vC];
2798 assert(pInstField != NULL);
2799 fieldOffset = pInstField->byteOffset;
2801 /* Deliberately break the code while making the compiler happy */
2804 switch (dalvikOpCode) {
2805 case OP_NEW_ARRAY: {
2806 void *classPtr = (void*)
2807 (cUnit->method->clazz->pDvmDex->pResClasses[mir->dalvikInsn.vC]);
2808 assert(classPtr != NULL);
2809 loadValue(cUnit, mir->dalvikInsn.vB, r1); /* Len */
2810 loadConstant(cUnit, r0, (int) classPtr );
2811 loadConstant(cUnit, r4PC, (int)dvmAllocArrayByClass);
2813 * "len < 0": bail to the interpreter to re-execute the
2817 genRegImmCheck(cUnit, ARM_COND_MI, r1, 0, mir->offset, NULL);
2818 genExportPC(cUnit, mir, r2, r3 );
2819 loadConstant(cUnit, r2, ALLOC_DONT_TRACK);
2820 opReg(cUnit, OP_BLX, r4PC);
2821 /* generate a branch over if allocation is successful */
2822 opRegImm(cUnit, OP_CMP, r0, 0, rNone); /* NULL? */
2823 ArmLIR *branchOver = opCondBranch(cUnit, ARM_COND_NE);
2825 * OOM exception needs to be thrown here and cannot re-execute
2827 loadConstant(cUnit, r0,
2828 (int) (cUnit->method->insns + mir->offset));
2829 genDispatchToHandler(cUnit, TEMPLATE_THROW_EXCEPTION_COMMON);
/* successful allocation resumes here */
2832 ArmLIR *target = newLIR0(cUnit, ARM_PSEUDO_TARGET_LABEL);
2833 target->defMask = ENCODE_ALL;
2834 branchOver->generic.target = (LIR *) target;
2835 storeValue(cUnit, r0, mir->dalvikInsn.vA, r1);
2838 case OP_INSTANCE_OF: {
2839 ClassObject *classPtr =
2840 (cUnit->method->clazz->pDvmDex->pResClasses[mir->dalvikInsn.vC]);
2841 assert(classPtr != NULL);
2842 loadValue(cUnit, mir->dalvikInsn.vB, r0); /* Ref */
2843 loadConstant(cUnit, r2, (int) classPtr );
2844 //TUNING: compare to 0 primitive to allow use of CB[N]Z
2845 opRegImm(cUnit, OP_CMP, r0, 0, rNone); /* NULL? */
2846 /* When taken r0 has NULL which can be used for store directly */
2847 ArmLIR *branch1 = opCondBranch(cUnit, ARM_COND_EQ);
2848 /* r1 now contains object->clazz */
2849 loadWordDisp(cUnit, r0, offsetof(Object, clazz), r1);
2850 loadConstant(cUnit, r4PC, (int)dvmInstanceofNonTrivial);
2851 loadConstant(cUnit, r0, 1); /* Assume true */
2852 opRegReg(cUnit, OP_CMP, r1, r2);
2853 ArmLIR *branch2 = opCondBranch(cUnit, ARM_COND_EQ);
/* slow path: dvmInstanceofNonTrivial(obj->clazz, classPtr) */
2854 opRegReg(cUnit, OP_MOV, r0, r1);
2855 opRegReg(cUnit, OP_MOV, r1, r2);
2856 opReg(cUnit, OP_BLX, r4PC);
2857 /* branch target here */
2858 ArmLIR *target = newLIR0(cUnit, ARM_PSEUDO_TARGET_LABEL);
2859 target->defMask = ENCODE_ALL;
2860 storeValue(cUnit, r0, mir->dalvikInsn.vA, r1);
2861 branch1->generic.target = (LIR *)target;
2862 branch2->generic.target = (LIR *)target;
/* iget/iput variants dispatch to the shared field-access generators;
 * several case labels are missing from this extraction */
2866 genIGetWide(cUnit, mir, fieldOffset);
2869 case OP_IGET_OBJECT:
2870 genIGet(cUnit, mir, WORD, fieldOffset);
2872 case OP_IGET_BOOLEAN:
2873 genIGet(cUnit, mir, UNSIGNED_BYTE, fieldOffset);
2876 genIGet(cUnit, mir, SIGNED_BYTE, fieldOffset);
2879 genIGet(cUnit, mir, UNSIGNED_HALF, fieldOffset);
2882 genIGet(cUnit, mir, SIGNED_HALF, fieldOffset);
2885 genIPutWide(cUnit, mir, fieldOffset);
2888 case OP_IPUT_OBJECT:
2889 genIPut(cUnit, mir, WORD, fieldOffset);
2893 genIPut(cUnit, mir, UNSIGNED_HALF, fieldOffset);
2896 case OP_IPUT_BOOLEAN:
2897 genIPut(cUnit, mir, UNSIGNED_BYTE, fieldOffset);
/*
 * Emit code for format 22cs instructions (quickened field access):
 * the field byte offset is encoded directly in vC, so no resolution
 * is needed.
 */
2905 static bool handleFmt22cs(CompilationUnit *cUnit, MIR *mir)
2907 OpCode dalvikOpCode = mir->dalvikInsn.opCode;
2908 int fieldOffset = mir->dalvikInsn.vC;
2909 switch (dalvikOpCode) {
2911 case OP_IGET_OBJECT_QUICK:
2912 genIGet(cUnit, mir, WORD, fieldOffset);
2915 case OP_IPUT_OBJECT_QUICK:
2916 genIPut(cUnit, mir, WORD, fieldOffset);
2918 case OP_IGET_WIDE_QUICK:
2919 genIGetWide(cUnit, mir, fieldOffset);
2921 case OP_IPUT_WIDE_QUICK:
2922 genIPutWide(cUnit, mir, fieldOffset);
2931 /* Compare two registers and branch */
/*
 * Emit code for format 22t instructions (if-eq/ne/lt/ge/gt/le vA, vB):
 * compare two virtual registers and branch.  Load order is chosen so
 * that a register already live for one operand can be reused via a
 * move.  NOTE(review): the case bodies that set 'cond' (original lines
 * 2955-2974) are missing from this extraction.
 */
2932 static bool handleFmt22t(CompilationUnit *cUnit, MIR *mir, BasicBlock *bb,
2935 OpCode dalvikOpCode = mir->dalvikInsn.opCode;
2936 ArmConditionCode cond;
2939 if (cUnit->registerScoreboard.liveDalvikReg == (int) mir->dalvikInsn.vA) {
2940 reg0 = selectFirstRegister(cUnit, mir->dalvikInsn.vA, false);
2941 reg1 = NEXT_REG(reg0);
2942 /* Load vB first since vA can be fetched via a move */
2943 loadValue(cUnit, mir->dalvikInsn.vB, reg1);
2944 loadValue(cUnit, mir->dalvikInsn.vA, reg0);
2946 reg0 = selectFirstRegister(cUnit, mir->dalvikInsn.vB, false);
2947 reg1 = NEXT_REG(reg0);
2948 /* Load vA first since vB can be fetched via a move */
2949 loadValue(cUnit, mir->dalvikInsn.vA, reg0);
2950 loadValue(cUnit, mir->dalvikInsn.vB, reg1);
2952 opRegReg(cUnit, OP_CMP, reg0, reg1);
2954 switch (dalvikOpCode) {
2975 LOGE("Unexpected opcode (%d) for Fmt22t\n", dalvikOpCode);
2978 genConditionalBranch(cUnit, cond, &labelList[bb->taken->id]);
2979 /* This most likely will be optimized away in a later phase */
2980 genUnconditionalBranch(cUnit, &labelList[bb->fallThrough->id]);
/*
 * Emit code for formats 22x/32x (moves with 16-bit register numbers):
 * single-word and wide moves between Dalvik registers.
 */
2984 static bool handleFmt22x_Fmt32x(CompilationUnit *cUnit, MIR *mir)
2986 OpCode opCode = mir->dalvikInsn.opCode;
2987 int vSrc1Dest = mir->dalvikInsn.vA;
2988 int vSrc2 = mir->dalvikInsn.vB;
2989 int reg0, reg1, reg2;
2993 case OP_MOVE_OBJECT_16:
2994 case OP_MOVE_FROM16:
2995 case OP_MOVE_OBJECT_FROM16: {
2996 reg0 = selectFirstRegister(cUnit, vSrc2, false);
2997 reg1 = NEXT_REG(reg0);
2998 loadValue(cUnit, vSrc2, reg0);
2999 storeValue(cUnit, reg0, vSrc1Dest, reg1);
/* 64-bit moves use a register pair plus a scratch register */
3002 case OP_MOVE_WIDE_16:
3003 case OP_MOVE_WIDE_FROM16: {
3004 reg0 = selectFirstRegister(cUnit, vSrc2, true);
3005 reg1 = NEXT_REG(reg0);
3006 reg2 = NEXT_REG(reg1);
3007 loadValuePair(cUnit, vSrc2, reg0, reg1);
3008 storeValuePair(cUnit, reg0, reg1, vSrc1Dest, reg2);
/*
 * Emit code for format 23x instructions (vAA, vBB, vCC): 3-operand
 * arithmetic (delegated to genArithOp), cmp variants, and array
 * get/put.  The trailing integer passed to genArrayGet/genArrayPut is
 * the log2 element-size scale (0=byte, 1=half, 2=word, 3=long).
 */
3017 static bool handleFmt23x(CompilationUnit *cUnit, MIR *mir)
3019 OpCode opCode = mir->dalvikInsn.opCode;
3020 int vA = mir->dalvikInsn.vA;
3021 int vB = mir->dalvikInsn.vB;
3022 int vC = mir->dalvikInsn.vC;
3024 /* Don't optimize for register usage since out-of-line handlers are used */
3025 if ( (opCode >= OP_ADD_INT) && (opCode <= OP_REM_DOUBLE)) {
3026 return genArithOp( cUnit, mir );
3032 case OP_CMPL_DOUBLE:
3033 case OP_CMPG_DOUBLE:
3034 return genCmpX(cUnit, mir, vA, vB, vC);
3036 genCmpLong(cUnit, mir, vA, vB, vC);
3039 genArrayGet(cUnit, mir, LONG, vB, vC, vA, 3);
3042 case OP_AGET_OBJECT:
3043 genArrayGet(cUnit, mir, WORD, vB, vC, vA, 2);
3045 case OP_AGET_BOOLEAN:
3046 genArrayGet(cUnit, mir, UNSIGNED_BYTE, vB, vC, vA, 0);
3049 genArrayGet(cUnit, mir, SIGNED_BYTE, vB, vC, vA, 0);
3052 genArrayGet(cUnit, mir, UNSIGNED_HALF, vB, vC, vA, 1);
3055 genArrayGet(cUnit, mir, SIGNED_HALF, vB, vC, vA, 1);
3058 genArrayPut(cUnit, mir, LONG, vB, vC, vA, 3);
3061 case OP_APUT_OBJECT:
3062 genArrayPut(cUnit, mir, WORD, vB, vC, vA, 2);
3066 genArrayPut(cUnit, mir, UNSIGNED_HALF, vB, vC, vA, 1);
3069 case OP_APUT_BOOLEAN:
3070 genArrayPut(cUnit, mir, UNSIGNED_BYTE, vB, vC, vA, 0);
/*
 * Emit code for format 31t instructions: fill-array-data and
 * packed/sparse-switch, all of which call into interpreter helper
 * routines.
 */
3078 static bool handleFmt31t(CompilationUnit *cUnit, MIR *mir)
3080 OpCode dalvikOpCode = mir->dalvikInsn.opCode;
3081 switch (dalvikOpCode) {
3082 case OP_FILL_ARRAY_DATA: {
3083 loadConstant(cUnit, r4PC, (int)dvmInterpHandleFillArrayData);
3084 loadValue(cUnit, mir->dalvikInsn.vA, r0);
/* r1 <- address of the embedded data table (vB is a code-unit offset) */
3085 loadConstant(cUnit, r1, (mir->dalvikInsn.vB << 1) +
3086 (int) (cUnit->method->insns + mir->offset));
3087 genExportPC(cUnit, mir, r2, r3 );
3088 opReg(cUnit, OP_BLX, r4PC);
/* helper returns 0 on failure -> bail to interpreter */
3089 genZeroCheck(cUnit, r0, mir->offset, NULL);
3094 * - Add a 1 to 3-entry per-location cache here to completely
3095 * bypass the dvmInterpHandle[Packed/Sparse]Switch call w/ chaining
3096 * - Use out-of-line handlers for both of these
3098 case OP_PACKED_SWITCH:
3099 case OP_SPARSE_SWITCH: {
3100 if (dalvikOpCode == OP_PACKED_SWITCH) {
3101 loadConstant(cUnit, r4PC, (int)dvmInterpHandlePackedSwitch);
3103 loadConstant(cUnit, r4PC, (int)dvmInterpHandleSparseSwitch);
3105 loadValue(cUnit, mir->dalvikInsn.vA, r1);
3106 loadConstant(cUnit, r0, (mir->dalvikInsn.vB << 1) +
3107 (int) (cUnit->method->insns + mir->offset));
3108 opReg(cUnit, OP_BLX, r4PC);
/* helper returns branch offset in code units; convert to a byte
 * offset (r0 += r0) and dispatch through dvmJitToInterpNoChain */
3109 loadConstant(cUnit, r1, (int)(cUnit->method->insns + mir->offset));
3110 loadWordDisp(cUnit, rGLUE, offsetof(InterpState,
3111 jitToInterpEntries.dvmJitToInterpNoChain), r2);
3112 opRegReg(cUnit, OP_ADD, r0, r0);
3113 opRegRegReg(cUnit, OP_ADD, r4PC, r0, r1);
3114 opReg(cUnit, OP_BLX, r2);
/*
 * Emit code for formats 35c/3rc (the invoke family): virtual, super,
 * direct, static, and interface invokes, plus filled-new-array (which
 * single-steps in the interpreter).  The interface case emits a
 * predicted-chaining sequence documented by the inline example below.
 */
3123 static bool handleFmt35c_3rc(CompilationUnit *cUnit, MIR *mir, BasicBlock *bb,
3126 ArmLIR *retChainingCell = NULL;
3127 ArmLIR *pcrLabel = NULL;
3129 if (bb->fallThrough != NULL)
3130 retChainingCell = &labelList[bb->fallThrough->id];
3132 DecodedInstruction *dInsn = &mir->dalvikInsn;
3133 switch (mir->dalvikInsn.opCode) {
3135 * calleeMethod = this->clazz->vtable[
3136 * method->clazz->pDvmDex->pResMethods[BBBB]->methodIndex
3139 case OP_INVOKE_VIRTUAL:
3140 case OP_INVOKE_VIRTUAL_RANGE: {
3141 ArmLIR *predChainingCell = &labelList[bb->taken->id];
3143 cUnit->method->clazz->pDvmDex->pResMethods[dInsn->vB]->
3146 if (mir->dalvikInsn.opCode == OP_INVOKE_VIRTUAL)
3147 genProcessArgsNoRange(cUnit, mir, dInsn, &pcrLabel);
3149 genProcessArgsRange(cUnit, mir, dInsn, &pcrLabel);
3151 genInvokeVirtualCommon(cUnit, mir, methodIndex,
3158 * calleeMethod = method->clazz->super->vtable[method->clazz->pDvmDex
3159 * ->pResMethods[BBBB]->methodIndex]
3161 /* TODO - not exercised in RunPerf.jar */
3162 case OP_INVOKE_SUPER:
3163 case OP_INVOKE_SUPER_RANGE: {
3164 int mIndex = cUnit->method->clazz->pDvmDex->
3165 pResMethods[dInsn->vB]->methodIndex;
3166 const Method *calleeMethod =
3167 cUnit->method->clazz->super->vtable[mIndex];
3169 if (mir->dalvikInsn.opCode == OP_INVOKE_SUPER)
3170 genProcessArgsNoRange(cUnit, mir, dInsn, &pcrLabel);
3172 genProcessArgsRange(cUnit, mir, dInsn, &pcrLabel);
3174 /* r0 = calleeMethod */
3175 loadConstant(cUnit, r0, (int) calleeMethod);
3177 genInvokeSingletonCommon(cUnit, mir, bb, labelList, pcrLabel,
3181 /* calleeMethod = method->clazz->pDvmDex->pResMethods[BBBB] */
3182 case OP_INVOKE_DIRECT:
3183 case OP_INVOKE_DIRECT_RANGE: {
3184 const Method *calleeMethod =
3185 cUnit->method->clazz->pDvmDex->pResMethods[dInsn->vB];
3187 if (mir->dalvikInsn.opCode == OP_INVOKE_DIRECT)
3188 genProcessArgsNoRange(cUnit, mir, dInsn, &pcrLabel);
3190 genProcessArgsRange(cUnit, mir, dInsn, &pcrLabel);
3192 /* r0 = calleeMethod */
3193 loadConstant(cUnit, r0, (int) calleeMethod);
3195 genInvokeSingletonCommon(cUnit, mir, bb, labelList, pcrLabel,
3199 /* calleeMethod = method->clazz->pDvmDex->pResMethods[BBBB] */
3200 case OP_INVOKE_STATIC:
3201 case OP_INVOKE_STATIC_RANGE: {
3202 const Method *calleeMethod =
3203 cUnit->method->clazz->pDvmDex->pResMethods[dInsn->vB];
3205 if (mir->dalvikInsn.opCode == OP_INVOKE_STATIC)
3206 genProcessArgsNoRange(cUnit, mir, dInsn,
3207 NULL /* no null check */);
3209 genProcessArgsRange(cUnit, mir, dInsn,
3210 NULL /* no null check */);
3212 /* r0 = calleeMethod */
3213 loadConstant(cUnit, r0, (int) calleeMethod);
3215 genInvokeSingletonCommon(cUnit, mir, bb, labelList, pcrLabel,
3220 * TODO: When we move to using upper registers in Thumb2, make sure
3221 * the register allocator is told that r9, r10, & r12 are killed
3225 * calleeMethod = dvmFindInterfaceMethodInCache(this->clazz,
3226 * BBBB, method, method->clazz->pDvmDex)
3228 * Given "invoke-interface {v0}", the following is the generated code:
3230 * 0x426a9abe : ldr r0, [r5, #0] --+
3231 * 0x426a9ac0 : mov r7, r5 |
3232 * 0x426a9ac2 : sub r7, #24 |
3233 * 0x426a9ac4 : cmp r0, #0 | genProcessArgsNoRange
3234 * 0x426a9ac6 : beq 0x426a9afe |
3235 * 0x426a9ac8 : stmia r7, <r0> --+
3236 * 0x426a9aca : ldr r4, [pc, #104] --> r4 <- dalvikPC of this invoke
3237 * 0x426a9acc : add r1, pc, #52 --> r1 <- &retChainingCell
3238 * 0x426a9ace : add r2, pc, #60 --> r2 <- &predictedChainingCell
3239 * 0x426a9ad0 : blx_1 0x426a918c --+ TEMPLATE_INVOKE_METHOD_
3240 * 0x426a9ad2 : blx_2 see above --+ PREDICTED_CHAIN
3241 * 0x426a9ad4 : b 0x426a9b0c --> off to the predicted chain
3242 * 0x426a9ad6 : b 0x426a9afe --> punt to the interpreter
3243 * 0x426a9ad8 : mov r9, r1 --+
3244 * 0x426a9ada : mov r10, r2 |
3245 * 0x426a9adc : mov r12, r3 |
3246 * 0x426a9ade : mov r0, r3 |
3247 * 0x426a9ae0 : mov r1, #74 | dvmFindInterfaceMethodInCache
3248 * 0x426a9ae2 : ldr r2, [pc, #76] |
3249 * 0x426a9ae4 : ldr r3, [pc, #68] |
3250 * 0x426a9ae6 : ldr r7, [pc, #64] |
3251 * 0x426a9ae8 : blx r7 --+
3252 * 0x426a9aea : mov r1, r9 --> r1 <- rechain count
3253 * 0x426a9aec : cmp r1, #0 --> compare against 0
3254 * 0x426a9aee : bgt 0x426a9af8 --> >=0? don't rechain
3255 * 0x426a9af0 : ldr r7, [r6, #96] --+
3256 * 0x426a9af2 : mov r2, r10 | dvmJitToPatchPredictedChain
3257 * 0x426a9af4 : mov r3, r12 |
3258 * 0x426a9af6 : blx r7 --+
3259 * 0x426a9af8 : add r1, pc, #8 --> r1 <- &retChainingCell
3260 * 0x426a9afa : blx_1 0x426a9098 --+ TEMPLATE_INVOKE_METHOD_NO_OPT
3261 * 0x426a9afc : blx_2 see above --+
3262 * -------- reconstruct dalvik PC : 0x428b786c @ +0x001e
3263 * 0x426a9afe (0042): ldr r0, [pc, #52]
3264 * Exception_Handling:
3265 * 0x426a9b00 (0044): ldr r1, [r6, #84]
3266 * 0x426a9b02 (0046): blx r1
3267 * 0x426a9b04 (0048): .align4
3268 * -------- chaining cell (hot): 0x0021
3269 * 0x426a9b04 (0048): ldr r0, [r6, #92]
3270 * 0x426a9b06 (004a): blx r0
3271 * 0x426a9b08 (004c): data 0x7872(30834)
3272 * 0x426a9b0a (004e): data 0x428b(17035)
3273 * 0x426a9b0c (0050): .align4
3274 * -------- chaining cell (predicted)
3275 * 0x426a9b0c (0050): data 0x0000(0) --> will be patched into bx
3276 * 0x426a9b0e (0052): data 0x0000(0)
3277 * 0x426a9b10 (0054): data 0x0000(0) --> class
3278 * 0x426a9b12 (0056): data 0x0000(0)
3279 * 0x426a9b14 (0058): data 0x0000(0) --> method
3280 * 0x426a9b16 (005a): data 0x0000(0)
3281 * 0x426a9b18 (005c): data 0x0000(0) --> reset count
3282 * 0x426a9b1a (005e): data 0x0000(0)
3283 * 0x426a9b28 (006c): .word (0xad0392a5)
3284 * 0x426a9b2c (0070): .word (0x6e750)
3285 * 0x426a9b30 (0074): .word (0x4109a618)
3286 * 0x426a9b34 (0078): .word (0x428b786c)
3288 case OP_INVOKE_INTERFACE:
3289 case OP_INVOKE_INTERFACE_RANGE: {
3290 ArmLIR *predChainingCell = &labelList[bb->taken->id];
3291 int methodIndex = dInsn->vB;
3293 if (mir->dalvikInsn.opCode == OP_INVOKE_INTERFACE)
3294 genProcessArgsNoRange(cUnit, mir, dInsn, &pcrLabel);
3296 genProcessArgsRange(cUnit, mir, dInsn, &pcrLabel);
3298 /* "this" is already left in r0 by genProcessArgs* */
3300 /* r4PC = dalvikCallsite */
3301 loadConstant(cUnit, r4PC,
3302 (int) (cUnit->method->insns + mir->offset));
3304 /* r1 = &retChainingCell */
3305 ArmLIR *addrRetChain =
3306 opRegRegImm(cUnit, OP_ADD, r1, rpc, 0, rNone);
3307 addrRetChain->generic.target = (LIR *) retChainingCell;
3309 /* r2 = &predictedChainingCell */
3310 ArmLIR *predictedChainingCell =
3311 opRegRegImm(cUnit, OP_ADD, r2, rpc, 0, rNone);
3312 predictedChainingCell->generic.target = (LIR *) predChainingCell;
3314 genDispatchToHandler(cUnit, TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN);
3316 /* return through lr - jump to the chaining cell */
3317 genUnconditionalBranch(cUnit, predChainingCell);
3320 * null-check on "this" may have been eliminated, but we still need
3321 * a PC-reconstruction label for stack overflow bailout.
3323 if (pcrLabel == NULL) {
3324 int dPC = (int) (cUnit->method->insns + mir->offset);
3325 pcrLabel = dvmCompilerNew(sizeof(ArmLIR), true);
3326 pcrLabel->opCode = ARM_PSEUDO_PC_RECONSTRUCTION_CELL;
3327 pcrLabel->operands[0] = dPC;
3328 pcrLabel->operands[1] = mir->offset;
3329 /* Insert the place holder to the growable list */
3330 dvmInsertGrowableList(&cUnit->pcReconstructionList, pcrLabel);
3333 /* return through lr+2 - punt to the interpreter */
3334 genUnconditionalBranch(cUnit, pcrLabel);
3337 * return through lr+4 - fully resolve the callee method.
3339 * r2 <- &predictedChainCell
3342 * r7 <- this->class->vtable
3345 /* Save count, &predictedChainCell, and class to high regs first */
3346 opRegReg(cUnit, OP_MOV, r9, r1);
3347 opRegReg(cUnit, OP_MOV, r10, r2);
3348 opRegReg(cUnit, OP_MOV, r12, r3);
3350 /* r0 now contains this->clazz */
3351 opRegReg(cUnit, OP_MOV, r0, r3);
3354 loadConstant(cUnit, r1, dInsn->vB);
3356 /* r2 = method (caller) */
3357 loadConstant(cUnit, r2, (int) cUnit->method);
3360 loadConstant(cUnit, r3, (int) cUnit->method->clazz->pDvmDex);
3362 loadConstant(cUnit, r7,
3363 (intptr_t) dvmFindInterfaceMethodInCache);
3364 opReg(cUnit, OP_BLX, r7);
3366 /* r0 = calleeMethod (returned from dvmFindInterfaceMethodInCache */
3368 opRegReg(cUnit, OP_MOV, r1, r9);
3370 /* Check if rechain limit is reached */
3371 opRegImm(cUnit, OP_CMP, r1, 0, rNone);
3373 ArmLIR *bypassRechaining = opCondBranch(cUnit, ARM_COND_GT);
3375 loadWordDisp(cUnit, rGLUE, offsetof(InterpState,
3376 jitToInterpEntries.dvmJitToPatchPredictedChain), r7);
3378 opRegReg(cUnit, OP_MOV, r2, r10);
3379 opRegReg(cUnit, OP_MOV, r3, r12);
3383 * r2 = &predictedChainingCell
3386 * &returnChainingCell has been loaded into r1 but is not needed
3387 * when patching the chaining cell and will be clobbered upon
3388 * returning so it will be reconstructed again.
3390 opReg(cUnit, OP_BLX, r7);
3392 /* r1 = &retChainingCell */
3393 addrRetChain = opRegRegImm(cUnit, OP_ADD, r1, rpc, 0, rNone);
3394 addrRetChain->generic.target = (LIR *) retChainingCell;
3396 bypassRechaining->generic.target = (LIR *) addrRetChain;
3399 * r0 = this, r1 = calleeMethod,
3400 * r1 = &ChainingCell,
3401 * r4PC = callsiteDPC,
3403 genDispatchToHandler(cUnit, TEMPLATE_INVOKE_METHOD_NO_OPT);
3404 #if defined(INVOKE_STATS)
3405 gDvmJit.invokePredictedChain++;
3407 /* Handle exceptions using the interpreter */
3408 genTrap(cUnit, mir->offset, pcrLabel);
3412 case OP_INVOKE_DIRECT_EMPTY: {
3415 case OP_FILLED_NEW_ARRAY:
3416 case OP_FILLED_NEW_ARRAY_RANGE: {
3417 /* Just let the interpreter deal with these */
3418 genInterpSingleStep(cUnit, mir);
/*
 * Emit code for formats 35ms/3rms (quickened invokes): virtual-quick
 * uses the vtable index encoded in vB; super-quick resolves the callee
 * at compile time from the superclass vtable.
 */
3427 static bool handleFmt35ms_3rms(CompilationUnit *cUnit, MIR *mir,
3428 BasicBlock *bb, ArmLIR *labelList)
3430 ArmLIR *retChainingCell = &labelList[bb->fallThrough->id];
3431 ArmLIR *predChainingCell = &labelList[bb->taken->id];
3432 ArmLIR *pcrLabel = NULL;
3434 DecodedInstruction *dInsn = &mir->dalvikInsn;
3435 switch (mir->dalvikInsn.opCode) {
3436 /* calleeMethod = this->clazz->vtable[BBBB] */
3437 case OP_INVOKE_VIRTUAL_QUICK_RANGE:
3438 case OP_INVOKE_VIRTUAL_QUICK: {
3439 int methodIndex = dInsn->vB;
3440 if (mir->dalvikInsn.opCode == OP_INVOKE_VIRTUAL_QUICK)
3441 genProcessArgsNoRange(cUnit, mir, dInsn, &pcrLabel);
3443 genProcessArgsRange(cUnit, mir, dInsn, &pcrLabel);
3445 genInvokeVirtualCommon(cUnit, mir, methodIndex,
3451 /* calleeMethod = method->clazz->super->vtable[BBBB] */
3452 case OP_INVOKE_SUPER_QUICK:
3453 case OP_INVOKE_SUPER_QUICK_RANGE: {
3454 const Method *calleeMethod =
3455 cUnit->method->clazz->super->vtable[dInsn->vB];
3457 if (mir->dalvikInsn.opCode == OP_INVOKE_SUPER_QUICK)
3458 genProcessArgsNoRange(cUnit, mir, dInsn, &pcrLabel);
3460 genProcessArgsRange(cUnit, mir, dInsn, &pcrLabel);
3462 /* r0 = calleeMethod */
3463 loadConstant(cUnit, r0, (int) calleeMethod);
3465 genInvokeSingletonCommon(cUnit, mir, bb, labelList, pcrLabel,
3467 /* Handle exceptions using the interpreter */
3468 genTrap(cUnit, mir->offset, pcrLabel);
3478 * NOTE: We assume here that the special native inline routines
3479 * are side-effect free. By making this assumption, we can safely
3480 * re-execute the routine from the interpreter if it decides it
3481 * wants to throw an exception. We still need to EXPORT_PC(), though.
/*
 * Emit code for OP_EXECUTE_INLINE: dispatch known intrinsics
 * (String.length, Math.abs/min/max/sqrt, String.charAt, ...) to
 * dedicated generators; anything not inlined falls through to a call
 * of the C routine from dvmGetInlineOpsTable(), with the address of
 * glue->retval pushed as the hidden result pointer.
 */
3483 static bool handleFmt3inline(CompilationUnit *cUnit, MIR *mir)
3485 DecodedInstruction *dInsn = &mir->dalvikInsn;
3486 switch( mir->dalvikInsn.opCode) {
3487 case OP_EXECUTE_INLINE: {
3489 const InlineOperation* inLineTable = dvmGetInlineOpsTable();
3490 int offset = offsetof(InterpState, retval);
3491 int operation = dInsn->vB;
3493 switch (operation) {
3494 case INLINE_EMPTYINLINEMETHOD:
3495 return false; /* Nop */
3496 case INLINE_STRING_LENGTH:
3497 return genInlinedStringLength(cUnit, mir);
3498 case INLINE_MATH_ABS_INT:
3499 return genInlinedAbsInt(cUnit, mir);
3500 case INLINE_MATH_ABS_LONG:
3501 return genInlinedAbsLong(cUnit, mir);
3502 case INLINE_MATH_MIN_INT:
3503 return genInlinedMinMaxInt(cUnit, mir, true);
3504 case INLINE_MATH_MAX_INT:
3505 return genInlinedMinMaxInt(cUnit, mir, false);
3506 case INLINE_STRING_CHARAT:
3507 return genInlinedStringCharAt(cUnit, mir);
3508 case INLINE_MATH_SQRT:
3509 if (genInlineSqrt(cUnit, mir))
3512 break; /* Handle with C routine */
3513 case INLINE_MATH_COS:
3514 case INLINE_MATH_SIN:
3515 break; /* Handle with C routine */
3516 case INLINE_MATH_ABS_FLOAT:
3517 return genInlinedAbsFloat(cUnit, mir);
3518 case INLINE_MATH_ABS_DOUBLE:
3519 return genInlinedAbsDouble(cUnit, mir);
3520 case INLINE_STRING_COMPARETO:
3521 case INLINE_STRING_EQUALS:
3522 case INLINE_STRING_INDEXOF_I:
3523 case INLINE_STRING_INDEXOF_II:
3529 /* Materialize pointer to retval & push */
3530 opRegReg(cUnit, OP_MOV, r4PC, rGLUE);
3531 opRegImm(cUnit, OP_ADD, r4PC, offset, rNone);
3533 /* Push r4 and (just to take up space) r5) */
3534 opImm(cUnit, OP_PUSH, (1 << r4PC | 1 << rFP));
3536 /* Get code pointer to inline routine */
3537 loadConstant(cUnit, r4PC, (int)inLineTable[operation].func);
3540 genExportPC(cUnit, mir, r0, r1 );
3542 /* Load arguments to r0 through r3 as applicable */
3543 for (i=0; i < dInsn->vA; i++) {
3544 loadValue(cUnit, dInsn->arg[i], i);
3546 /* Call inline routine */
3547 opReg(cUnit, OP_BLX, r4PC);
/* Pop the two pushed words off the stack */
3550 opRegImm(cUnit, OP_ADD, r13, 8, rNone);
3552 /* Did we throw? If so, redo under interpreter*/
3553 genZeroCheck(cUnit, r0, mir->offset, NULL);
3555 resetRegisterScoreboard(cUnit);
/*
 * Fmt 51l: const-wide vA, #+BBBBBBBBBBBBBBBB (64-bit literal).
 * Materialize the low word in r0 and the high word in r1, then store
 * the pair into the Dalvik register pair starting at vA (r2 scratch).
 */
3564 static bool handleFmt51l(CompilationUnit *cUnit, MIR *mir)
3566 loadConstant(cUnit, r0, mir->dalvikInsn.vB_wide & 0xFFFFFFFFUL);
3567 loadConstant(cUnit, r1, (mir->dalvikInsn.vB_wide>>32) & 0xFFFFFFFFUL);
3568 storeValuePair(cUnit, r0, r1, mir->dalvikInsn.vA, r2);
3573 * The following are special processing routines that handle transfer of
3574 * control between compiled code and the interpreter. Certain VM states like
3575 * Dalvik PC and special-purpose registers are reconstructed here.
3578 /* Chaining cell for code that may need warmup. */
3579 static void handleNormalChainingCell(CompilationUnit *cUnit,
3580 unsigned int offset)
/* Fetch the dvmJitToInterpNormal entry point from the glue and call it */
3582 loadWordDisp(cUnit, rGLUE, offsetof(InterpState,
3583 jitToInterpEntries.dvmJitToInterpNormal), r0);
3584 opReg(cUnit, OP_BLX, r0);
/* Cell payload: the Dalvik PC to resume at */
3585 addWordData(cUnit, (int) (cUnit->method->insns + offset), true);
3589 * Chaining cell for instructions that immediately follow already-translated
/* ...code (hot path - request trace selection rather than warmup). */
3592 static void handleHotChainingCell(CompilationUnit *cUnit,
3593 unsigned int offset)
/* Fetch the dvmJitToTraceSelect entry point from the glue and call it */
3595 loadWordDisp(cUnit, rGLUE, offsetof(InterpState,
3596 jitToInterpEntries.dvmJitToTraceSelect), r0);
3597 opReg(cUnit, OP_BLX, r0);
/* Cell payload: the Dalvik PC to resume at */
3598 addWordData(cUnit, (int) (cUnit->method->insns + offset), true);
3601 #if defined(WITH_SELF_VERIFICATION) || defined(WITH_JIT_TUNING)
3602 /* Chaining cell for branches that branch back into the same basic block */
3603 static void handleBackwardBranchChainingCell(CompilationUnit *cUnit,
3604 unsigned int offset)
/*
 * Under self-verification, backward branches get a dedicated handler;
 * otherwise fall back to the normal interpreter re-entry point.
 * NOTE(review): the #else/#endif lines are elided in this view.
 */
3606 #if defined(WITH_SELF_VERIFICATION)
3607 newLIR3(cUnit, THUMB_LDR_RRI5, r0, rGLUE,
3608 offsetof(InterpState, jitToInterpEntries.dvmJitToBackwardBranch) >> 2);
3610 newLIR3(cUnit, THUMB_LDR_RRI5, r0, rGLUE,
3611 offsetof(InterpState, jitToInterpEntries.dvmJitToInterpNormal) >> 2);
3613 newLIR1(cUnit, THUMB_BLX_R, r0);
/* Cell payload: the Dalvik PC to resume at */
3614 addWordData(cUnit, (int) (cUnit->method->insns + offset), true);
3618 /* Chaining cell for monomorphic method invocations. */
3619 static void handleInvokeSingletonChainingCell(CompilationUnit *cUnit,
3620 const Method *callee)
/* Fetch the dvmJitToTraceSelect entry point from the glue and call it */
3622 loadWordDisp(cUnit, rGLUE, offsetof(InterpState,
3623 jitToInterpEntries.dvmJitToTraceSelect), r0);
3624 opReg(cUnit, OP_BLX, r0);
/* Cell payload: entry point of the (single, known) callee */
3625 addWordData(cUnit, (int) (callee->insns), true);
3628 /* Chaining cell for predicted (polymorphic) method invocations. */
3629 static void handleInvokePredictedChainingCell(CompilationUnit *cUnit)
/*
 * Emit the 4-word predicted-chaining cell in its unchained initial
 * state: a branch pair, a class slot, a method slot and a counter.
 * The class/method slots are patched at runtime once the receiver
 * type has been observed.
 */
3632 /* Should not be executed in the initial state */
3633 addWordData(cUnit, PREDICTED_CHAIN_BX_PAIR_INIT, true);
3634 /* To be filled: class */
3635 addWordData(cUnit, PREDICTED_CHAIN_CLAZZ_INIT, true);
3636 /* To be filled: method */
3637 addWordData(cUnit, PREDICTED_CHAIN_METHOD_INIT, true);
3639 * Rechain count. The initial value of 0 here will trigger chaining upon
3640 * the first invocation of this callsite.
3642 addWordData(cUnit, PREDICTED_CHAIN_COUNTER_INIT, true);
3645 /* Load the Dalvik PC into r0 and jump to the specified target */
3646 static void handlePCReconstruction(CompilationUnit *cUnit,
3647 ArmLIR *targetLabel)
/*
 * Emit every pending PC-reconstruction cell collected during codegen:
 * each cell loads its recorded Dalvik PC into r0 and then branches to
 * targetLabel (the exception-handling block).
 */
3650 (ArmLIR **) cUnit->pcReconstructionList.elemList;
3651 int numElems = cUnit->pcReconstructionList.numUsed;
3653 for (i = 0; i < numElems; i++) {
3654 dvmCompilerAppendLIR(cUnit, (LIR *) pcrLabel[i]);
3655 /* r0 = dalvik PC */
3656 loadConstant(cUnit, r0, pcrLabel[i]->operands[0]);
3657 genUnconditionalBranch(cUnit, targetLabel);
/*
 * Printable names for the compiler-internal extended MIR opcodes,
 * indexed by (opCode - MIR_OP_FIRST); used for debug-listing pseudo LIRs.
 */
3661 static char *extendedMIROpNames[MIR_OP_LAST - MIR_OP_FIRST] = {
3663 "MIR_OP_NULL_N_RANGE_UP_CHECK",
3664 "MIR_OP_NULL_N_RANGE_DOWN_CHECK",
3665 "MIR_OP_LOWER_BOUND_CHECK",
3672 * vC = endConditionReg;
3675 * arg[2] = loopBranchConditionCode
/*
 * Emit the hoisted null check and upper-bound check for a counted-up
 * loop. Any failed check branches to the PC-reconstruction cell so the
 * loop is re-executed by the interpreter from its start.
 * NOTE(review): maxC/minC are consumed by lines elided from this view.
 */
3677 static void genHoistedChecksForCountUpLoop(CompilationUnit *cUnit, MIR *mir)
3679 DecodedInstruction *dInsn = &mir->dalvikInsn;
3680 const int lenOffset = offsetof(ArrayObject, length);
3681 const int regArray = 0;
3682 const int regIdxEnd = NEXT_REG(regArray);
/* regLength reuses regArray - the array ref is dead after the len load */
3683 const int regLength = regArray;
3684 const int maxC = dInsn->arg[0];
3685 const int minC = dInsn->arg[1];
3687 /* regArray <- arrayRef */
3688 loadValue(cUnit, mir->dalvikInsn.vA, regArray);
3689 loadValue(cUnit, mir->dalvikInsn.vC, regIdxEnd);
/* Hoisted null check: punt to the interpreter if arrayRef == 0 */
3690 genRegImmCheck(cUnit, ARM_COND_EQ, regArray, 0, 0,
3691 (ArmLIR *) cUnit->loopAnalysis->branchToPCR);
3693 /* regLength <- len(arrayRef) */
3694 loadWordDisp(cUnit, regArray, lenOffset, regLength);
3698 * If the loop end condition is ">=" instead of ">", then the largest value
3699 * of the index is "endCondition - 1".
3701 if (dInsn->arg[2] == OP_IF_GE) {
3706 opRegImm(cUnit, OP_ADD, regIdxEnd, delta, regIdxEnd);
3708 /* Punt if "regIdxEnd < len(Array)" is false */
3709 genRegRegCheck(cUnit, ARM_COND_GE, regIdxEnd, regLength, 0,
3710 (ArmLIR *) cUnit->loopAnalysis->branchToPCR);
3716 * vC = endConditionReg;
3719 * arg[2] = loopBranchConditionCode
/*
 * Emit the hoisted null check and upper-bound check for a counted-down
 * loop. The initial index (vB) is the largest value used, so only it
 * needs the range check. Failed checks punt to the PCR cell.
 * NOTE(review): minC is consumed by lines elided from this view.
 */
3721 static void genHoistedChecksForCountDownLoop(CompilationUnit *cUnit, MIR *mir)
3723 DecodedInstruction *dInsn = &mir->dalvikInsn;
3724 const int lenOffset = offsetof(ArrayObject, length);
3725 const int regArray = 0;
3726 const int regIdxInit = NEXT_REG(regArray);
/* regLength reuses regArray - the array ref is dead after the len load */
3727 const int regLength = regArray;
3728 const int maxC = dInsn->arg[0];
3729 const int minC = dInsn->arg[1];
3731 /* regArray <- arrayRef */
3732 loadValue(cUnit, mir->dalvikInsn.vA, regArray);
3733 loadValue(cUnit, mir->dalvikInsn.vB, regIdxInit);
/* Hoisted null check: punt to the interpreter if arrayRef == 0 */
3734 genRegImmCheck(cUnit, ARM_COND_EQ, regArray, 0, 0,
3735 (ArmLIR *) cUnit->loopAnalysis->branchToPCR);
3737 /* regLength <- len(arrayRef) */
3738 loadWordDisp(cUnit, regArray, lenOffset, regLength);
/* Largest index touched is regIdxInit + maxC */
3741 opRegImm(cUnit, OP_ADD, regIdxInit, maxC, regIdxInit);
3744 /* Punt if "regIdxInit < len(Array)" is false */
3745 genRegRegCheck(cUnit, ARM_COND_GE, regIdxInit, regLength, 0,
3746 (ArmLIR *) cUnit->loopAnalysis->branchToPCR);
/*
 * Emit the hoisted lower-bound check for a loop: verify that the
 * smallest index used (initial index + minC) is non-negative, punting
 * to the PC-reconstruction cell otherwise.
 */
3753 static void genHoistedLowerBoundCheck(CompilationUnit *cUnit, MIR *mir)
3755 DecodedInstruction *dInsn = &mir->dalvikInsn;
3756 const int regIdx = 0;
/* vB = constant offset applied to the index inside the loop */
3757 const int minC = dInsn->vB;
3759 /* regIdx <- initial index value */
3760 loadValue(cUnit, mir->dalvikInsn.vA, regIdx);
3762 /* Punt if "regIdxInit + minC >= 0" is false */
3763 genRegImmCheck(cUnit, ARM_COND_LT, regIdx, -minC, 0,
3764 (ArmLIR *) cUnit->loopAnalysis->branchToPCR);
3767 /* Extended MIR instructions like PHI */
3768 static void handleExtendedMIR(CompilationUnit *cUnit, MIR *mir)
/* Emit a debug pseudo LIR carrying the opcode's printable name */
3770 int opOffset = mir->dalvikInsn.opCode - MIR_OP_FIRST;
3771 char *msg = dvmCompilerNew(strlen(extendedMIROpNames[opOffset]) + 1,
3773 strcpy(msg, extendedMIROpNames[opOffset]);
3774 newLIR1(cUnit, ARM_PSEUDO_EXTENDED_MIR, (int) msg);
3776 switch (mir->dalvikInsn.opCode) {
/* PHI (presumably - case label elided): record the SSA string */
3778 char *ssaString = dvmCompilerGetSSAString(cUnit, mir->ssaRep);
3779 newLIR1(cUnit, ARM_PSEUDO_SSA_REP, (int) ssaString);
3782 case MIR_OP_NULL_N_RANGE_UP_CHECK: {
3783 genHoistedChecksForCountUpLoop(cUnit, mir);
3786 case MIR_OP_NULL_N_RANGE_DOWN_CHECK: {
3787 genHoistedChecksForCountDownLoop(cUnit, mir);
3790 case MIR_OP_LOWER_BOUND_CHECK: {
3791 genHoistedLowerBoundCheck(cUnit, mir);
/* Unknown extended MIR: punt to the interpreter via the PCR cell */
3795 genUnconditionalBranch(cUnit,
3796 (ArmLIR *) cUnit->loopAnalysis->branchToPCR);
3805 * Create a PC-reconstruction cell for the starting offset of this trace.
3806 * Since the PCR cell is placed near the end of the compiled code which is
3807 * usually out of range for a conditional branch, we put two branches (one
3808 * branch over to the loop body and one layover branch to the actual PCR) at the
3809 * end of the entry block.
3811 static void setupLoopEntryBlock(CompilationUnit *cUnit, BasicBlock *entry,
3814 /* Set up the place holder to reconstruct this Dalvik PC */
3815 ArmLIR *pcrLabel = dvmCompilerNew(sizeof(ArmLIR), true);
3816 pcrLabel->opCode = ARM_PSEUDO_PC_RECONSTRUCTION_CELL;
/* operands[0] = absolute Dalvik PC, operands[1] = bytecode offset */
3817 pcrLabel->operands[0] =
3818 (int) (cUnit->method->insns + entry->startOffset);
3819 pcrLabel->operands[1] = entry->startOffset;
3820 /* Insert the place holder to the growable list */
3821 dvmInsertGrowableList(&cUnit->pcReconstructionList, pcrLabel);
3824 * Next, create two branches - one branch over to the loop body and the
3825 * other branch to the PCR cell to punt.
3827 ArmLIR *branchToBody = dvmCompilerNew(sizeof(ArmLIR), true);
3828 branchToBody->opCode = THUMB_B_UNCOND;
3829 branchToBody->generic.target = (LIR *) bodyLabel;
3830 setupResourceMasks(branchToBody);
3831 cUnit->loopAnalysis->branchToBody = (LIR *) branchToBody;
3833 ArmLIR *branchToPCR = dvmCompilerNew(sizeof(ArmLIR), true);
3834 branchToPCR->opCode = THUMB_B_UNCOND;
3835 branchToPCR->generic.target = (LIR *) pcrLabel;
3836 setupResourceMasks(branchToPCR);
3837 cUnit->loopAnalysis->branchToPCR = (LIR *) branchToPCR;
/*
 * Main MIR -> LIR lowering pass: walk every basic block of the trace,
 * emit its label pseudo LIR, lower each MIR instruction by format,
 * then lay out the collected chaining cells at the end in a fixed
 * per-type order. NOTE(review): many interior lines (break statements,
 * closing braces, some case labels) are elided in this view.
 */
3840 void dvmCompilerMIR2LIR(CompilationUnit *cUnit)
3842 /* Used to hold the labels of each block */
3844 dvmCompilerNew(sizeof(ArmLIR) * cUnit->numBlocks, true);
3845 GrowableList chainingListByType[CHAINING_CELL_LAST];
3849 * Initialize various types chaining lists.
3851 for (i = 0; i < CHAINING_CELL_LAST; i++) {
3852 dvmInitGrowableList(&chainingListByType[i], 2);
3855 BasicBlock **blockList = cUnit->blockList;
3857 if (cUnit->executionCount) {
3859 * Reserve 6 bytes at the beginning of the trace
3860 * +----------------------------+
3861 * | execution count (4 bytes) |
3862 * +----------------------------+
3863 * | chain cell offset (2 bytes)|
3864 * +----------------------------+
3865 * ...and then code to increment the execution
3867 * mov r0, pc @ move adr of "mov r0,pc" + 4 to r0
3868 * sub r0, #10 @ back up to addr of executionCount
/* Two zero half-words reserve the 4-byte execution count */
3873 newLIR1(cUnit, ARM_16BIT_DATA, 0);
3874 newLIR1(cUnit, ARM_16BIT_DATA, 0);
3875 cUnit->chainCellOffsetLIR =
3876 (LIR *) newLIR1(cUnit, ARM_16BIT_DATA, CHAIN_CELL_OFFSET_TAG);
3877 cUnit->headerSize = 6;
3878 /* Thumb instruction used directly here to ensure correct size */
3879 newLIR2(cUnit, THUMB_MOV_RR_H2L, r0, rpc);
3880 newLIR2(cUnit, THUMB_SUB_RI8, r0, 10);
3881 newLIR3(cUnit, THUMB_LDR_RRI5, r1, r0, 0);
3882 newLIR2(cUnit, THUMB_ADD_RI8, r1, 1);
3883 newLIR3(cUnit, THUMB_STR_RRI5, r1, r0, 0);
3885 /* Just reserve 2 bytes for the chain cell offset */
3886 cUnit->chainCellOffsetLIR =
3887 (LIR *) newLIR1(cUnit, ARM_16BIT_DATA, CHAIN_CELL_OFFSET_TAG);
3888 cUnit->headerSize = 2;
3891 /* Handle the content in each basic block */
3892 for (i = 0; i < cUnit->numBlocks; i++) {
3893 blockList[i]->visited = true;
3896 labelList[i].operands[0] = blockList[i]->startOffset;
/* Non-chaining-cell blocks get their label appended immediately */
3898 if (blockList[i]->blockType >= CHAINING_CELL_LAST) {
3900 * Append the label pseudo LIR first. Chaining cells will be handled
3901 * separately afterwards.
3903 dvmCompilerAppendLIR(cUnit, (LIR *) &labelList[i]);
3906 if (blockList[i]->blockType == ENTRY_BLOCK) {
3907 labelList[i].opCode = ARM_PSEUDO_ENTRY_BLOCK;
3908 if (blockList[i]->firstMIRInsn == NULL) {
/* Loop trace: entry block carries MIRs - set up body/PCR branches */
3911 setupLoopEntryBlock(cUnit, blockList[i],
3912 &labelList[blockList[i]->fallThrough->id]);
3914 } else if (blockList[i]->blockType == EXIT_BLOCK) {
3915 labelList[i].opCode = ARM_PSEUDO_EXIT_BLOCK;
3916 goto gen_fallthrough;
3917 } else if (blockList[i]->blockType == DALVIK_BYTECODE) {
3918 labelList[i].opCode = ARM_PSEUDO_NORMAL_BLOCK_LABEL;
3919 /* Reset the register state */
3920 resetRegisterScoreboard(cUnit);
/* Chaining cells are only recorded here; emitted after all blocks */
3922 switch (blockList[i]->blockType) {
3923 case CHAINING_CELL_NORMAL:
3924 labelList[i].opCode = ARM_PSEUDO_CHAINING_CELL_NORMAL;
3925 /* handle the codegen later */
3926 dvmInsertGrowableList(
3927 &chainingListByType[CHAINING_CELL_NORMAL], (void *) i);
3929 case CHAINING_CELL_INVOKE_SINGLETON:
3930 labelList[i].opCode =
3931 ARM_PSEUDO_CHAINING_CELL_INVOKE_SINGLETON;
3932 labelList[i].operands[0] =
3933 (int) blockList[i]->containingMethod;
3934 /* handle the codegen later */
3935 dvmInsertGrowableList(
3936 &chainingListByType[CHAINING_CELL_INVOKE_SINGLETON],
3939 case CHAINING_CELL_INVOKE_PREDICTED:
3940 labelList[i].opCode =
3941 ARM_PSEUDO_CHAINING_CELL_INVOKE_PREDICTED;
3942 /* handle the codegen later */
3943 dvmInsertGrowableList(
3944 &chainingListByType[CHAINING_CELL_INVOKE_PREDICTED],
3947 case CHAINING_CELL_HOT:
3948 labelList[i].opCode =
3949 ARM_PSEUDO_CHAINING_CELL_HOT;
3950 /* handle the codegen later */
3951 dvmInsertGrowableList(
3952 &chainingListByType[CHAINING_CELL_HOT],
3955 case PC_RECONSTRUCTION:
3956 /* Make sure exception handling block is next */
3957 labelList[i].opCode =
3958 ARM_PSEUDO_PC_RECONSTRUCTION_BLOCK_LABEL;
3959 assert (i == cUnit->numBlocks - 2);
3960 handlePCReconstruction(cUnit, &labelList[i+1]);
3962 case EXCEPTION_HANDLING:
3963 labelList[i].opCode = ARM_PSEUDO_EH_BLOCK_LABEL;
/* Only needed if some PCR cell can actually branch here */
3964 if (cUnit->pcReconstructionList.numUsed) {
3965 loadWordDisp(cUnit, rGLUE, offsetof(InterpState,
3966 jitToInterpEntries.dvmJitToInterpPunt),
3968 opReg(cUnit, OP_BLX, r1);
3971 #if defined(WITH_SELF_VERIFICATION) || defined(WITH_JIT_TUNING)
3972 case CHAINING_CELL_BACKWARD_BRANCH:
3973 labelList[i].opCode =
3974 ARM_PSEUDO_CHAINING_CELL_BACKWARD_BRANCH;
3975 /* handle the codegen later */
3976 dvmInsertGrowableList(
3977 &chainingListByType[CHAINING_CELL_BACKWARD_BRANCH],
/* Lower each MIR instruction of this bytecode block */
3987 ArmLIR *headLIR = NULL;
3989 for (mir = blockList[i]->firstMIRInsn; mir; mir = mir->next) {
3990 if (mir->dalvikInsn.opCode >= MIR_OP_FIRST) {
3991 handleExtendedMIR(cUnit, mir);
3995 OpCode dalvikOpCode = mir->dalvikInsn.opCode;
3996 InstructionFormat dalvikFormat =
3997 dexGetInstrFormat(gDvm.instrFormat, dalvikOpCode);
/* Boundary marker: ties the LIR stream back to the Dalvik offset */
3998 ArmLIR *boundaryLIR =
3999 newLIR2(cUnit, ARM_PSEUDO_DALVIK_BYTECODE_BOUNDARY,
4000 mir->offset, dalvikOpCode);
4002 char *ssaString = dvmCompilerGetSSAString(cUnit, mir->ssaRep);
4003 newLIR1(cUnit, ARM_PSEUDO_SSA_REP, (int) ssaString);
4006 /* Remember the first LIR for this block */
4007 if (headLIR == NULL) {
4008 headLIR = boundaryLIR;
4009 /* Set the first boundaryLIR as a scheduling barrier */
4010 headLIR->defMask = ENCODE_ALL;
4015 * Debugging: screen the opcode first to see if it is in the
4016 * do[-not]-compile list
4019 gDvmJit.includeSelectedOp !=
4020 ((gDvmJit.opList[dalvikOpCode >> 3] &
4021 (1 << (dalvikOpCode & 0x7))) !=
4023 #if defined(WITH_SELF_VERIFICATION)
4024 /* Punt on opcodes we can't replay */
4025 if (selfVerificationPuntOps(dalvikOpCode))
4026 singleStepMe = true;
4028 if (singleStepMe || cUnit->allSingleStep) {
4030 genInterpSingleStep(cUnit, mir);
/* Dispatch to the per-format handler */
4032 opcodeCoverage[dalvikOpCode]++;
4033 switch (dalvikFormat) {
4037 notHandled = handleFmt10t_Fmt20t_Fmt30t(cUnit,
4038 mir, blockList[i], labelList);
4041 notHandled = handleFmt10x(cUnit, mir);
4045 notHandled = handleFmt11n_Fmt31i(cUnit, mir);
4048 notHandled = handleFmt11x(cUnit, mir);
4051 notHandled = handleFmt12x(cUnit, mir);
4054 notHandled = handleFmt20bc(cUnit, mir);
4058 notHandled = handleFmt21c_Fmt31c(cUnit, mir);
4061 notHandled = handleFmt21h(cUnit, mir);
4064 notHandled = handleFmt21s(cUnit, mir);
4067 notHandled = handleFmt21t(cUnit, mir, blockList[i],
4072 notHandled = handleFmt22b_Fmt22s(cUnit, mir);
4075 notHandled = handleFmt22c(cUnit, mir);
4078 notHandled = handleFmt22cs(cUnit, mir);
4081 notHandled = handleFmt22t(cUnit, mir, blockList[i],
4086 notHandled = handleFmt22x_Fmt32x(cUnit, mir);
4089 notHandled = handleFmt23x(cUnit, mir);
4092 notHandled = handleFmt31t(cUnit, mir);
4096 notHandled = handleFmt35c_3rc(cUnit, mir, blockList[i],
4101 notHandled = handleFmt35ms_3rms(cUnit, mir,blockList[i],
4105 notHandled = handleFmt3inline(cUnit, mir);
4108 notHandled = handleFmt51l(cUnit, mir);
4116 LOGE("%#06x: Opcode 0x%x (%s) / Fmt %d not handled\n",
4118 dalvikOpCode, getOpcodeName(dalvikOpCode),
4125 if (blockList[i]->blockType == ENTRY_BLOCK) {
/* Append the branches set up by setupLoopEntryBlock() */
4126 dvmCompilerAppendLIR(cUnit,
4127 (LIR *) cUnit->loopAnalysis->branchToBody);
4128 dvmCompilerAppendLIR(cUnit,
4129 (LIR *) cUnit->loopAnalysis->branchToPCR);
4134 * Eliminate redundant loads/stores and delay stores into later
4137 dvmCompilerApplyLocalOptimizations(cUnit, (LIR *) headLIR,
4138 cUnit->lastLIRInsn);
4143 * Check if the block is terminated due to trace length constraint -
4144 * insert an unconditional branch to the chaining cell.
4146 if (blockList[i]->needFallThroughBranch) {
4147 genUnconditionalBranch(cUnit,
4148 &labelList[blockList[i]->fallThrough->id]);
4153 /* Handle the chaining cells in predefined order */
4154 for (i = 0; i < CHAINING_CELL_LAST; i++) {
4156 int *blockIdList = (int *) chainingListByType[i].elemList;
4158 cUnit->numChainingCells[i] = chainingListByType[i].numUsed;
4160 /* No chaining cells of this type */
4161 if (cUnit->numChainingCells[i] == 0)
4164 /* Record the first LIR for a new type of chaining cell */
4165 cUnit->firstChainingLIR[i] = (LIR *) &labelList[blockIdList[0]];
4167 for (j = 0; j < chainingListByType[i].numUsed; j++) {
4168 int blockId = blockIdList[j];
4170 /* Align this chaining cell first */
4171 newLIR0(cUnit, ARM_PSEUDO_ALIGN4);
4173 /* Insert the pseudo chaining instruction */
4174 dvmCompilerAppendLIR(cUnit, (LIR *) &labelList[blockId]);
4177 switch (blockList[blockId]->blockType) {
4178 case CHAINING_CELL_NORMAL:
4179 handleNormalChainingCell(cUnit,
4180 blockList[blockId]->startOffset);
4182 case CHAINING_CELL_INVOKE_SINGLETON:
4183 handleInvokeSingletonChainingCell(cUnit,
4184 blockList[blockId]->containingMethod);
4186 case CHAINING_CELL_INVOKE_PREDICTED:
4187 handleInvokePredictedChainingCell(cUnit);
4189 case CHAINING_CELL_HOT:
4190 handleHotChainingCell(cUnit,
4191 blockList[blockId]->startOffset);
4193 #if defined(WITH_SELF_VERIFICATION) || defined(WITH_JIT_TUNING)
4194 case CHAINING_CELL_BACKWARD_BRANCH:
4195 handleBackwardBranchChainingCell(cUnit,
4196 blockList[blockId]->startOffset);
4206 dvmCompilerApplyGlobalOptimizations(cUnit);
4209 /* Accept the work and start compiling */
4210 bool dvmCompilerDoWork(CompilerWorkOrder *work)
/* Refuse new work when the code cache has filled up */
4214 if (gDvmJit.codeCacheFull) {
4218 switch (work->kind) {
4219 case kWorkOrderMethod:
4220 res = dvmCompileMethod(work->info, &work->result);
4222 case kWorkOrderTrace:
4223 /* Start compilation with maximally allowed trace length */
4224 res = dvmCompileTrace(work->info, JIT_MAX_TRACE_LEN, &work->result);
4233 /* Architectural-specific debugging helpers go here */
4234 void dvmCompilerArchDump(void)
4236 /* Print compiled opcode in this VM instance */
4237 int i, start, streak;
4242 while (opcodeCoverage[i] == 0 && i < 256) {
4248 for (start = i++, streak = 1; i < 256; i++) {
4249 if (opcodeCoverage[i]) {
4253 sprintf(buf+strlen(buf), "%x,", start);
4255 sprintf(buf+strlen(buf), "%x-%x,", start, start + streak - 1);
4258 while (opcodeCoverage[i] == 0 && i < 256) {
4269 sprintf(buf+strlen(buf), "%x", start);
4271 sprintf(buf+strlen(buf), "%x-%x", start, start + streak - 1);
4275 LOGD("dalvik.vm.jit.op = %s", buf);
4279 /* Common initialization routine for an architecture family */
4280 bool dvmCompilerArchInit()
/*
 * Sanity-check that EncodingMap is ordered so that entry i encodes
 * opcode i (later lookups index the table directly), then delegate to
 * the variant-specific initializer.
 */
4284 for (i = 0; i < ARM_LAST; i++) {
4285 if (EncodingMap[i].opCode != i) {
4286 LOGE("Encoding order for %s is wrong: expecting %d, seeing %d",
4287 EncodingMap[i].name, i, EncodingMap[i].opCode);
4292 return compilerArchVariantInit();