kThumb2Dmb, /* dmb [1111001110111111100011110101] option[3-0] */
kThumb2LdrPcReln12, /* ldr rd,[pc,-#imm12] [1111100011011111] rt[15-12]
imm12[11-0] */
+ kThumb2RsbRRR, /* rsb [111010111101] rn[19..16] [0000] rd[11..8]
+ [0000] rm[3..0] */
kThumbUndefined, /* undefined [11011110xxxxxxxx] */
kArmLast,
} ArmOpcode;
kFmtUnused, -1, -1,
IS_BINARY_OP | REG_DEF0 | REG_USE_PC | IS_LOAD,
"ldr", "r!0d, [r15pc, -#!1d]", 2),
+ ENCODING_MAP(kThumb2RsbRRR, 0xebd00000, /* setflags encoding */
+ kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+ kFmtShift, -1, -1,
+ IS_QUAD_OP | REG_DEF0_USE12 | SETS_CCODES,
+ "rsb", "r!0d, r!1d, r!2d!3H", 2),
ENCODING_MAP(kThumbUndefined, 0xde00,
kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
kFmtUnused, -1, -1, NO_OPERAND,
} else {
// Reverse subtract: (src << (shift + 1)) - src.
assert(powerOfTwoMinusOne);
- // TODO: rsb dst, src, src lsl#lowestSetBit(lit + 1)
- int tReg = dvmCompilerAllocTemp(cUnit);
- opRegRegImm(cUnit, kOpLsl, tReg, rlSrc.lowReg, lowestSetBit(lit + 1));
- opRegRegReg(cUnit, kOpSub, rlResult.lowReg, tReg, rlSrc.lowReg);
+ genMultiplyByShiftAndReverseSubtract(cUnit, rlSrc, rlResult, lowestSetBit(lit + 1));
}
storeValue(cUnit, rlDest, rlResult);
return true;
// to do a regular multiply.
opRegRegImm(cUnit, kOpMul, rlResult.lowReg, rlSrc.lowReg, lit);
}
+
+/*
+ * Multiply rlSrc.lowReg by ((1 << lit) - 1), leaving the product in
+ * rlResult.lowReg, using a shift-and-reverse-subtract pair:
+ *     tReg   = src << lit
+ *     result = tReg - src
+ * Allocates one temporary register from the compilation unit.
+ *
+ * NOTE(review): "lit" is the shift amount (callers pass
+ * lowestSetBit(lit + 1)), not the original multiply literal.
+ */
+static void genMultiplyByShiftAndReverseSubtract(CompilationUnit *cUnit,
+ RegLocation rlSrc, RegLocation rlResult, int lit)
+{
+ int tReg = dvmCompilerAllocTemp(cUnit);
+ /* tReg = src << lit */
+ opRegRegImm(cUnit, kOpLsl, tReg, rlSrc.lowReg, lit);
+ /* result = (src << lit) - src == src * ((1 << lit) - 1) */
+ opRegRegReg(cUnit, kOpSub, rlResult.lowReg, tReg, rlSrc.lowReg);
+}
opRegRegImm(cUnit, kOpLsl, rlResult.lowReg, rlResult.lowReg, firstBit);
}
}
+
+/*
+ * Multiply rlSrc.lowReg by ((1 << lit) - 1), leaving the product in
+ * rlResult.lowReg, with a single Thumb2 reverse-subtract using a
+ * shifted-register operand:
+ *     rsb rd, rn, rm, lsl #lit   =>   rd = (src << lit) - src
+ * (rn and rm are both rlSrc.lowReg; no temp register needed.)
+ *
+ * NOTE(review): kThumb2RsbRRR's ENCODING_MAP is the flag-setting
+ * encoding (SETS_CCODES), so this clobbers the condition codes —
+ * confirm flags are dead at every call site.
+ */
+static void genMultiplyByShiftAndReverseSubtract(CompilationUnit *cUnit,
+ RegLocation rlSrc, RegLocation rlResult, int lit)
+{
+ newLIR4(cUnit, kThumb2RsbRRR, rlResult.lowReg, rlSrc.lowReg, rlSrc.lowReg,
+ encodeShift(kArmLsl, lit));
+}