((static_cast<int32_t>(sn) & 1)*B7) | B4;
Emit(encoding);
}
-#endif
+// Moved to ARM32::AssemblerARM32::vmovrs().
// Copies single-precision VFP register `sn` into core register `rt`
// ("vmov <Rt>, <Sn>"), predicated on `cond`.
// NOTE(review): this excerpt appears truncated -- the expression
// statement below computes only part of an instruction encoding and
// discards the result, and `encoding` is used without a visible
// definition. Presumably the full `encoding = ...` computation was
// elided; confirm against the complete file.
void Assembler::vmovrs(Register rt, SRegister sn, Condition cond) {
ASSERT(TargetCPUFeatures::vfp_supported());
ASSERT(sn != kNoSRegister);
// Visible tail of the encoding: the N bit (low bit of Sn) at B7 plus
// the fixed B4 bit.
((static_cast<int32_t>(sn) & 1)*B7) | B4;
Emit(encoding);
}
+#endif
void Assembler::vmovsrr(SRegister sm, Register rt, Register rt2,
return (AL << kConditionShift) | B24 | B21 |
((imm16 >> 4) << 8) | B6 | B5 | B4 | (imm16 & 0xf);
}
-#endif
+ // Not ported. PNaCl doesn't allow breakpoint instructions.
// Returns the instruction encoding used to fill breakpoint slots:
// a BKPT instruction with immediate 0.
static uword GetBreakInstructionFiller() {
return BkptEncoding(0);
}
// Floating point instructions (VFPv3-D16 and VFPv3-D32 profiles).
-#if 0
+
// Moved to ARM32::AssemblerARM32::vmovsr().
void vmovsr(SRegister sn, Register rt, Condition cond = AL);
-#endif
+ // Moved to ARM32::AssemblerARM32::vmovrs().
void vmovrs(Register rt, SRegister sn, Condition cond = AL);
+#endif
void vmovsrr(SRegister sm, Register rt, Register rt2, Condition cond = AL);
void vmovrrs(Register rt, Register rt2, SRegister sm, Condition cond = AL);
void vmovdrr(DRegister dm, Register rt, Register rt2, Condition cond = AL);
emitVFPddd(Cond, VmovddOpcode, Dd, D0, Dm);
}
+void AssemblerARM32::vmovrs(const Operand *OpRt, const Operand *OpSn,
+                            CondARM32::Cond Cond) {
+  // VMOV (between ARM core register and single-precision register)
+  // ARM section A8.8.343, encoding A1.
+  //
+  // vmov<c> <Rt>, <Sn>
+  //
+  // cccc11100001nnnntttt1010N0010000 where cccc=Cond, nnnnN = Sn, and tttt=Rt.
+  //
+  // Bit B20 (op=1) selects the S-register -> core-register direction;
+  // vmovsr uses the same encoding with that bit clear.
+  constexpr const char *Vmovrs = "vmovrs";
+  IValueT Rt = encodeGPRegister(OpRt, "Rt", Vmovrs);
+  IValueT Sn = encodeSRegister(OpSn, "Sn", Vmovrs);
+  assert(CondARM32::isDefined(Cond));
+  // Sn's upper four bits fill the Vn field (bits 19:16) and its low bit
+  // is the N bit (bit 7); Rt occupies bits 15:12 (kRdShift).
+  IValueT Encoding = (encodeCondition(Cond) << kConditionShift) | B27 | B26 |
+                     B25 | B20 | B11 | B9 | B4 | (getXXXXInRegXXXXY(Sn) << 16) |
+                     (Rt << kRdShift) | (getYInRegXXXXY(Sn) << 7);
+  emitInst(Encoding);
+}
+
void AssemblerARM32::vmovs(const Operand *OpSd,
const OperandARM32FlexFpImm *OpFpImm,
CondARM32::Cond Cond) {
//
// vmov<c> <Sn>, <Rt>
//
- // cccc1110000onnnntttt1010N0010000 where cccc=Cond, nnnnN = Sn, and tttt=Rt.
+ // cccc11100000nnnntttt1010N0010000 where cccc=Cond, nnnnN = Sn, and tttt=Rt.
constexpr const char *Vmovsr = "vmovsr";
IValueT Sn = encodeSRegister(OpSn, "Sn", Vmovsr);
IValueT Rt = encodeGPRegister(OpRt, "Rt", Vmovsr);
void vmovdd(const Operand *OpDd, const Operand *OpDm, CondARM32::Cond Cond);
+ void vmovrs(const Operand *OpRt, const Operand *OpSn, CondARM32::Cond Cond);
+
void vmovs(const Operand *OpSn, const OperandARM32FlexFpImm *OpFpImm,
CondARM32::Cond Cond);
// Move register to register.
Variable *Dest = getDest();
+  // TODO(kschimpf): Consider merging the emitIAS.. methods into
+  // a single case statement.
switch (Dest->getType()) {
default:
// TODO(kschimpf): Fill this out more.
return Asm->setNeedsTextFixup();
+ case IceType_i1:
+ case IceType_i8:
+ case IceType_i16:
+ case IceType_i32:
+ assert(Src0->getType() == IceType_f32 && "Expected int to float move");
+ Asm->vmovrs(Dest, Src0, getPredicate());
+ return;
+ case IceType_i64:
+ assert(false && "i64 to float moves not handled here!");
+ return;
case IceType_f32:
switch (Src0->getType()) {
default:
- // TODO(kschimpf): Fill this out more?
- return Asm->setNeedsTextFixup();
+ assert(false && "Expected float to int move");
+ return;
+ case IceType_i1:
+ case IceType_i8:
+ case IceType_i16:
case IceType_i32:
return Asm->vmovsr(Dest, Src0, getPredicate());
}
--- /dev/null
+; Show that we can move between float (S) and integer (GPR) registers.
+
+; REQUIRES: allow_dump
+
+; Compile using standalone assembler.
+; RUN: %p2i --filetype=asm -i %s --target=arm32 --args -Om1 \
+; RUN: --reg-use=s20,r5,r6 | FileCheck %s --check-prefix=ASM
+
+; Show bytes in assembled standalone code.
+; RUN: %p2i --filetype=asm -i %s --target=arm32 --assemble --disassemble \
+; RUN: --args -Om1 --reg-use=s20,r5,r6 | FileCheck %s --check-prefix=DIS
+
+; Compile using integrated assembler.
+; RUN: %p2i --filetype=iasm -i %s --target=arm32 --args -Om1 \
+; RUN: --reg-use=s20,r5,r6 \
+; RUN: | FileCheck %s --check-prefix=IASM
+
+; Show bytes in assembled integrated code.
+; RUN: %p2i --filetype=iasm -i %s --target=arm32 --assemble --disassemble \
+; RUN: --args -Om1 --reg-use=s20,r5,r6 | FileCheck %s --check-prefix=DIS
+
+; Each FloatToIN function converts a float to an N-bit integer; the
+; lowering must move the float value out of s20 into core register r5
+; with "vmov r5, s20" (encoding ee1a5a10).
+define internal void @FloatToI1() {
+; ASM-LABEL: FloatToI1:
+; DIS-LABEL: {{.+}} <FloatToI1>:
+
+  %v = fptoui float 0.0 to i1
+
+; ASM: vmov r5, s20
+; DIS: {{.+}}: ee1a5a10
+; IASM-NOT: vmov
+
+  ret void
+}
+
+define internal void @FloatToI8() {
+; ASM-LABEL: FloatToI8:
+; DIS-LABEL: {{.+}} <FloatToI8>:
+
+  %v = fptoui float 0.0 to i8
+
+; ASM: vmov r5, s20
+; DIS: {{.+}}: ee1a5a10
+; IASM-NOT: vmov
+
+  ret void
+}
+
+define internal void @FloatToI16() {
+; ASM-LABEL: FloatToI16:
+; DIS-LABEL: {{.+}} <FloatToI16>:
+
+  %v = fptoui float 0.0 to i16
+
+; ASM: vmov r5, s20
+; DIS: {{.+}}: ee1a5a10
+; IASM-NOT: vmov
+
+  ret void
+}
+
+define internal void @FloatToI32() {
+; ASM-LABEL: FloatToI32:
+; DIS-LABEL: {{.+}} <FloatToI32>:
+
+  %v = fptoui float 0.0 to i32
+
+; ASM: vmov r5, s20
+; DIS: {{.+}}: ee1a5a10
+; IASM-NOT: vmov
+
+  ret void
+}
+
+; Each INToFloat function converts an N-bit integer to a float; the
+; lowering must move the value from r5 into s20 with "vmov s20, r5"
+; (encoding ee0a5a10).
+define internal float @I1ToFloat() {
+; ASM-LABEL: I1ToFloat:
+; DIS-LABEL: {{.+}} <I1ToFloat>:
+
+  %v = uitofp i1 1 to float
+
+; ASM: vmov s20, r5
+; DIS: {{.+}}: ee0a5a10
+; IASM-NOT: vmov
+
+  ret float %v
+}
+
+define internal float @I8ToFloat() {
+; ASM-LABEL: I8ToFloat:
+; DIS-LABEL: {{.+}} <I8ToFloat>:
+
+  %v = uitofp i8 1 to float
+
+; ASM: vmov s20, r5
+; DIS: {{.+}}: ee0a5a10
+; IASM-NOT: vmov
+
+  ret float %v
+}
+
+define internal float @I16ToFloat() {
+; ASM-LABEL: I16ToFloat:
+; DIS-LABEL: {{.+}} <I16ToFloat>:
+
+  %v = uitofp i16 1 to float
+
+; ASM: vmov s20, r5
+; DIS: {{.+}}: ee0a5a10
+; IASM-NOT: vmov
+
+  ret float %v
+}
+
+define internal float @I32ToFloat() {
+; ASM-LABEL: I32ToFloat:
+; DIS-LABEL: {{.+}} <I32ToFloat>:
+
+  %v = uitofp i32 17 to float
+
+; ASM: vmov s20, r5
+; DIS: {{.+}}: ee0a5a10
+; IASM-NOT: vmov
+
+  ret float %v
+}