X86::CMOVA32rr, X86::CMOVAE32rr, X86::CMOVP32rr, X86::CMOVNP32rr,
};
static const unsigned CMOVTABFP[] = {
- X86::FCMOVE , X86::FCMOVNE, /*missing*/0, /*missing*/0,
- /*missing*/0, /*missing*/0, X86::FCMOVB , X86::FCMOVBE,
- X86::FCMOVA , X86::FCMOVAE, X86::FCMOVP , X86::FCMOVNP
+ X86::FpCMOVE, X86::FpCMOVNE, /*missing*/0, /*missing*/0,
+ /*missing*/0, /*missing*/ 0, X86::FpCMOVB, X86::FpCMOVBE,
+ X86::FpCMOVA, X86::FpCMOVAE, X86::FpCMOVP, X86::FpCMOVNP
};
static const int SSE_CMOVTAB[] = {
/*CMPEQ*/ 0, /*CMPNEQ*/ 4, /*missing*/ 0, /*missing*/ 0,
default: assert(0 && "Cannot select this type!");
case MVT::i16: Opc = X86::CMOVE16rr; break;
case MVT::i32: Opc = X86::CMOVE32rr; break;
- case MVT::f64: Opc = X86::FCMOVE; break;
+ case MVT::f64: Opc = X86::FpCMOVE; break;
}
} else {
// FIXME: CMP R, 0 -> TEST R, R
if (!X86ScalarSSE && (CN->isExactlyValue(+0.0) ||
CN->isExactlyValue(-0.0))) {
unsigned Reg = SelectExpr(LHS);
- BuildMI(BB, X86::FTST, 1).addReg(Reg);
+ BuildMI(BB, X86::FpTST, 1).addReg(Reg);
BuildMI(BB, X86::FNSTSW8r, 0);
BuildMI(BB, X86::SAHF, 1);
return;
case MVT::i16: Opc = X86::CMP16rr; break;
case MVT::i32: Opc = X86::CMP32rr; break;
case MVT::f32: Opc = X86::UCOMISSrr; break;
- case MVT::f64: Opc = X86ScalarSSE ? X86::UCOMISDrr : X86::FUCOMIr; break;
+ case MVT::f64: Opc = X86ScalarSSE ? X86::UCOMISDrr : X86::FpUCOMIr; break;
}
unsigned Tmp1, Tmp2;
if (getRegPressure(LHS) > getRegPressure(RHS)) {
if (cast<ConstantFPSDNode>(N)->isExactlyValue(+0.0) ||
cast<ConstantFPSDNode>(N)->isExactlyValue(-0.0))
- BuildMI(BB, X86::FLD0, 0, Tmp1);
+ BuildMI(BB, X86::FpLD0, 0, Tmp1);
else if (cast<ConstantFPSDNode>(N)->isExactlyValue(+1.0) ||
cast<ConstantFPSDNode>(N)->isExactlyValue(-1.0))
- BuildMI(BB, X86::FLD1, 0, Tmp1);
+ BuildMI(BB, X86::FpLD1, 0, Tmp1);
else
assert(0 && "Unexpected constant!");
if (Tmp1 != Result)
- BuildMI(BB, X86::FCHS, 1, Result).addReg(Tmp1);
+ BuildMI(BB, X86::FpCHS, 1, Result).addReg(Tmp1);
return Result;
case ISD::Constant:
switch (N.getValueType()) {
case ISD::UNDEF:
if (Node->getValueType(0) == MVT::f64) {
// FIXME: SHOULD TEACH STACKIFIER ABOUT UNDEF VALUES!
- BuildMI(BB, X86::FLD0, 0, Result);
+ BuildMI(BB, X86::FpLD0, 0, Result);
} else {
BuildMI(BB, X86::IMPLICIT_DEF, 0, Result);
}
switch (SrcTy) {
case MVT::i32:
addFrameReference(BuildMI(BB, X86::MOV32mr, 5), FrameIdx).addReg(Tmp1);
- addFrameReference(BuildMI(BB, X86::FILD32m, 5, Result), FrameIdx);
+ addFrameReference(BuildMI(BB, X86::FpILD32m, 5, Result), FrameIdx);
break;
case MVT::i16:
addFrameReference(BuildMI(BB, X86::MOV16mr, 5), FrameIdx).addReg(Tmp1);
- addFrameReference(BuildMI(BB, X86::FILD16m, 5, Result), FrameIdx);
+ addFrameReference(BuildMI(BB, X86::FpILD16m, 5, Result), FrameIdx);
break;
default: break; // No promotion required.
}
assert(Op1.getOpcode() == ISD::LOAD && "SSE load not promoted");
Opc = X86::ADDSDrm;
} else {
- Opc = Op1.getOpcode() == ISD::LOAD ? X86::FADD64m : X86::FADD32m;
+ Opc = Op1.getOpcode() == ISD::LOAD ? X86::FpADD64m : X86::FpADD32m;
}
break;
}
Opc = (N.getValueType() == MVT::f32) ? X86::SQRTSSrr : X86::SQRTSDrr;
BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
} else {
- BuildMI(BB, X86::FSQRT, 1, Result).addReg(Tmp1);
+ BuildMI(BB, X86::FpSQRT, 1, Result).addReg(Tmp1);
}
return Result;
Tmp1 = SelectExpr(Node->getOperand(0));
switch (N.getOpcode()) {
default: assert(0 && "Unreachable!");
- case ISD::FABS: BuildMI(BB, X86::FABS, 1, Result).addReg(Tmp1); break;
- case ISD::FNEG: BuildMI(BB, X86::FCHS, 1, Result).addReg(Tmp1); break;
- case ISD::FSIN: BuildMI(BB, X86::FSIN, 1, Result).addReg(Tmp1); break;
- case ISD::FCOS: BuildMI(BB, X86::FCOS, 1, Result).addReg(Tmp1); break;
+ case ISD::FABS: BuildMI(BB, X86::FpABS, 1, Result).addReg(Tmp1); break;
+ case ISD::FNEG: BuildMI(BB, X86::FpCHS, 1, Result).addReg(Tmp1); break;
+ case ISD::FSIN: BuildMI(BB, X86::FpSIN, 1, Result).addReg(Tmp1); break;
+ case ISD::FCOS: BuildMI(BB, X86::FpCOS, 1, Result).addReg(Tmp1); break;
}
return Result;
case ISD::XOR: {
static const unsigned SUBTab[] = {
X86::SUB8ri, X86::SUB16ri, X86::SUB32ri, 0, 0,
- X86::SUB8rm, X86::SUB16rm, X86::SUB32rm, X86::FSUB32m, X86::FSUB64m,
+ X86::SUB8rm, X86::SUB16rm, X86::SUB32rm, X86::FpSUB32m, X86::FpSUB64m,
X86::SUB8rr, X86::SUB16rr, X86::SUB32rr, X86::FpSUB , X86::FpSUB,
};
static const unsigned SSE_SUBTab[] = {
};
static const unsigned MULTab[] = {
0, X86::IMUL16rri, X86::IMUL32rri, 0, 0,
- 0, X86::IMUL16rm , X86::IMUL32rm, X86::FMUL32m, X86::FMUL64m,
+ 0, X86::IMUL16rm , X86::IMUL32rm, X86::FpMUL32m, X86::FpMUL64m,
0, X86::IMUL16rr , X86::IMUL32rr, X86::FpMUL , X86::FpMUL,
};
static const unsigned SSE_MULTab[] = {
// For FP, emit 'reverse' subract, with a memory operand.
if (N.getValueType() == MVT::f64 && !X86ScalarSSE) {
if (Op0.getOpcode() == ISD::EXTLOAD)
- Opc = X86::FSUBR32m;
+ Opc = X86::FpSUBR32m;
else
- Opc = X86::FSUBR64m;
+ Opc = X86::FpSUBR64m;
X86AddressMode AM;
EmitFoldedLoad(Op0, AM);
// Check for reversed and unreversed DIV.
if (isFoldableLoad(N.getOperand(0), N.getOperand(1), true)) {
if (N.getOperand(0).getOpcode() == ISD::EXTLOAD)
- Opc = X86::FDIVR32m;
+ Opc = X86::FpDIVR32m;
else
- Opc = X86::FDIVR64m;
+ Opc = X86::FpDIVR64m;
X86AddressMode AM;
EmitFoldedLoad(N.getOperand(0), AM);
Tmp1 = SelectExpr(N.getOperand(1));
} else if (isFoldableLoad(N.getOperand(1), N.getOperand(0), true) &&
N.getOperand(1).getOpcode() == ISD::LOAD) {
if (N.getOperand(1).getOpcode() == ISD::EXTLOAD)
- Opc = X86::FDIV32m;
+ Opc = X86::FpDIV32m;
else
- Opc = X86::FDIV64m;
+ Opc = X86::FpDIV64m;
X86AddressMode AM;
EmitFoldedLoad(N.getOperand(1), AM);
Tmp1 = SelectExpr(N.getOperand(0));
if (X86ScalarSSE) {
Opc = X86::MOVSDrm;
} else {
- Opc = X86::FLD64m;
+ Opc = X86::FpLD64m;
ContainsFPCode = true;
}
break;
Select(Chain);
}
- addFullAddress(BuildMI(BB, X86::FILD64m, 4, Result), AM);
+ addFullAddress(BuildMI(BB, X86::FpILD64m, 4, Result), AM);
}
return Result;
unsigned CPIdx = BB->getParent()->getConstantPool()->
getConstantPoolIndex(CP->get());
- addConstantPoolReference(BuildMI(BB, X86::FLD32m, 4, Result), CPIdx);
+ addConstantPoolReference(BuildMI(BB, X86::FpLD32m, 4, Result), CPIdx);
return Result;
}
case MVT::f64:
assert(cast<VTSDNode>(Node->getOperand(3))->getVT() == MVT::f32 &&
"Bad EXTLOAD!");
- addFullAddress(BuildMI(BB, X86::FLD32m, 5, Result), AM);
+ addFullAddress(BuildMI(BB, X86::FpLD32m, 5, Result), AM);
break;
case MVT::i32:
switch (cast<VTSDNode>(Node->getOperand(3))->getVT()) {
unsigned Size = MVT::getSizeInBits(MVT::f64)/8;
MachineFunction *F = BB->getParent();
int FrameIdx = F->getFrameInfo()->CreateStackObject(Size, Size);
- addFrameReference(BuildMI(BB, X86::FST64m, 5), FrameIdx).addReg(X86::FP0);
+ addFrameReference(BuildMI(BB, X86::FpST64m, 5), FrameIdx).addReg(X86::FP0);
addFrameReference(BuildMI(BB, X86::MOVSDrm, 4, Result), FrameIdx);
break;
} else {
MachineFunction *F = BB->getParent();
int FrameIdx = F->getFrameInfo()->CreateStackObject(Size, Size);
addFrameReference(BuildMI(BB, X86::MOVSSmr, 5), FrameIdx).addReg(Tmp1);
- addFrameReference(BuildMI(BB, X86::FLD32m, 4, X86::FP0), FrameIdx);
+ addFrameReference(BuildMI(BB, X86::FpLD32m, 4, X86::FP0), FrameIdx);
BuildMI(BB, X86::FpSETRESULT, 1).addReg(X86::FP0);
ContainsFPCode = true;
} else {
MachineFunction *F = BB->getParent();
int FrameIdx = F->getFrameInfo()->CreateStackObject(Size, Size);
addFrameReference(BuildMI(BB, X86::MOVSDmr, 5), FrameIdx).addReg(Tmp1);
- addFrameReference(BuildMI(BB, X86::FLD64m, 4, X86::FP0), FrameIdx);
+ addFrameReference(BuildMI(BB, X86::FpLD64m, 4, X86::FP0), FrameIdx);
BuildMI(BB, X86::FpSETRESULT, 1).addReg(X86::FP0);
ContainsFPCode = true;
} else {
// Get the X86 opcode to use.
switch (N.getOpcode()) {
- case X86ISD::FP_TO_INT16_IN_MEM: Tmp1 = X86::FIST16m; break;
- case X86ISD::FP_TO_INT32_IN_MEM: Tmp1 = X86::FIST32m; break;
- case X86ISD::FP_TO_INT64_IN_MEM: Tmp1 = X86::FISTP64m; break;
+ case X86ISD::FP_TO_INT16_IN_MEM: Tmp1 = X86::FpIST16m; break;
+ case X86ISD::FP_TO_INT32_IN_MEM: Tmp1 = X86::FpIST32m; break;
+ case X86ISD::FP_TO_INT64_IN_MEM: Tmp1 = X86::FpIST64m; break;
}
addFullAddress(BuildMI(BB, Tmp1, 5), AM).addReg(ValReg);
case MVT::i1: Opc = X86::MOV8mr; break;
case MVT::f32:
assert(!X86ScalarSSE && "Cannot truncstore scalar SSE regs");
- Opc = X86::FST32m; break;
+ Opc = X86::FpST32m; break;
}
std::vector<std::pair<unsigned, unsigned> > RP;
case MVT::i16: Opc = X86::MOV16mr; break;
case MVT::i32: Opc = X86::MOV32mr; break;
case MVT::f32: Opc = X86::MOVSSmr; break;
- case MVT::f64: Opc = X86ScalarSSE ? X86::MOVSDmr : X86::FST64m; break;
+ case MVT::f64: Opc = X86ScalarSSE ? X86::MOVSDmr : X86::FpST64m; break;
}
std::vector<std::pair<unsigned, unsigned> > RP;
// Stack-based Floating point support
//===----------------------------------------------------------------------===//
-// FIXME: These need to indicate mod/ref sets for FP regs... & FP 'TOP'
+// Floating point support. All FP Stack operations are represented with two
+// instructions here. The first instruction, generated by the instruction
+// selector, uses "RFP" registers: a traditional register file to reference
+// floating point values. These instructions are all pseudo instructions and
+// use the "Fp" prefix. The second instruction is defined with FPI, which is
+// the actual instruction emitted by the assembler. The FP stackifier pass
+// converts one to the other after register allocation occurs.
+//
+// Note that the FpI instruction should have instruction selection info (e.g.
+// a pattern) and the FPI instruction should have emission info (e.g. opcode
+// encoding and asm printing info).
+
+// FPI - Floating Point Instruction template.
+class FPI<bits<8> o, Format F, dag ops, string asm> : I<o, F, ops, asm, []> {}
-// Floating point instruction template
-class FPI<bits<8> o, Format F, FPFormat fp, dag ops, string asm,
- list<dag> pattern>
- : X86Inst<o, F, NoImm, ops, asm> {
+// FpI - Floating Point Pseudo Instruction template.
+class FpI<dag ops, FPFormat fp, list<dag> pattern>
+ : X86Inst<0, Pseudo, NoImm, ops, ""> {
let FPForm = fp; let FPFormBits = FPForm.Value;
let Pattern = pattern;
}
-// Pseudo instructions for floating point. We use these pseudo instructions
-// because they can be expanded by the fp spackifier into one of many different
-// forms of instructions for doing these operations. Until the stackifier runs,
-// we prefer to be abstract.
-def FpMOV : FPI<0, Pseudo, SpecialFP,
- (ops RFP:$dst, RFP:$src), "", []>; // f1 = fmov f2
-def FpADD : FPI<0, Pseudo, TwoArgFP ,
- (ops RFP:$dst, RFP:$src1, RFP:$src2), "",
- []>; // f1 = fadd f2, f3
-def FpSUB : FPI<0, Pseudo, TwoArgFP ,
- (ops RFP:$dst, RFP:$src1, RFP:$src2), "",
- []>; // f1 = fsub f2, f3
-def FpMUL : FPI<0, Pseudo, TwoArgFP ,
- (ops RFP:$dst, RFP:$src1, RFP:$src2), "",
- []>; // f1 = fmul f2, f3
-def FpDIV : FPI<0, Pseudo, TwoArgFP ,
- (ops RFP:$dst, RFP:$src1, RFP:$src2), "",
- []>; // f1 = fdiv f2, f3
-
-def FpLD32m : FPI<0xD9, MRM0m, ZeroArgFP,
- (ops RFP:$dst, f32mem:$src),
- "fld{s} $src",
- [(set RFP:$dst, (X86fld addr:$src, f32))]>;
-
-def FpLD64m : FPI<0xDD, MRM0m, ZeroArgFP,
- (ops RFP:$dst, f64mem:$src),
- "fld{l} $src",
- [(set RFP:$dst, (X86fld addr:$src, f64))]>;
-
-def FpGETRESULT : FPI<0, Pseudo, SpecialFP, (ops RFP:$dst), "", []>,
- Imp<[ST0], []>; // FPR = ST(0)
-
-def FpSETRESULT : FPI<0, Pseudo, SpecialFP, (ops RFP:$src), "",
- [(X86fpset RFP:$src)]>,
- Imp<[], [ST0]>; // ST(0) = FPR
-
-// FADD reg, mem: Before stackification, these are represented by:
-// R1 = FADD* R2, [mem]
-def FADD32m : FPI<0xD8, MRM0m, OneArgFPRW, // ST(0) = ST(0) + [mem32real]
- (ops f32mem:$src, variable_ops),
- "fadd{s} $src", []>;
-def FADD64m : FPI<0xDC, MRM0m, OneArgFPRW, // ST(0) = ST(0) + [mem64real]
- (ops f64mem:$src, variable_ops),
- "fadd{l} $src", []>;
-//def FIADD16m : FPI<0xDE, MRM0m, OneArgFPRW>; // ST(0) = ST(0) + [mem16int]
-//def FIADD32m : FPI<0xDA, MRM0m, OneArgFPRW>; // ST(0) = ST(0) + [mem32int]
-
-// FMUL reg, mem: Before stackification, these are represented by:
-// R1 = FMUL* R2, [mem]
-def FMUL32m : FPI<0xD8, MRM1m, OneArgFPRW, // ST(0) = ST(0) * [mem32real]
- (ops f32mem:$src, variable_ops),
- "fmul{s} $src", []>;
-def FMUL64m : FPI<0xDC, MRM1m, OneArgFPRW, // ST(0) = ST(0) * [mem64real]
- (ops f64mem:$src, variable_ops),
- "fmul{l} $src", []>;
-// ST(0) = ST(0) * [mem16int]
-//def FIMUL16m : FPI16m<"fimul", 0xDE, MRM1m, OneArgFPRW>;
-// ST(0) = ST(0) * [mem32int]
-//def FIMUL32m : FPI32m<"fimul", 0xDA, MRM1m, OneArgFPRW>;
-
-// FSUB reg, mem: Before stackification, these are represented by:
-// R1 = FSUB* R2, [mem]
-def FSUB32m : FPI<0xD8, MRM4m, OneArgFPRW, // ST(0) = ST(0) - [mem32real]
- (ops f32mem:$src, variable_ops),
- "fsub{s} $src", []>;
-def FSUB64m : FPI<0xDC, MRM4m, OneArgFPRW, // ST(0) = ST(0) - [mem64real]
- (ops f64mem:$src, variable_ops),
- "fsub{l} $src", []>;
-// ST(0) = ST(0) - [mem16int]
-//def FISUB16m : FPI16m<"fisub", 0xDE, MRM4m, OneArgFPRW>;
-// ST(0) = ST(0) - [mem32int]
-//def FISUB32m : FPI32m<"fisub", 0xDA, MRM4m, OneArgFPRW>;
-
-// FSUBR reg, mem: Before stackification, these are represented by:
-// R1 = FSUBR* R2, [mem]
-
-// Note that the order of operands does not reflect the operation being
-// performed.
-def FSUBR32m : FPI<0xD8, MRM5m, OneArgFPRW, // ST(0) = [mem32real] - ST(0)
- (ops f32mem:$src, variable_ops),
- "fsubr{s} $src", []>;
-def FSUBR64m : FPI<0xDC, MRM5m, OneArgFPRW, // ST(0) = [mem64real] - ST(0)
- (ops f64mem:$src, variable_ops),
- "fsubr{l} $src", []>;
-// ST(0) = [mem16int] - ST(0)
-//def FISUBR16m : FPI16m<"fisubr", 0xDE, MRM5m, OneArgFPRW>;
-// ST(0) = [mem32int] - ST(0)
-//def FISUBR32m : FPI32m<"fisubr", 0xDA, MRM5m, OneArgFPRW>;
-
-// FDIV reg, mem: Before stackification, these are represented by:
-// R1 = FDIV* R2, [mem]
-def FDIV32m : FPI<0xD8, MRM6m, OneArgFPRW, // ST(0) = ST(0) / [mem32real]
- (ops f32mem:$src, variable_ops),
- "fdiv{s} $src", []>;
-def FDIV64m : FPI<0xDC, MRM6m, OneArgFPRW, // ST(0) = ST(0) / [mem64real]
- (ops f64mem:$src, variable_ops),
- "fdiv{l} $src", []>;
-// ST(0) = ST(0) / [mem16int]
-//def FIDIV16m : FPI16m<"fidiv", 0xDE, MRM6m, OneArgFPRW>;
-// ST(0) = ST(0) / [mem32int]
-//def FIDIV32m : FPI32m<"fidiv", 0xDA, MRM6m, OneArgFPRW>;
-
-// FDIVR reg, mem: Before stackification, these are represented by:
-// R1 = FDIVR* R2, [mem]
-// Note that the order of operands does not reflect the operation being
-// performed.
-def FDIVR32m : FPI<0xD8, MRM7m, OneArgFPRW, // ST(0) = [mem32real] / ST(0)
- (ops f32mem:$src, variable_ops),
- "fdivr{s} $src", []>;
-def FDIVR64m : FPI<0xDC, MRM7m, OneArgFPRW, // ST(0) = [mem64real] / ST(0)
- (ops f64mem:$src, variable_ops),
- "fdivr{l} $src", []>;
-// ST(0) = [mem16int] / ST(0)
-//def FIDIVR16m : FPI16m<"fidivr", 0xDE, MRM7m, OneArgFPRW>;
-// ST(0) = [mem32int] / ST(0)
-//def FIDIVR32m : FPI32m<"fidivr", 0xDA, MRM7m, OneArgFPRW>;
-
-
-// Floating point cmovs...
-let isTwoAddress = 1, Uses = [ST0], Defs = [ST0] in {
- def FCMOVB : FPI<0xC0, AddRegFrm, CondMovFP,
- (ops RST:$op, variable_ops),
- "fcmovb {$op, %ST(0)|%ST(0), $op}", []>, DA;
- def FCMOVBE : FPI<0xD0, AddRegFrm, CondMovFP,
- (ops RST:$op, variable_ops),
- "fcmovbe {$op, %ST(0)|%ST(0), $op}", []>, DA;
- def FCMOVE : FPI<0xC8, AddRegFrm, CondMovFP,
- (ops RST:$op, variable_ops),
- "fcmove {$op, %ST(0)|%ST(0), $op}", []>, DA;
- def FCMOVP : FPI<0xD8, AddRegFrm, CondMovFP,
- (ops RST:$op, variable_ops),
- "fcmovu {$op, %ST(0)|%ST(0), $op}", []>, DA;
- def FCMOVAE : FPI<0xC0, AddRegFrm, CondMovFP,
- (ops RST:$op, variable_ops),
- "fcmovae {$op, %ST(0)|%ST(0), $op}", []>, DB;
- def FCMOVA : FPI<0xD0, AddRegFrm, CondMovFP,
- (ops RST:$op, variable_ops),
- "fcmova {$op, %ST(0)|%ST(0), $op}", []>, DB;
- def FCMOVNE : FPI<0xC8, AddRegFrm, CondMovFP,
- (ops RST:$op, variable_ops),
- "fcmovne {$op, %ST(0)|%ST(0), $op}", []>, DB;
- def FCMOVNP : FPI<0xD8, AddRegFrm, CondMovFP,
- (ops RST:$op, variable_ops),
- "fcmovnu {$op, %ST(0)|%ST(0), $op}", []>, DB;
-}
-
-// Floating point loads & stores...
-// FIXME: these are all marked variable_ops because they have an implicit
-// destination. Instructions like FILD* that are generated by the instruction
-// selector (not the fp stackifier) need more accurate operand accounting.
-def FLDrr : FPI<0xC0, AddRegFrm, NotFP,
- (ops RST:$src, variable_ops),
- "fld $src", []>, D9;
-def FLD32m : FPI<0xD9, MRM0m, ZeroArgFP,
- (ops f32mem:$src, variable_ops),
- "fld{s} $src", []>;
-def FLD64m : FPI<0xDD, MRM0m, ZeroArgFP,
- (ops f64mem:$src, variable_ops),
- "fld{l} $src", []>;
-def FLD80m : FPI<0xDB, MRM5m, ZeroArgFP,
- (ops f80mem:$src, variable_ops),
- "fld{t} $src", []>;
-def FILD16m : FPI<0xDF, MRM0m, ZeroArgFP,
- (ops i16mem:$src, variable_ops),
- "fild{s} $src", []>;
-def FILD32m : FPI<0xDB, MRM0m, ZeroArgFP,
- (ops i32mem:$src, variable_ops),
- "fild{l} $src", []>;
-def FILD64m : FPI<0xDF, MRM5m, ZeroArgFP,
- (ops i64mem:$src, variable_ops),
- "fild{ll} $src", []>;
-
-def FSTrr : FPI<0xD0, AddRegFrm, NotFP,
- (ops RST:$op, variable_ops),
- "fst $op", []>, DD;
-def FSTPrr : FPI<0xD8, AddRegFrm, NotFP,
- (ops RST:$op, variable_ops),
- "fstp $op", []>, DD;
-def FST32m : FPI<0xD9, MRM2m, OneArgFP,
- (ops f32mem:$op, variable_ops),
- "fst{s} $op", []>;
-def FST64m : FPI<0xDD, MRM2m, OneArgFP,
- (ops f64mem:$op, variable_ops),
- "fst{l} $op", []>;
-def FSTP32m : FPI<0xD9, MRM3m, OneArgFP,
- (ops f32mem:$op, variable_ops),
- "fstp{s} $op", []>;
-def FSTP64m : FPI<0xDD, MRM3m, OneArgFP,
- (ops f64mem:$op, variable_ops),
- "fstp{l} $op", []>;
-def FSTP80m : FPI<0xDB, MRM7m, OneArgFP,
- (ops f80mem:$op, variable_ops),
- "fstp{t} $op", []>;
-
-def FIST16m : FPI<0xDF, MRM2m , OneArgFP,
- (ops i16mem:$op, variable_ops),
- "fist{s} $op", []>;
-def FIST32m : FPI<0xDB, MRM2m , OneArgFP,
- (ops i32mem:$op, variable_ops),
- "fist{l} $op", []>;
-def FISTP16m : FPI<0xDF, MRM3m , NotFP ,
- (ops i16mem:$op, variable_ops),
- "fistp{s} $op", []>;
-def FISTP32m : FPI<0xDB, MRM3m , NotFP ,
- (ops i32mem:$op, variable_ops),
- "fistp{l} $op", []>;
-def FISTP64m : FPI<0xDF, MRM7m , OneArgFP,
- (ops i64mem:$op, variable_ops),
- "fistp{ll} $op", []>;
-
-def FXCH : FPI<0xC8, AddRegFrm, NotFP,
- (ops RST:$op), "fxch $op", []>, D9; // fxch ST(i), ST(0)
-
-// Floating point constant loads...
-def FLD0 : FPI<0xEE, RawFrm, ZeroArgFP, (ops variable_ops), "fldz", []>, D9;
-def FLD1 : FPI<0xE8, RawFrm, ZeroArgFP, (ops variable_ops), "fld1", []>, D9;
-
-
-// Unary operations...
-def FCHS : FPI<0xE0, RawFrm, OneArgFPRW, // f1 = fchs f2
- (ops variable_ops),
- "fchs", []>, D9;
-def FABS : FPI<0xE1, RawFrm, OneArgFPRW, // f1 = fabs f2
- (ops variable_ops),
- "fabs", []>, D9;
-def FSQRT : FPI<0xFA, RawFrm, OneArgFPRW, // fsqrt ST(0)
- (ops variable_ops),
- "fsqrt", []>, D9;
-def FSIN : FPI<0xFE, RawFrm, OneArgFPRW, // fsin ST(0)
- (ops variable_ops),
- "fsin", []>, D9;
-def FCOS : FPI<0xFF, RawFrm, OneArgFPRW, // fcos ST(0)
- (ops variable_ops),
- "fcos", []>, D9;
-def FTST : FPI<0xE4, RawFrm, OneArgFP , // ftst ST(0)
- (ops variable_ops),
- "ftst", []>, D9;
-
-// Binary arithmetic operations...
-class FPST0rInst<bits<8> o, dag ops, string asm>
- : I<o, AddRegFrm, ops, asm, []>, D8 {
- list<Register> Uses = [ST0];
- list<Register> Defs = [ST0];
-}
-class FPrST0Inst<bits<8> o, dag ops, string asm>
- : I<o, AddRegFrm, ops, asm, []>, DC {
- list<Register> Uses = [ST0];
-}
-class FPrST0PInst<bits<8> o, dag ops, string asm>
- : I<o, AddRegFrm, ops, asm, []>, DE {
- list<Register> Uses = [ST0];
+// Random Pseudo Instructions.
+def FpGETRESULT : FpI<(ops RFP:$dst), SpecialFP,       // FPR = ST(0)
+                  []>, Imp<[ST0], []>;  // implicitly reads ST(0)
+def FpSETRESULT : FpI<(ops RFP:$src), SpecialFP,
+                  [(X86fpset RFP:$src)]>, Imp<[], [ST0]>;  // ST(0) = FPR
+def FpMOV       : FpI<(ops RFP:$dst, RFP:$src), SpecialFP,
+                  []>; // f1 = fmov f2
+
+// Binary Ops with a memory source.  The *32m forms read an f32 memory
+// operand; the *64m forms read an f64 one, matching the real
+// FADD64m/FMUL64m/... instructions below.
+def FpADD32m  : FpI<(ops RFP:$dst, RFP:$src1, f32mem:$src2), OneArgFPRW,
+                    []>; // ST(0) = ST(0) + [mem32]
+def FpADD64m  : FpI<(ops RFP:$dst, RFP:$src1, f64mem:$src2), OneArgFPRW,
+                    []>; // ST(0) = ST(0) + [mem64]
+def FpMUL32m  : FpI<(ops RFP:$dst, RFP:$src1, f32mem:$src2), OneArgFPRW,
+                    []>; // ST(0) = ST(0) * [mem32]
+def FpMUL64m  : FpI<(ops RFP:$dst, RFP:$src1, f64mem:$src2), OneArgFPRW,
+                    []>; // ST(0) = ST(0) * [mem64]
+def FpSUB32m  : FpI<(ops RFP:$dst, RFP:$src1, f32mem:$src2), OneArgFPRW,
+                    []>; // ST(0) = ST(0) - [mem32]
+def FpSUB64m  : FpI<(ops RFP:$dst, RFP:$src1, f64mem:$src2), OneArgFPRW,
+                    []>; // ST(0) = ST(0) - [mem64]
+def FpSUBR32m : FpI<(ops RFP:$dst, RFP:$src1, f32mem:$src2), OneArgFPRW,
+                    []>; // ST(0) = [mem32] - ST(0)
+def FpSUBR64m : FpI<(ops RFP:$dst, RFP:$src1, f64mem:$src2), OneArgFPRW,
+                    []>; // ST(0) = [mem64] - ST(0)
+def FpDIV32m  : FpI<(ops RFP:$dst, RFP:$src1, f32mem:$src2), OneArgFPRW,
+                    []>; // ST(0) = ST(0) / [mem32]
+def FpDIV64m  : FpI<(ops RFP:$dst, RFP:$src1, f64mem:$src2), OneArgFPRW,
+                    []>; // ST(0) = ST(0) / [mem64]
+def FpDIVR32m : FpI<(ops RFP:$dst, RFP:$src1, f32mem:$src2), OneArgFPRW,
+                    []>; // ST(0) = [mem32] / ST(0)
+def FpDIVR64m : FpI<(ops RFP:$dst, RFP:$src1, f64mem:$src2), OneArgFPRW,
+                    []>; // ST(0) = [mem64] / ST(0)
+
+
+// Real FP-stack memory-arithmetic instructions; the FP stackifier
+// rewrites the Fp* pseudos above into these after register allocation.
+def FADD32m  : FPI<0xD8, MRM0m, (ops f32mem:$src), "fadd{s} $src">;   // ST(0) = ST(0) + [mem32real]
+def FADD64m  : FPI<0xDC, MRM0m, (ops f64mem:$src), "fadd{l} $src">;   // ST(0) = ST(0) + [mem64real]
+def FMUL32m  : FPI<0xD8, MRM1m, (ops f32mem:$src), "fmul{s} $src">;   // ST(0) = ST(0) * [mem32real]
+def FMUL64m  : FPI<0xDC, MRM1m, (ops f64mem:$src), "fmul{l} $src">;   // ST(0) = ST(0) * [mem64real]
+def FSUB32m  : FPI<0xD8, MRM4m, (ops f32mem:$src), "fsub{s} $src">;   // ST(0) = ST(0) - [mem32real]
+def FSUB64m  : FPI<0xDC, MRM4m, (ops f64mem:$src), "fsub{l} $src">;   // ST(0) = ST(0) - [mem64real]
+def FSUBR32m : FPI<0xD8, MRM5m, (ops f32mem:$src), "fsubr{s} $src">;  // ST(0) = [mem32real] - ST(0)
+def FSUBR64m : FPI<0xDC, MRM5m, (ops f64mem:$src), "fsubr{l} $src">;  // ST(0) = [mem64real] - ST(0)
+def FDIV32m  : FPI<0xD8, MRM6m, (ops f32mem:$src), "fdiv{s} $src">;   // ST(0) = ST(0) / [mem32real]
+def FDIV64m  : FPI<0xDC, MRM6m, (ops f64mem:$src), "fdiv{l} $src">;   // ST(0) = ST(0) / [mem64real]
+def FDIVR32m : FPI<0xD8, MRM7m, (ops f32mem:$src), "fdivr{s} $src">;  // ST(0) = [mem32real] / ST(0)
+def FDIVR64m : FPI<0xDC, MRM7m, (ops f64mem:$src), "fdivr{l} $src">;  // ST(0) = [mem64real] / ST(0)
+
+// FIXME: Implement these when we have a dag-dag isel!
+//def FIADD16m : FPI<0xDE, MRM0m>; // ST(0) = ST(0) + [mem16int]
+//def FIADD32m : FPI<0xDA, MRM0m>; // ST(0) = ST(0) + [mem32int]
+//def FIMUL16m : FPI<0xDE, MRM1m>; // ST(0) = ST(0) * [mem16]
+//def FIMUL32m : FPI<0xDA, MRM1m>; // ST(0) = ST(0) * [mem32]
+//def FISUB16m : FPI<0xDE, MRM4m>; // ST(0) = ST(0) - [mem16int]
+//def FISUB32m : FPI<0xDA, MRM4m>; // ST(0) = ST(0) - [mem32int]
+//def FISUBR16m : FPI<0xDE, MRM5m>; // ST(0) = [mem16int] - ST(0)
+//def FISUBR32m : FPI<0xDA, MRM5m>; // ST(0) = [mem32int] - ST(0)
+//def FIDIV16m : FPI<0xDE, MRM6m>; // ST(0) = ST(0) / [mem16int]
+//def FIDIV32m : FPI<0xDA, MRM6m>; // ST(0) = ST(0) / [mem32int]
+//def FIDIVR16m : FPI<0xDE, MRM7m>; // ST(0) = [mem16int] / ST(0)
+//def FIDIVR32m : FPI<0xDA, MRM7m>; // ST(0) = [mem32int] / ST(0)
+
+
+// Floating point cmovs (pseudo forms only — operands live in the RFP
+// virtual register file).  isTwoAddress ties $dst to $src1, so $dst
+// must be in the same register class (RFP) as the sources.
+let isTwoAddress = 1 in {
+  def FpCMOVB  : FpI<(ops RFP:$dst, RFP:$src1, RFP:$src2), CondMovFP, []>;
+  def FpCMOVBE : FpI<(ops RFP:$dst, RFP:$src1, RFP:$src2), CondMovFP, []>;
+  def FpCMOVE  : FpI<(ops RFP:$dst, RFP:$src1, RFP:$src2), CondMovFP, []>;
+  def FpCMOVP  : FpI<(ops RFP:$dst, RFP:$src1, RFP:$src2), CondMovFP, []>;
+  def FpCMOVAE : FpI<(ops RFP:$dst, RFP:$src1, RFP:$src2), CondMovFP, []>;
+  def FpCMOVA  : FpI<(ops RFP:$dst, RFP:$src1, RFP:$src2), CondMovFP, []>;
+  def FpCMOVNE : FpI<(ops RFP:$dst, RFP:$src1, RFP:$src2), CondMovFP, []>;
+  def FpCMOVNP : FpI<(ops RFP:$dst, RFP:$src1, RFP:$src2), CondMovFP, []>;
}
-def FADDST0r : FPST0rInst <0xC0, (ops RST:$op),
- "fadd $op">;
-def FADDrST0 : FPrST0Inst <0xC0, (ops RST:$op),
- "fadd {%ST(0), $op|$op, %ST(0)}">;
-def FADDPrST0 : FPrST0PInst<0xC0, (ops RST:$op),
- "faddp $op">;
+def FCMOVB  : FPI<0xC0, AddRegFrm, (ops RST:$op),      // move if below
+                  "fcmovb {$op, %ST(0)|%ST(0), $op}">, DA;
+def FCMOVBE : FPI<0xD0, AddRegFrm, (ops RST:$op),      // move if below or equal
+                  "fcmovbe {$op, %ST(0)|%ST(0), $op}">, DA;
+def FCMOVE  : FPI<0xC8, AddRegFrm, (ops RST:$op),      // move if equal
+                  "fcmove {$op, %ST(0)|%ST(0), $op}">, DA;
+def FCMOVP  : FPI<0xD8, AddRegFrm, (ops RST:$op),      // move if parity (asm: fcmovu = unordered)
+                  "fcmovu {$op, %ST(0)|%ST(0), $op}">, DA;
+def FCMOVAE : FPI<0xC0, AddRegFrm, (ops RST:$op),      // move if above or equal
+                  "fcmovae {$op, %ST(0)|%ST(0), $op}">, DB;
+def FCMOVA  : FPI<0xD0, AddRegFrm, (ops RST:$op),      // move if above
+                  "fcmova {$op, %ST(0)|%ST(0), $op}">, DB;
+def FCMOVNE : FPI<0xC8, AddRegFrm, (ops RST:$op),      // move if not equal
+                  "fcmovne {$op, %ST(0)|%ST(0), $op}">, DB;
+def FCMOVNP : FPI<0xD8, AddRegFrm, (ops RST:$op),      // move if not parity (asm: fcmovnu)
+                  "fcmovnu {$op, %ST(0)|%ST(0), $op}">, DB;
+
+// Floating point loads & stores (pseudo forms; real FLD*/FST* below).
+def FpLD32m  : FpI<(ops RFP:$dst, f32mem:$src), ZeroArgFP,
+                  [(set RFP:$dst, (X86fld addr:$src, f32))]>;  // load f32
+def FpLD64m  : FpI<(ops RFP:$dst, f64mem:$src), ZeroArgFP,
+                  [(set RFP:$dst, (X86fld addr:$src, f64))]>;  // load f64
+def FpILD16m : FpI<(ops RFP:$dst, i16mem:$src), ZeroArgFP,
+                  []>;  // integer load (fild{s})
+def FpILD32m : FpI<(ops RFP:$dst, i32mem:$src), ZeroArgFP,
+                  []>;  // integer load (fild{l})
+def FpILD64m : FpI<(ops RFP:$dst, i64mem:$src), ZeroArgFP,
+                  []>;  // integer load (fild{ll})
+
+def FpST32m   : FpI<(ops f32mem:$op, RFP:$src), OneArgFP, []>;  // store f32
+def FpST64m   : FpI<(ops f64mem:$op, RFP:$src), OneArgFP, []>;  // store f64
+def FpSTP32m  : FpI<(ops f32mem:$op, RFP:$src), OneArgFP, []>;  // store f32, pop
+def FpSTP64m  : FpI<(ops f64mem:$op, RFP:$src), OneArgFP, []>;  // store f64, pop
+def FpIST16m  : FpI<(ops i16mem:$op, RFP:$src), OneArgFP, []>;  // integer store (fist{s})
+def FpIST32m  : FpI<(ops i32mem:$op, RFP:$src), OneArgFP, []>;  // integer store (fist{l})
+def FpIST64m  : FpI<(ops i64mem:$op, RFP:$src), OneArgFP, []>;  // integer store — NOTE(review): no non-popping fist64; presumably lowered to FISTP64m
+
+def FLD32m : FPI<0xD9, MRM0m, (ops f32mem:$src), "fld{s} $src">;
+def FLD64m : FPI<0xDD, MRM0m, (ops f64mem:$src), "fld{l} $src">;
+def FILD16m : FPI<0xDF, MRM0m, (ops i16mem:$src), "fild{s} $src">;
+def FILD32m : FPI<0xDB, MRM0m, (ops i32mem:$src), "fild{l} $src">;
+def FILD64m : FPI<0xDF, MRM5m, (ops i64mem:$src), "fild{ll} $src">;
+def FST32m : FPI<0xD9, MRM2m, (ops f32mem:$dst), "fst{s} $dst">;
+def FST64m : FPI<0xDD, MRM2m, (ops f64mem:$dst), "fst{l} $dst">;
+def FSTP32m : FPI<0xD9, MRM3m, (ops f32mem:$dst), "fstp{s} $dst">;
+def FSTP64m : FPI<0xDD, MRM3m, (ops f64mem:$dst), "fstp{l} $dst">;
+def FIST16m : FPI<0xDF, MRM2m, (ops i16mem:$dst), "fist{s} $dst">;
+def FIST32m : FPI<0xDB, MRM2m, (ops i32mem:$dst), "fist{l} $dst">;
+def FISTP16m : FPI<0xDF, MRM3m, (ops i16mem:$dst), "fistp{s} $dst">;
+def FISTP32m : FPI<0xDB, MRM3m, (ops i32mem:$dst), "fistp{l} $dst">;
+def FISTP64m : FPI<0xDF, MRM7m, (ops i64mem:$dst), "fistp{ll} $dst">;
+
+// FP Stack manipulation instructions.
+def FLDrr : FPI<0xC0, AddRegFrm, (ops RST:$op), "fld $op">, D9;
+def FSTrr : FPI<0xD0, AddRegFrm, (ops RST:$op), "fst $op">, DD;
+def FSTPrr : FPI<0xD8, AddRegFrm, (ops RST:$op), "fstp $op">, DD;
+def FXCH : FPI<0xC8, AddRegFrm, (ops RST:$op), "fxch $op">, D9;
+
+// Floating point constant loads.
+def FpLD0 : FpI<(ops RFP:$dst), ZeroArgFP, []>;
+def FpLD1 : FpI<(ops RFP:$dst), ZeroArgFP, []>;
+
+def FLD0 : FPI<0xEE, RawFrm, (ops), "fldz">, D9;
+def FLD1 : FPI<0xE8, RawFrm, (ops), "fld1">, D9;
+
+
+// Unary operations (pseudo forms with selection patterns, then the real
+// D9-prefixed instructions the stackifier emits).
+def FpCHS  : FpI<(ops RFP:$dst, RFP:$src), OneArgFPRW,
+                 [(set RFP:$dst, (fneg RFP:$src))]>;   // negate
+def FpABS  : FpI<(ops RFP:$dst, RFP:$src), OneArgFPRW,
+                 [(set RFP:$dst, (fabs RFP:$src))]>;   // absolute value
+def FpSQRT : FpI<(ops RFP:$dst, RFP:$src), OneArgFPRW,
+                 [(set RFP:$dst, (fsqrt RFP:$src))]>;  // square root
+def FpSIN  : FpI<(ops RFP:$dst, RFP:$src), OneArgFPRW,
+                 [(set RFP:$dst, (fsin RFP:$src))]>;   // sine
+def FpCOS  : FpI<(ops RFP:$dst, RFP:$src), OneArgFPRW,
+                 [(set RFP:$dst, (fcos RFP:$src))]>;   // cosine
+def FpTST  : FpI<(ops RFP:$src), OneArgFP,
+                 []>;   // compare $src with 0.0; result goes to FPSW (read back via FNSTSW/SAHF)
+
+def FCHS  : FPI<0xE0, RawFrm, (ops), "fchs">, D9;
+def FABS  : FPI<0xE1, RawFrm, (ops), "fabs">, D9;
+def FSQRT : FPI<0xFA, RawFrm, (ops), "fsqrt">, D9;
+def FSIN  : FPI<0xFE, RawFrm, (ops), "fsin">, D9;
+def FCOS  : FPI<0xFF, RawFrm, (ops), "fcos">, D9;
+def FTST  : FPI<0xE4, RawFrm, (ops), "ftst">, D9;
+
+
+
+// Add, Sub, Mul, Div.
+def FpADD : FpI<(ops RFP:$dst, RFP:$src1, RFP:$src2), TwoArgFP,
+ [(set RFP:$dst, (fadd RFP:$src1, RFP:$src2))]>;
+def FpSUB : FpI<(ops RFP:$dst, RFP:$src1, RFP:$src2), TwoArgFP,
+ [(set RFP:$dst, (fsub RFP:$src1, RFP:$src2))]>;
+def FpMUL : FpI<(ops RFP:$dst, RFP:$src1, RFP:$src2), TwoArgFP,
+ [(set RFP:$dst, (fmul RFP:$src1, RFP:$src2))]>;
+def FpDIV : FpI<(ops RFP:$dst, RFP:$src1, RFP:$src2), TwoArgFP,
+ [(set RFP:$dst, (fdiv RFP:$src1, RFP:$src2))]>;
+
+class FPST0rInst<bits<8> o, string asm>
+ : FPI<o, AddRegFrm, (ops RST:$op), asm>, D8;
+class FPrST0Inst<bits<8> o, string asm>
+ : FPI<o, AddRegFrm, (ops RST:$op), asm>, DC;
+class FPrST0PInst<bits<8> o, string asm>
+ : FPI<o, AddRegFrm, (ops RST:$op), asm>, DE;
// NOTE: GAS and apparently all other AT&T style assemblers have a broken notion
// of some of the 'reverse' forms of the fsub and fdiv instructions. As such,
// we have to put some 'r's in and take them out of weird places.
-def FSUBRST0r : FPST0rInst <0xE8, (ops RST:$op),
- "fsubr $op">;
-def FSUBrST0 : FPrST0Inst <0xE8, (ops RST:$op),
- "fsub{r} {%ST(0), $op|$op, %ST(0)}">;
-def FSUBPrST0 : FPrST0PInst<0xE8, (ops RST:$op),
- "fsub{r}p $op">;
-
-def FSUBST0r : FPST0rInst <0xE0, (ops RST:$op),
- "fsub $op">;
-def FSUBRrST0 : FPrST0Inst <0xE0, (ops RST:$op),
- "fsub{|r} {%ST(0), $op|$op, %ST(0)}">;
-def FSUBRPrST0 : FPrST0PInst<0xE0, (ops RST:$op),
- "fsub{|r}p $op">;
-
-def FMULST0r : FPST0rInst <0xC8, (ops RST:$op),
- "fmul $op">;
-def FMULrST0 : FPrST0Inst <0xC8, (ops RST:$op),
- "fmul {%ST(0), $op|$op, %ST(0)}">;
-def FMULPrST0 : FPrST0PInst<0xC8, (ops RST:$op),
- "fmulp $op">;
-
-def FDIVRST0r : FPST0rInst <0xF8, (ops RST:$op),
- "fdivr $op">;
-def FDIVrST0 : FPrST0Inst <0xF8, (ops RST:$op),
- "fdiv{r} {%ST(0), $op|$op, %ST(0)}">;
-def FDIVPrST0 : FPrST0PInst<0xF8, (ops RST:$op),
- "fdiv{r}p $op">;
-
-def FDIVST0r : FPST0rInst <0xF0, (ops RST:$op), // ST(0) = ST(0) / ST(i)
- "fdiv $op">;
-def FDIVRrST0 : FPrST0Inst <0xF0, (ops RST:$op), // ST(i) = ST(0) / ST(i)
- "fdiv{|r} {%ST(0), $op|$op, %ST(0)}">;
-def FDIVRPrST0 : FPrST0PInst<0xF0, (ops RST:$op), // ST(i) = ST(0) / ST(i), pop
- "fdiv{|r}p $op">;
-
-// Floating point compares
-def FUCOMr : FPI<0xE0, AddRegFrm, CompareFP, // FPSW = cmp ST(0) with ST(i)
- (ops RST:$reg, variable_ops),
- "fucom $reg", []>, DD, Imp<[ST0],[]>;
-def FUCOMPr : I<0xE8, AddRegFrm, // FPSW = cmp ST(0) with ST(i), pop
- (ops RST:$reg, variable_ops),
- "fucomp $reg", []>, DD, Imp<[ST0],[]>;
-def FUCOMPPr : I<0xE9, RawFrm, // cmp ST(0) with ST(1), pop, pop
- (ops variable_ops),
- "fucompp", []>, DA, Imp<[ST0],[]>;
-
-def FUCOMIr : FPI<0xE8, AddRegFrm, CompareFP, // CC = cmp ST(0) with ST(i)
- (ops RST:$reg, variable_ops),
- "fucomi {$reg, %ST(0)|%ST(0), $reg}", []>, DB, Imp<[ST0],[]>;
-def FUCOMIPr : I<0xE8, AddRegFrm, // CC = cmp ST(0) with ST(i), pop
- (ops RST:$reg, variable_ops),
- "fucomip {$reg, %ST(0)|%ST(0), $reg}", []>, DF, Imp<[ST0],[]>;
-
-
-// Floating point flag ops
+// Register-register ST(0)/ST(i) arithmetic.  *ST0r forms target ST(0);
+// *rST0 forms target ST(i); *PrST0 forms additionally pop the stack.
+// The odd '{r}'/'{|r}' asm placement works around the GAS quirk noted above.
+def FADDST0r   : FPST0rInst <0xC0, "fadd $op">;
+def FADDrST0   : FPrST0Inst <0xC0, "fadd {%ST(0), $op|$op, %ST(0)}">;
+def FADDPrST0  : FPrST0PInst<0xC0, "faddp $op">;
+def FSUBRST0r  : FPST0rInst <0xE8, "fsubr $op">;
+def FSUBrST0   : FPrST0Inst <0xE8, "fsub{r} {%ST(0), $op|$op, %ST(0)}">;
+def FSUBPrST0  : FPrST0PInst<0xE8, "fsub{r}p $op">;
+def FSUBST0r   : FPST0rInst <0xE0, "fsub $op">;
+def FSUBRrST0  : FPrST0Inst <0xE0, "fsub{|r} {%ST(0), $op|$op, %ST(0)}">;
+def FSUBRPrST0 : FPrST0PInst<0xE0, "fsub{|r}p $op">;
+def FMULST0r   : FPST0rInst <0xC8, "fmul $op">;
+def FMULrST0   : FPrST0Inst <0xC8, "fmul {%ST(0), $op|$op, %ST(0)}">;
+def FMULPrST0  : FPrST0PInst<0xC8, "fmulp $op">;
+def FDIVRST0r  : FPST0rInst <0xF8, "fdivr $op">;
+def FDIVrST0   : FPrST0Inst <0xF8, "fdiv{r} {%ST(0), $op|$op, %ST(0)}">;
+def FDIVPrST0  : FPrST0PInst<0xF8, "fdiv{r}p $op">;
+def FDIVST0r   : FPST0rInst <0xF0, "fdiv $op">;
+def FDIVRrST0  : FPrST0Inst <0xF0, "fdiv{|r} {%ST(0), $op|$op, %ST(0)}">;
+def FDIVRPrST0 : FPrST0PInst<0xF0, "fdiv{|r}p $op">;
+
+// Floating point compares (pseudo forms — operands are RFP virtual
+// registers; the stackifier rewrites them into the FUCOM* forms below).
+def FpUCOMr  : FpI<(ops RFP:$lhs, RFP:$rhs), CompareFP,
+                   []>; // FPSW = cmp ST(0) with ST(i)
+def FpUCOMIr : FpI<(ops RFP:$lhs, RFP:$rhs), CompareFP,
+                   []>; // CC = cmp ST(0) with ST(i)
+
+def FUCOMr : FPI<0xE0, AddRegFrm, // FPSW = cmp ST(0) with ST(i)
+ (ops RST:$reg),
+ "fucom $reg">, DD, Imp<[ST0],[]>;
+def FUCOMPr : FPI<0xE8, AddRegFrm, // FPSW = cmp ST(0) with ST(i), pop
+ (ops RST:$reg),
+ "fucomp $reg">, DD, Imp<[ST0],[]>;
+def FUCOMPPr : FPI<0xE9, RawFrm, // cmp ST(0) with ST(1), pop, pop
+ (ops),
+ "fucompp">, DA, Imp<[ST0],[]>;
+
+def FUCOMIr : FPI<0xE8, AddRegFrm, // CC = cmp ST(0) with ST(i)
+ (ops RST:$reg),
+ "fucomi {$reg, %ST(0)|%ST(0), $reg}">, DB, Imp<[ST0],[]>;
+def FUCOMIPr : FPI<0xE8, AddRegFrm, // CC = cmp ST(0) with ST(i), pop
+ (ops RST:$reg),
+ "fucomip {$reg, %ST(0)|%ST(0), $reg}">, DF, Imp<[ST0],[]>;
+
+
+// Floating point flag ops.
def FNSTSW8r : I<0xE0, RawFrm, // AX = fp flags
(ops), "fnstsw", []>, DF, Imp<[],[AX]>;