}
SDOperand X86TargetLowering::LowerCCCCallTo(SDOperand Op, SelectionDAG &DAG,
- bool isStdCall) {
+ unsigned CC) {
SDOperand Chain = Op.getOperand(0);
bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
bool isTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0;
ArgInRegs[i],
NumIntRegs, NumXMMRegs, 3,
ObjSize, ObjIntRegs, ObjXMMRegs,
- !isStdCall);
+ CC != CallingConv::X86_StdCall);
if (ObjSize > 4)
ArgIncrement = ObjSize;
ArgInRegs[i],
NumIntRegs, NumXMMRegs, 3,
ObjSize, ObjIntRegs, ObjXMMRegs,
- !isStdCall);
+ CC != CallingConv::X86_StdCall);
if (ObjSize > 4)
ArgIncrement = ObjSize;
case MVT::v2i64:
case MVT::v4f32:
case MVT::v2f64:
- assert(!isStdCall && "Unhandled argument type!");
RegsToPass.push_back(std::make_pair(XMMArgRegs[NumXMMRegs], Arg));
break;
}
// Create the CALLSEQ_END node.
unsigned NumBytesForCalleeToPush = 0;
- if (isStdCall) {
- if (isVarArg) {
+ if (CC == CallingConv::X86_StdCall) {
+ if (isVarArg)
NumBytesForCalleeToPush = NumSRetBytes;
- } else {
+ else
NumBytesForCalleeToPush = NumBytes;
- }
} else {
// If this is a call to a struct-return function, the callee
// pops the hidden struct pointer, so we have to push it back.
// Handle result values, copying them out of physregs into vregs that we
// return.
- return SDOperand(LowerCallResult(Chain, InFlag, Op.Val, CallingConv::C, DAG),
- Op.ResNo);
+ return SDOperand(LowerCallResult(Chain, InFlag, Op.Val, CC, DAG), Op.ResNo);
}
}
SDOperand
-X86TargetLowering::LowerX86_64CCCCallTo(SDOperand Op, SelectionDAG &DAG) {
+X86TargetLowering::LowerX86_64CCCCallTo(SDOperand Op, SelectionDAG &DAG,
+ unsigned CallingConv) {
SDOperand Chain = Op.getOperand(0);
bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
bool isTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0;
}
SDOperand X86TargetLowering::LowerFastCCCallTo(SDOperand Op, SelectionDAG &DAG,
- bool isFastCall) {
+ unsigned CC) {
SDOperand Chain = Op.getOperand(0);
bool isTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0;
SDOperand Callee = Op.getOperand(4);
X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3
};
- unsigned GPRInd = (isFastCall ? 1 : 0);
+ bool isFastCall = CC == CallingConv::X86_FastCall;
+ unsigned GPRInd = isFastCall ? 1 : 0;
for (unsigned i = 0; i != NumOps; ++i) {
SDOperand Arg = Op.getOperand(5+2*i);
unsigned CallingConv= cast<ConstantSDNode>(Op.getOperand(1))->getValue();
if (Subtarget->is64Bit())
- return LowerX86_64CCCCallTo(Op, DAG);
+ return LowerX86_64CCCCallTo(Op, DAG, CallingConv);
else
switch (CallingConv) {
default:
assert(0 && "Unsupported calling convention");
case CallingConv::Fast:
if (EnableFastCC)
- return LowerFastCCCallTo(Op, DAG);
+ return LowerFastCCCallTo(Op, DAG, CallingConv);
// Falls through
case CallingConv::C:
- return LowerCCCCallTo(Op, DAG);
case CallingConv::X86_StdCall:
- return LowerCCCCallTo(Op, DAG, true);
+ return LowerCCCCallTo(Op, DAG, CallingConv);
case CallingConv::X86_FastCall:
- return LowerFastCCCallTo(Op, DAG, true);
+ return LowerFastCCCallTo(Op, DAG, CallingConv);
}
}
// C and StdCall Calling Convention implementation.
SDOperand LowerCCCArguments(SDOperand Op, SelectionDAG &DAG,
bool isStdCall = false);
- SDOperand LowerCCCCallTo(SDOperand Op, SelectionDAG &DAG,
- bool isStdCall = false);
+ SDOperand LowerCCCCallTo(SDOperand Op, SelectionDAG &DAG, unsigned CC);
// X86-64 C Calling Convention implementation.
SDOperand LowerX86_64CCCArguments(SDOperand Op, SelectionDAG &DAG);
- SDOperand LowerX86_64CCCCallTo(SDOperand Op, SelectionDAG &DAG);
+ SDOperand LowerX86_64CCCCallTo(SDOperand Op, SelectionDAG &DAG,unsigned CC);
// Fast and FastCall Calling Convention implementation.
SDOperand LowerFastCCArguments(SDOperand Op, SelectionDAG &DAG,
bool isFastCall = false);
- SDOperand LowerFastCCCallTo(SDOperand Op, SelectionDAG &DAG,
- bool isFastCall = false);
+ SDOperand LowerFastCCCallTo(SDOperand Op, SelectionDAG &DAG, unsigned CC);
SDOperand LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG);
SDOperand LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG);