Let the codegen recognize the nomerge attribute and disable branch folding when the attribute is given
Differential Revision: https://reviews.llvm.org/D79537
// known to be exact.
NoFPExcept = 1 << 14, // Instruction does not raise
// floating-point exceptions.
+ NoMerge = 1 << 15, // Passes that drop source location info
+ // (e.g. branch folding) should skip
+ // this instruction.
};
private:
struct CallSiteDbgInfo {
CallSiteInfo CSInfo;
MDNode *HeapAllocSite = nullptr;
+ // True when the call was marked 'nomerge' in the IR; consumed by the
+ // instruction emitter to set MachineInstr::NoMerge on the emitted call.
+ bool NoMerge = false;
};
DenseMap<const SDNode *, CallSiteDbgInfo> SDCallSiteDbgInfo;
return It->second.HeapAllocSite;
}
+ /// Record that the call site represented by \p Node carries the IR-level
+ /// nomerge attribute. Only a true value is stored, so the side table is
+ /// not grown for the common (mergeable) case.
+ void addNoMergeSiteInfo(const SDNode *Node, bool NoMerge) {
+ if (NoMerge)
+ SDCallSiteDbgInfo[Node].NoMerge = NoMerge;
+ }
+
+ /// Return true if the call site represented by \p Node was registered as
+ /// nomerge via addNoMergeSiteInfo; false when no entry exists.
+ bool getNoMergeSiteInfo(const SDNode *Node) {
+ auto I = SDCallSiteDbgInfo.find(Node);
+ if (I == SDCallSiteDbgInfo.end())
+ return false;
+ return I->second.NoMerge;
+ }
+
/// Return the current function's default denormal handling kind for the given
/// floating point type.
DenormalMode getDenormalMode(EVT VT) const {
bool IsConvergent : 1;
bool IsPatchPoint : 1;
bool IsPreallocated : 1;
+ bool NoMerge : 1;
// IsTailCall should be modified by implementations of
// TargetLowering::LowerCall that perform tail call conversions.
CallLoweringInfo(SelectionDAG &DAG)
: RetSExt(false), RetZExt(false), IsVarArg(false), IsInReg(false),
DoesNotReturn(false), IsReturnValueUsed(true), IsConvergent(false),
- IsPatchPoint(false), IsPreallocated(false), DAG(DAG) {}
+ IsPatchPoint(false), IsPreallocated(false), NoMerge(false),
+ DAG(DAG) {}
CallLoweringInfo &setDebugLoc(const SDLoc &dl) {
DL = dl;
IsReturnValueUsed = !Call.use_empty();
RetSExt = Call.hasRetAttr(Attribute::SExt);
RetZExt = Call.hasRetAttr(Attribute::ZExt);
-
+ NoMerge = Call.hasFnAttr(Attribute::NoMerge);
+
Callee = Target;
CallConv = Call.getCallingConv();
MBBI1->isInlineAsm()) {
break;
}
+ if (MBBI1->getFlag(MachineInstr::NoMerge) ||
+ MBBI2->getFlag(MachineInstr::NoMerge))
+ break;
++TailLen;
I1 = MBBI1;
I2 = MBBI2;
OS << "exact ";
if (MI.getFlag(MachineInstr::NoFPExcept))
OS << "nofpexcept ";
+ if (MI.getFlag(MachineInstr::NoMerge))
+ OS << "nomerge ";
OS << TII->getName(MI.getOpcode());
if (I < E)
OS << "exact ";
if (getFlag(MachineInstr::NoFPExcept))
OS << "nofpexcept ";
+ if (getFlag(MachineInstr::NoMerge))
+ OS << "nomerge ";
// Print the opcode name.
if (TII)
DAG->getTarget().Options.EmitCallSiteInfo)
MF.addCallArgsForwardingRegs(MI, DAG->getSDCallSiteInfo(Node));
+ if (DAG->getNoMergeSiteInfo(Node)) {
+ MI->setFlag(MachineInstr::MIFlag::NoMerge);
+ }
+
return MI;
};
// Returns a chain and a flag for retval copy to use.
Chain = DAG.getNode(AArch64ISD::CALL, DL, NodeTys, Ops);
+ DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
InFlag = Chain.getValue(1);
DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo));
// Returns a chain and a flag for retval copy to use.
Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops);
+ DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
InFlag = Chain.getValue(1);
DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo));
std::array<EVT, 2> ReturnTypes = {{MVT::Other, MVT::Glue}};
Chain = DAG.getNode(CallOpc, dl, ReturnTypes, Ops);
+ DAG.addNoMergeSiteInfo(Chain.getNode(), CFlags.NoMerge);
Glue = Chain.getValue(1);
// When performing tail call optimization the callee pops its arguments off
isIndirectCall(Callee, DAG, Subtarget, isPatchPoint),
// hasNest
Subtarget.is64BitELFABI() &&
- any_of(Outs, [](ISD::OutputArg Arg) { return Arg.Flags.isNest(); }));
+ any_of(Outs, [](ISD::OutputArg Arg) { return Arg.Flags.isNest(); }),
+ CLI.NoMerge);
if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
return LowerCall_64SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
const bool IsPatchPoint : 1;
const bool IsIndirect : 1;
const bool HasNest : 1;
+ const bool NoMerge : 1;
CallFlags(CallingConv::ID CC, bool IsTailCall, bool IsVarArg,
- bool IsPatchPoint, bool IsIndirect, bool HasNest)
+ bool IsPatchPoint, bool IsIndirect, bool HasNest, bool NoMerge)
: CallConv(CC), IsTailCall(IsTailCall), IsVarArg(IsVarArg),
IsPatchPoint(IsPatchPoint), IsIndirect(IsIndirect),
- HasNest(HasNest) {}
+ HasNest(HasNest), NoMerge(NoMerge) {}
};
private:
}
Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
+ DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
Glue = Chain.getValue(1);
// Mark the end of the call, which is glued to the call itself.
Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops);
}
InFlag = Chain.getValue(1);
+ DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo));
// Save heapallocsite metadata.
--- /dev/null
+; RUN: llc < %s -mtriple=aarch64 -o - | FileCheck %s
+
+; Every call to @bar is marked nomerge (attributes #0), so branch folding
+; must keep the otherwise-identical call sites in separate blocks instead
+; of tail-merging them into one.
+define void @foo(i32 %i) {
+entry:
+ switch i32 %i, label %if.end3 [
+ i32 5, label %if.then
+ i32 7, label %if.then2
+ ]
+
+if.then:
+ tail call void @bar() #0
+ br label %if.end3
+
+if.then2:
+ tail call void @bar() #0
+ br label %if.end3
+
+if.end3:
+ tail call void @bar() #0
+ ret void
+}
+
+declare void @bar()
+
+attributes #0 = { nomerge }
+
+; CHECK-LABEL: foo:
+; CHECK: // %bb.0: // %entry
+; CHECK: // %bb.1: // %entry
+; CHECK: // %bb.2: // %if.then
+; CHECK-NEXT: bl bar
+; CHECK: b bar
+; CHECK: .LBB0_3: // %if.then2
+; CHECK-NEXT: bl bar
+; CHECK: .LBB0_4: // %if.end3
+; CHECK: b bar
--- /dev/null
+; RUN: llc < %s -mtriple=arm -o - | FileCheck %s
+
+; Every call to @bar is marked nomerge (attributes #0), so branch folding
+; must keep the otherwise-identical call sites in separate blocks instead
+; of tail-merging them into one.
+define void @foo(i32 %i) {
+entry:
+ switch i32 %i, label %if.end3 [
+ i32 5, label %if.then
+ i32 7, label %if.then2
+ ]
+
+if.then:
+ tail call void @bar() #0
+ br label %if.end3
+
+if.then2:
+ tail call void @bar() #0
+ br label %if.end3
+
+if.end3:
+ tail call void @bar() #0
+ ret void
+}
+
+declare void @bar()
+
+attributes #0 = { nomerge }
+
+; CHECK-LABEL: foo:
+; CHECK: @ %bb.0: @ %entry
+; CHECK: @ %bb.1: @ %entry
+; CHECK: @ %bb.2: @ %if.then
+; CHECK-NEXT: bl bar
+; CHECK: b bar
+; CHECK: .LBB0_3: @ %if.then2
+; CHECK-NEXT: bl bar
+; CHECK: .LBB0_4: @ %if.end3
+; CHECK: b bar
--- /dev/null
+; RUN: llc < %s -mtriple=powerpc -o - | FileCheck %s
+
+; Every call to @bar is marked nomerge (attributes #0), so branch folding
+; must keep the otherwise-identical call sites in separate blocks instead
+; of tail-merging them into one.
+define void @foo(i32 %i) {
+entry:
+ switch i32 %i, label %if.end3 [
+ i32 5, label %if.then
+ i32 7, label %if.then2
+ ]
+
+if.then:
+ tail call void @bar() #0
+ br label %if.end3
+
+if.then2:
+ tail call void @bar() #0
+ br label %if.end3
+
+if.end3:
+ tail call void @bar() #0
+ ret void
+}
+
+declare void @bar()
+
+attributes #0 = { nomerge }
+
+; CHECK-LABEL: foo:
+; CHECK: # %bb.0: # %entry
+; CHECK: # %bb.1: # %entry
+; CHECK: # %bb.2: # %if.then
+; CHECK-NEXT: bl bar
+; CHECK: .LBB0_3: # %if.then2
+; CHECK-NEXT: bl bar
+; CHECK: .LBB0_4: # %if.end3
+; CHECK-NEXT: bl bar
--- /dev/null
+; RUN: llc < %s -mtriple=riscv64 -o - | FileCheck %s
+
+; Every call to @bar is marked nomerge (attributes #0), so branch folding
+; must keep the otherwise-identical call sites in separate blocks instead
+; of tail-merging them into one.
+define void @foo(i32 %i) {
+entry:
+ switch i32 %i, label %if.end3 [
+ i32 5, label %if.then
+ i32 7, label %if.then2
+ ]
+
+if.then:
+ tail call void @bar() #0
+ br label %if.end3
+
+if.then2:
+ tail call void @bar() #0
+ br label %if.end3
+
+if.end3:
+ tail call void @bar() #0
+ ret void
+}
+
+declare void @bar()
+
+attributes #0 = { nomerge }
+
+; CHECK-LABEL: foo:
+; CHECK: # %bb.0: # %entry
+; CHECK: # %bb.1: # %entry
+; CHECK: # %bb.2: # %if.then
+; CHECK-NEXT: call bar
+; CHECK: .LBB0_3: # %if.then2
+; CHECK-NEXT: call bar
+; CHECK: .LBB0_4: # %if.end3
+; CHECK: tail bar
--- /dev/null
+; RUN: llc < %s -mtriple=x86_64 -o - | FileCheck %s
+
+; Every call to @bar is marked nomerge (attributes #0), so branch folding
+; must keep the otherwise-identical call sites in separate blocks instead
+; of tail-merging them into one.
+define void @foo(i32 %i) {
+entry:
+ switch i32 %i, label %if.end3 [
+ i32 5, label %if.then
+ i32 7, label %if.then2
+ ]
+
+if.then:
+ tail call void @bar() #0
+ br label %if.end3
+
+if.then2:
+ tail call void @bar() #0
+ br label %if.end3
+
+if.end3:
+ tail call void @bar() #0
+ ret void
+}
+
+declare void @bar()
+
+attributes #0 = { nomerge }
+
+; CHECK-LABEL: foo:
+; CHECK: # %bb.0: # %entry
+; CHECK: # %bb.1: # %entry
+; CHECK: # %bb.2: # %if.then
+; CHECK-NEXT: callq bar
+; CHECK: jmp bar # TAILCALL
+; CHECK: .LBB0_3: # %if.then2
+; CHECK: callq bar
+; CHECK: .LBB0_4: # %if.end3
+; CHECK: jmp bar # TAILCALL