From f3d2de3832a1ca028869ac614df32a9698fc6535 Mon Sep 17 00:00:00 2001 From: Ahmed Bougacha Date: Fri, 11 Sep 2015 17:08:17 +0000 Subject: [PATCH] [CodeGen] Rename AtomicRMWExpansionKind to AtomicExpansionKind. This lets us generalize its usage to the other atomic instructions. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@247428 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm/Target/TargetLowering.h | 7 +++---- lib/CodeGen/AtomicExpandPass.cpp | 6 +++--- lib/Target/AArch64/AArch64ISelLowering.cpp | 5 ++--- lib/Target/AArch64/AArch64ISelLowering.h | 2 +- lib/Target/ARM/ARMISelLowering.cpp | 6 +++--- lib/Target/ARM/ARMISelLowering.h | 2 +- lib/Target/Hexagon/HexagonISelLowering.h | 6 +++--- lib/Target/X86/X86ISelLowering.cpp | 14 +++++++------- lib/Target/X86/X86ISelLowering.h | 2 +- 9 files changed, 24 insertions(+), 26 deletions(-) diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h index 99e750e3de5..e8cbb697b15 100644 --- a/include/llvm/Target/TargetLowering.h +++ b/include/llvm/Target/TargetLowering.h @@ -128,7 +128,7 @@ public: /// because different targets have different levels of support for these /// atomic RMW instructions, and also have different options w.r.t. what they /// should expand to. - enum class AtomicRMWExpansionKind { + enum class AtomicExpansionKind { None, // Don't expand the instruction. LLSC, // Expand the instruction into loadlinked/storeconditional; used // by ARM/AArch64. Implies `hasLoadLinkedStoreConditional` @@ -1120,9 +1120,8 @@ public: /// Returns how the IR-level AtomicExpand pass should expand the given /// AtomicRMW, if at all. Default is to never expand. - virtual AtomicRMWExpansionKind - shouldExpandAtomicRMWInIR(AtomicRMWInst *) const { - return AtomicRMWExpansionKind::None; + virtual AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *) const { + return AtomicExpansionKind::None; } /// On some platforms, an AtomicRMW that never actually modifies the value diff --git a/lib/CodeGen/AtomicExpandPass.cpp b/lib/CodeGen/AtomicExpandPass.cpp index c10648c6e3b..863f7a40f1f 100644 --- a/lib/CodeGen/AtomicExpandPass.cpp +++ b/lib/CodeGen/AtomicExpandPass.cpp @@ -240,9 +240,9 @@ static void createCmpXchgInstFun(IRBuilder<> &Builder, Value *Addr, bool AtomicExpand::tryExpandAtomicRMW(AtomicRMWInst *AI) { switch (TLI->shouldExpandAtomicRMWInIR(AI)) { - case TargetLoweringBase::AtomicRMWExpansionKind::None: + case TargetLoweringBase::AtomicExpansionKind::None: return false; - case TargetLoweringBase::AtomicRMWExpansionKind::LLSC: { + case TargetLoweringBase::AtomicExpansionKind::LLSC: { assert(TLI->hasLoadLinkedStoreConditional() && "TargetLowering requested we expand AtomicRMW instruction into " "load-linked/store-conditional combos, but such instructions aren't " @@ -250,7 +250,7 @@ bool AtomicExpand::tryExpandAtomicRMW(AtomicRMWInst *AI) { return expandAtomicRMWToLLSC(AI); } - case TargetLoweringBase::AtomicRMWExpansionKind::CmpXChg: { + case TargetLoweringBase::AtomicExpansionKind::CmpXChg: { return expandAtomicRMWToCmpXchg(AI, createCmpXchgInstFun); } } diff --git a/lib/Target/AArch64/AArch64ISelLowering.cpp b/lib/Target/AArch64/AArch64ISelLowering.cpp index a51b75809b5..9145c1e9cbf 100644 --- a/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -9498,11 +9498,10 @@ bool AArch64TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const { } // For the real atomic operations, we have ldxr/stxr up to 128 bits, 
-TargetLoweringBase::AtomicRMWExpansionKind +TargetLoweringBase::AtomicExpansionKind AArch64TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const { unsigned Size = AI->getType()->getPrimitiveSizeInBits(); - return Size <= 128 ? AtomicRMWExpansionKind::LLSC - : AtomicRMWExpansionKind::None; + return Size <= 128 ? AtomicExpansionKind::LLSC : AtomicExpansionKind::None; } bool AArch64TargetLowering::hasLoadLinkedStoreConditional() const { diff --git a/lib/Target/AArch64/AArch64ISelLowering.h b/lib/Target/AArch64/AArch64ISelLowering.h index ab365789586..c437ec2b351 100644 --- a/lib/Target/AArch64/AArch64ISelLowering.h +++ b/lib/Target/AArch64/AArch64ISelLowering.h @@ -351,7 +351,7 @@ public: bool shouldExpandAtomicLoadInIR(LoadInst *LI) const override; bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override; - TargetLoweringBase::AtomicRMWExpansionKind + TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override; bool useLoadStackGuardNode() const override; diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp index 633c4a66f8e..eeea67451ae 100644 --- a/lib/Target/ARM/ARMISelLowering.cpp +++ b/lib/Target/ARM/ARMISelLowering.cpp @@ -11534,12 +11534,12 @@ bool ARMTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const { // For the real atomic operations, we have ldrex/strex up to 32 bits, // and up to 64 bits on the non-M profiles -TargetLoweringBase::AtomicRMWExpansionKind +TargetLoweringBase::AtomicExpansionKind ARMTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const { unsigned Size = AI->getType()->getPrimitiveSizeInBits(); return (Size <= (Subtarget->isMClass() ? 32U : 64U)) - ? AtomicRMWExpansionKind::LLSC - : AtomicRMWExpansionKind::None; + ? AtomicExpansionKind::LLSC + : AtomicExpansionKind::None; } // This has so far only been implemented for MachO. 
diff --git a/lib/Target/ARM/ARMISelLowering.h b/lib/Target/ARM/ARMISelLowering.h index a9009de5169..5f28ef60ff8 100644 --- a/lib/Target/ARM/ARMISelLowering.h +++ b/lib/Target/ARM/ARMISelLowering.h @@ -438,7 +438,7 @@ namespace llvm { bool shouldExpandAtomicLoadInIR(LoadInst *LI) const override; bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override; - TargetLoweringBase::AtomicRMWExpansionKind + TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override; bool useLoadStackGuardNode() const override; diff --git a/lib/Target/Hexagon/HexagonISelLowering.h b/lib/Target/Hexagon/HexagonISelLowering.h index 2642abffadd..a3616821456 100644 --- a/lib/Target/Hexagon/HexagonISelLowering.h +++ b/lib/Target/Hexagon/HexagonISelLowering.h @@ -218,9 +218,9 @@ bool isPositiveHalfWord(SDNode *N); Value *Addr, AtomicOrdering Ord) const override; bool shouldExpandAtomicLoadInIR(LoadInst *LI) const override; bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override; - AtomicRMWExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) - const override { - return AtomicRMWExpansionKind::LLSC; + AtomicExpansionKind + shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override { + return AtomicExpansionKind::LLSC; } }; } // end namespace llvm diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index bc97ea036bf..611ccb92e4f 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -18411,7 +18411,7 @@ bool X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const { return needsCmpXchgNb(PTy->getElementType()); } -TargetLoweringBase::AtomicRMWExpansionKind +TargetLoweringBase::AtomicExpansionKind X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const { unsigned NativeWidth = Subtarget->is64Bit() ? 64 : 32; Type *MemType = AI->getType(); @@ -18419,8 +18419,8 @@ X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const { // If the operand is too big, we must see if cmpxchg8/16b is available // and default to library calls otherwise. if (MemType->getPrimitiveSizeInBits() > NativeWidth) { - return needsCmpXchgNb(MemType) ? AtomicRMWExpansionKind::CmpXChg - : AtomicRMWExpansionKind::None; + return needsCmpXchgNb(MemType) ? AtomicExpansionKind::CmpXChg + : AtomicExpansionKind::None; } AtomicRMWInst::BinOp Op = AI->getOperation(); @@ -18431,14 +18431,14 @@ X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const { case AtomicRMWInst::Add: case AtomicRMWInst::Sub: // It's better to use xadd, xsub or xchg for these in all cases. - return AtomicRMWExpansionKind::None; + return AtomicExpansionKind::None; case AtomicRMWInst::Or: case AtomicRMWInst::And: case AtomicRMWInst::Xor: // If the atomicrmw's result isn't actually used, we can just add a "lock" // prefix to a normal instruction for these operations. - return !AI->use_empty() ? AtomicRMWExpansionKind::CmpXChg - : AtomicRMWExpansionKind::None; + return !AI->use_empty() ? AtomicExpansionKind::CmpXChg + : AtomicExpansionKind::None; case AtomicRMWInst::Nand: case AtomicRMWInst::Max: case AtomicRMWInst::Min: @@ -18446,7 +18446,7 @@ X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const { case AtomicRMWInst::UMin: // These always require a non-trivial set of data operations on x86. We must // use a cmpxchg loop. 
- return AtomicRMWExpansionKind::CmpXChg; + return AtomicExpansionKind::CmpXChg; } } diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h index fc9b1b6c0fe..f01d4d22815 100644 --- a/lib/Target/X86/X86ISelLowering.h +++ b/lib/Target/X86/X86ISelLowering.h @@ -1055,7 +1055,7 @@ namespace llvm { bool shouldExpandAtomicLoadInIR(LoadInst *SI) const override; bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override; - TargetLoweringBase::AtomicRMWExpansionKind + TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override; LoadInst * -- 2.11.0
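
[Editor's note, not part of the patch] For readers tracking the rename downstream: after this commit, any target override of shouldExpandAtomicRMWInIR returns TargetLoweringBase::AtomicExpansionKind instead of the old AtomicRMWExpansionKind. Below is a minimal sketch of what that looks like for an out-of-tree backend. The enum, the hook, and the headers are the real LLVM APIs of this era as shown in the diff above; the MyTargetLowering class and its 64-bit LL/SC cutoff are hypothetical, invented purely for illustration.

    // Hypothetical out-of-tree target, for illustration only.
    #include "llvm/IR/Instructions.h"
    #include "llvm/Target/TargetLowering.h"
    #include "llvm/Target/TargetMachine.h"

    namespace llvm {

    class MyTargetLowering : public TargetLowering {
    public:
      explicit MyTargetLowering(const TargetMachine &TM) : TargetLowering(TM) {}

      // Mirror the AArch64/ARM pattern from the patch: expand atomicrmw
      // operations up to the (assumed) native LL/SC width into a
      // load-linked/store-conditional loop, and leave wider ones alone.
      TargetLoweringBase::AtomicExpansionKind
      shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override {
        unsigned Size = AI->getType()->getPrimitiveSizeInBits();
        return Size <= 64 ? AtomicExpansionKind::LLSC
                          : AtomicExpansionKind::None;
      }
    };

    } // end namespace llvm

With this shape in place, AtomicExpandPass keys its tryExpandAtomicRMW switch off the same enum, and the commit message's stated goal of generalizing "to the other atomic instructions" presumably refers to routing the load/store/cmpxchg hooks visible in the headers above (shouldExpandAtomicLoadInIR and friends) through this one AtomicExpansionKind in follow-up patches.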