/// Enum that specifies what an AtomicRMWInst is expanded to, if at all. Exists
/// because different targets have different levels of support for these
- /// atomic RMW instructions, and also have different options w.r.t. what they should
- /// expand to.
+ /// atomic RMW instructions, and also have different options w.r.t. what they
+ /// should expand to.
enum class AtomicRMWExpansionKind {
None, // Don't expand the instruction.
  LLSC, // Expand the instruction into load-linked/store-conditional; used
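A backend communicates its choice through the shouldExpandAtomicRMWInIR hook, which returns this enum. A minimal sketch of an override for a hypothetical target (MyTargetLowering and the 32-bit cutoff are illustrative, not part of this patch):

TargetLoweringBase::AtomicRMWExpansionKind
MyTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMWI) const {
  // Assume narrow RMW operations must go through an LL/SC loop while
  // wider ones are natively supported.
  unsigned Size = RMWI->getType()->getPrimitiveSizeInBits();
  return Size <= 32 ? AtomicRMWExpansionKind::LLSC
                    : AtomicRMWExpansionKind::None;
}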
/// isLoadBitCastBeneficial() - Return true if the following transform
/// is beneficial.
/// fold (conv (load x)) -> (load (conv*)x)
- /// On architectures that don't natively support some vector loads efficiently,
- /// casting the load to a smaller vector of larger types and loading
- /// is more efficient, however, this can be undone by optimizations in
+ /// On architectures that don't natively support some vector loads
+ /// efficiently, casting the load to a smaller vector of larger types and
+ /// loading is more efficient; however, this can be undone by optimizations in
/// dag combiner.
- virtual bool isLoadBitCastBeneficial(EVT /* Load */, EVT /* Bitcast */) const {
+ virtual bool isLoadBitCastBeneficial(EVT /* Load */,
+ EVT /* Bitcast */) const {
return true;
}
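As an illustration, a target that wants to keep loads in their natural type could reject the fold whenever the bitcast destination is not legal (a sketch; MyTargetLowering and the policy are hypothetical):

bool MyTargetLowering::isLoadBitCastBeneficial(EVT LoadVT,
                                               EVT BitcastVT) const {
  // Allow the (conv (load x)) -> (load (conv*)x) fold only when the
  // destination type is legal for this target.
  return BitcastVT.isSimple() && isTypeLegal(BitcastVT);
}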
virtual bool isCheapToSpeculateCttz() const {
return false;
}
-
+
/// \brief Return true if it is cheap to speculate a call to intrinsic ctlz.
virtual bool isCheapToSpeculateCtlz() const {
return false;
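On a target with native count-leading/trailing-zeros instructions, both hooks would simply be flipped so the optimizer is willing to speculate the intrinsics past the zero check (sketch; the class name is hypothetical):

bool MyTargetLowering::isCheapToSpeculateCttz() const { return true; }
bool MyTargetLowering::isCheapToSpeculateCtlz() const { return true; }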
/// Return how this load with extension should be treated: either it is legal,
/// needs to be promoted to a larger size, needs to be expanded to some other
/// code sequence, or the target has a custom expander for it.
- LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT, EVT MemVT) const {
+ LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT,
+ EVT MemVT) const {
if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
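The table consulted here is populated by each target's constructor through setLoadExtAction; for instance (the type choices below are illustrative, not from this patch):

// Sign-extending i1 loads to i32 must be expanded into a plain load
// plus an explicit extend; zero-extending i8 loads are native.
setLoadExtAction(ISD::SEXTLOAD, MVT::i32, MVT::i1, Expand);
setLoadExtAction(ISD::ZEXTLOAD, MVT::i32, MVT::i8, Legal);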
/// seq_cst. But if they are lowered to monotonic accesses, no amount of
/// IR-level fences can prevent it.
/// @{
- virtual Instruction* emitLeadingFence(IRBuilder<> &Builder, AtomicOrdering Ord,
- bool IsStore, bool IsLoad) const {
+ virtual Instruction *emitLeadingFence(IRBuilder<> &Builder,
+ AtomicOrdering Ord, bool IsStore,
+ bool IsLoad) const {
if (!getInsertFencesForAtomic())
return nullptr;
return nullptr;
}
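A concrete override typically fences only before release-or-stronger stores; a sketch for a hypothetical target (the shape mirrors the default logic this hunk elides):

Instruction *MyTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
                                                AtomicOrdering Ord,
                                                bool IsStore,
                                                bool IsLoad) const {
  // Emit a full barrier before a release (or stronger) store.
  if (isAtLeastRelease(Ord) && IsStore)
    return Builder.CreateFence(Ord);
  return nullptr;
}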
- virtual Instruction* emitTrailingFence(IRBuilder<> &Builder, AtomicOrdering Ord,
- bool IsStore, bool IsLoad) const {
+ virtual Instruction *emitTrailingFence(IRBuilder<> &Builder,
+ AtomicOrdering Ord, bool IsStore,
+ bool IsLoad) const {
if (!getInsertFencesForAtomic())
return nullptr;
/// it succeeds, and nullptr otherwise.
/// If shouldExpandAtomicLoadInIR returns true on that load, it will undergo
/// another round of expansion.
- virtual LoadInst *lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const {
+ virtual LoadInst *
+ lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const {
return nullptr;
}
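An idempotent RMW is one whose operation leaves the stored value unchanged, e.g. atomicrmw or i32* %p, i32 0 seq_cst. A hedged sketch of an override that rewrites it into a fence followed by a monotonic load (hypothetical target; X86's implementation uses a similar fence-plus-load shape):

LoadInst *
MyTargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const {
  IRBuilder<> Builder(RMWI);
  // Order earlier accesses, then read the current value monotonically.
  Builder.CreateFence(RMWI->getOrdering());
  LoadInst *LI = Builder.CreateLoad(RMWI->getPointerOperand());
  LI->setAtomic(Monotonic);
  LI->setAlignment(RMWI->getType()->getPrimitiveSizeInBits() / 8);
  return LI;
}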
/// Replace/modify any TargetFrameIndex operands with a target-dependent
/// sequence of memory operands that is recognized by PrologEpilogInserter.
- MachineBasicBlock *emitPatchPoint(MachineInstr *MI, MachineBasicBlock *MBB) const;
+ MachineBasicBlock *emitPatchPoint(MachineInstr *MI,
+ MachineBasicBlock *MBB) const;
};
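Targets usually reach emitPatchPoint from their EmitInstrWithCustomInserter override when the opcode switch sees the stackmap/patchpoint pseudos, roughly:

// Inside a target's EmitInstrWithCustomInserter opcode switch (sketch):
case TargetOpcode::STACKMAP:
case TargetOpcode::PATCHPOINT:
  return emitPatchPoint(MI, BB);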
/// This class defines information used to lower LLVM code to legal SelectionDAG
/// Hooks for building estimates in place of slower divisions and square
/// roots.
-
+
/// Return a reciprocal square root estimate value for the input operand.
/// The RefinementSteps output is the number of Newton-Raphson refinement
/// iterations required to generate a sufficient (though not necessarily
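The refinement referred to here is standard Newton-Raphson for 1/sqrt(A): each step computes Est = Est * (1.5 - 0.5 * A * Est * Est). In scalar C++, for reference only (not part of this interface):

// One Newton-Raphson step per iteration; Steps plays the role of the
// RefinementSteps output described above.
static float refineRsqrt(float A, float Est, int Steps) {
  for (int I = 0; I < Steps; ++I)
    Est = Est * (1.5f - 0.5f * A * Est * Est);
  return Est;
}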