From 35c6dd24005d4c82274e8ec746ffa50859dd1798 Mon Sep 17 00:00:00 2001 From: Daniel Sanders Date: Tue, 7 Mar 2017 23:20:35 +0000 Subject: [PATCH] Recommit: [globalisel] Change LLT constructor string into an LLT-based object that knows how to generate it. Summary: This will allow future patches to inspect the details of the LLT. The implementation is now split between the Support and CodeGen libraries to allow TableGen to use this class without introducing layering concerns. Thanks to Ahmed Bougacha for finding a reasonable way to avoid the layering issue and providing the version of this patch without that problem. The problem with the previous commit appears to have been that TableGen was including CodeGen/LowLevelType.h instead of Support/LowLevelTypeImpl.h. Reviewers: t.p.northover, qcolombet, rovka, aditya_nandakumar, ab, javed.absar Subscribers: arsenm, nhaehnle, mgorny, dberris, llvm-commits, kristof.beyls Differential Revision: https://reviews.llvm.org/D30046 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@297241 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm/CodeGen/LowLevelType.h | 190 ++------------------------- include/llvm/Support/LowLevelTypeImpl.h | 202 +++++++++++++++++++++++++++++ include/llvm/module.modulemap | 10 +- lib/CodeGen/GlobalISel/IRTranslator.cpp | 24 ++-- lib/CodeGen/LowLevelType.cpp | 55 ++------ lib/Support/CMakeLists.txt | 1 + lib/Support/LowLevelType.cpp | 47 +++++++ lib/Target/AArch64/AArch64CallLowering.cpp | 4 +- lib/Target/AMDGPU/AMDGPUCallLowering.cpp | 2 +- lib/Target/X86/X86CallLowering.cpp | 5 +- unittests/CodeGen/LowLevelTypeTest.cpp | 6 +- utils/TableGen/GlobalISelEmitter.cpp | 51 +++++--- 12 files changed, 330 insertions(+), 267 deletions(-) create mode 100644 include/llvm/Support/LowLevelTypeImpl.h create mode 100644 lib/Support/LowLevelType.cpp diff --git a/include/llvm/CodeGen/LowLevelType.h b/include/llvm/CodeGen/LowLevelType.h index b8885c3a95f..a3c5c9329f5 100644 --- a/include/llvm/CodeGen/LowLevelType.h +++ b/include/llvm/CodeGen/LowLevelType.h @@ -1,4 +1,4 @@ -//== llvm/CodeGen/GlobalISel/LowLevelType.h -------------------- -*- C++ -*-==// +//== llvm/CodeGen/LowLevelType.h ------------------------------- -*- C++ -*-==// // // The LLVM Compiler Infrastructure // @@ -10,197 +10,23 @@ /// Implement a low-level type suitable for MachineInstr level instruction /// selection. /// -/// For a type attached to a MachineInstr, we only care about 2 details: total -/// size and the number of vector lanes (if any). Accordingly, there are 4 -/// possible valid type-kinds: -/// -/// * `sN` for scalars and aggregates -/// * `` for vectors, which must have at least 2 elements. -/// * `pN` for pointers -/// -/// Other information required for correct selection is expected to be carried -/// by the opcode, or non-type flags. For example the distinction between G_ADD -/// and G_FADD for int/float or fast-math flags. +/// This provides the CodeGen aspects of LowLevelType, such as Type conversion. 
// //===----------------------------------------------------------------------===// -#ifndef LLVM_CODEGEN_GLOBALISEL_LOWLEVELTYPE_H -#define LLVM_CODEGEN_GLOBALISEL_LOWLEVELTYPE_H +#ifndef LLVM_CODEGEN_LOWLEVELTYPE_H +#define LLVM_CODEGEN_LOWLEVELTYPE_H -#include -#include "llvm/ADT/DenseMapInfo.h" -#include "llvm/CodeGen/ValueTypes.h" +#include "llvm/Support/LowLevelTypeImpl.h" namespace llvm { class DataLayout; -class LLVMContext; class Type; -class raw_ostream; - -class LLT { -public: - enum TypeKind : uint16_t { - Invalid, - Scalar, - Pointer, - Vector, - }; - - /// Get a low-level scalar or aggregate "bag of bits". - static LLT scalar(unsigned SizeInBits) { - assert(SizeInBits > 0 && "invalid scalar size"); - return LLT{Scalar, 1, SizeInBits}; - } - - /// Get a low-level pointer in the given address space (defaulting to 0). - static LLT pointer(uint16_t AddressSpace, unsigned SizeInBits) { - return LLT{Pointer, AddressSpace, SizeInBits}; - } - - /// Get a low-level vector of some number of elements and element width. - /// \p NumElements must be at least 2. - static LLT vector(uint16_t NumElements, unsigned ScalarSizeInBits) { - assert(NumElements > 1 && "invalid number of vector elements"); - return LLT{Vector, NumElements, ScalarSizeInBits}; - } - - /// Get a low-level vector of some number of elements and element type. - static LLT vector(uint16_t NumElements, LLT ScalarTy) { - assert(NumElements > 1 && "invalid number of vector elements"); - assert(ScalarTy.isScalar() && "invalid vector element type"); - return LLT{Vector, NumElements, ScalarTy.getSizeInBits()}; - } - - explicit LLT(TypeKind Kind, uint16_t NumElements, unsigned SizeInBits) - : SizeInBits(SizeInBits), ElementsOrAddrSpace(NumElements), Kind(Kind) { - assert((Kind != Vector || ElementsOrAddrSpace > 1) && - "invalid number of vector elements"); - } - - explicit LLT() : SizeInBits(0), ElementsOrAddrSpace(0), Kind(Invalid) {} - - /// Construct a low-level type based on an LLVM type. - explicit LLT(Type &Ty, const DataLayout &DL); - - explicit LLT(MVT VT); - - bool isValid() const { return Kind != Invalid; } - - bool isScalar() const { return Kind == Scalar; } - - bool isPointer() const { return Kind == Pointer; } - - bool isVector() const { return Kind == Vector; } - - /// Returns the number of elements in a vector LLT. Must only be called on - /// vector types. - uint16_t getNumElements() const { - assert(isVector() && "cannot get number of elements on scalar/aggregate"); - return ElementsOrAddrSpace; - } - - /// Returns the total size of the type. Must only be called on sized types. - unsigned getSizeInBits() const { - if (isPointer() || isScalar()) - return SizeInBits; - return SizeInBits * ElementsOrAddrSpace; - } - - unsigned getScalarSizeInBits() const { - return SizeInBits; - } - - unsigned getAddressSpace() const { - assert(isPointer() && "cannot get address space of non-pointer type"); - return ElementsOrAddrSpace; - } - - /// Returns the vector's element type. Only valid for vector types. - LLT getElementType() const { - assert(isVector() && "cannot get element type of scalar/aggregate"); - return scalar(SizeInBits); - } - - /// Get a low-level type with half the size of the original, by halving the - /// size of the scalar type involved. For example `s32` will become `s16`, - /// `<2 x s32>` will become `<2 x s16>`. 
- LLT halfScalarSize() const { - assert(!isPointer() && getScalarSizeInBits() > 1 && - getScalarSizeInBits() % 2 == 0 && "cannot half size of this type"); - return LLT{Kind, ElementsOrAddrSpace, SizeInBits / 2}; - } - - /// Get a low-level type with twice the size of the original, by doubling the - /// size of the scalar type involved. For example `s32` will become `s64`, - /// `<2 x s32>` will become `<2 x s64>`. - LLT doubleScalarSize() const { - assert(!isPointer() && "cannot change size of this type"); - return LLT{Kind, ElementsOrAddrSpace, SizeInBits * 2}; - } - - /// Get a low-level type with half the size of the original, by halving the - /// number of vector elements of the scalar type involved. The source must be - /// a vector type with an even number of elements. For example `<4 x s32>` - /// will become `<2 x s32>`, `<2 x s32>` will become `s32`. - LLT halfElements() const { - assert(isVector() && ElementsOrAddrSpace % 2 == 0 && - "cannot half odd vector"); - if (ElementsOrAddrSpace == 2) - return scalar(SizeInBits); - - return LLT{Vector, static_cast(ElementsOrAddrSpace / 2), - SizeInBits}; - } - - /// Get a low-level type with twice the size of the original, by doubling the - /// number of vector elements of the scalar type involved. The source must be - /// a vector type. For example `<2 x s32>` will become `<4 x s32>`. Doubling - /// the number of elements in sN produces <2 x sN>. - LLT doubleElements() const { - assert(!isPointer() && "cannot double elements in pointer"); - return LLT{Vector, static_cast(ElementsOrAddrSpace * 2), - SizeInBits}; - } - - void print(raw_ostream &OS) const; - - bool operator==(const LLT &RHS) const { - return Kind == RHS.Kind && SizeInBits == RHS.SizeInBits && - ElementsOrAddrSpace == RHS.ElementsOrAddrSpace; - } - - bool operator!=(const LLT &RHS) const { return !(*this == RHS); } - - friend struct DenseMapInfo; -private: - unsigned SizeInBits; - uint16_t ElementsOrAddrSpace; - TypeKind Kind; -}; - -inline raw_ostream& operator<<(raw_ostream &OS, const LLT &Ty) { - Ty.print(OS); - return OS; -} -template<> struct DenseMapInfo { - static inline LLT getEmptyKey() { - return LLT{LLT::Invalid, 0, -1u}; - } - static inline LLT getTombstoneKey() { - return LLT{LLT::Invalid, 0, -2u}; - } - static inline unsigned getHashValue(const LLT &Ty) { - uint64_t Val = ((uint64_t)Ty.SizeInBits << 32) | - ((uint64_t)Ty.ElementsOrAddrSpace << 16) | (uint64_t)Ty.Kind; - return DenseMapInfo::getHashValue(Val); - } - static bool isEqual(const LLT &LHS, const LLT &RHS) { - return LHS == RHS; - } -}; +/// Construct a low-level type based on an LLVM type. +LLT getLLTForType(Type &Ty, const DataLayout &DL); } -#endif +#endif // LLVM_CODEGEN_LOWLEVELTYPE_H diff --git a/include/llvm/Support/LowLevelTypeImpl.h b/include/llvm/Support/LowLevelTypeImpl.h new file mode 100644 index 00000000000..02df4d806f1 --- /dev/null +++ b/include/llvm/Support/LowLevelTypeImpl.h @@ -0,0 +1,202 @@ +//== llvm/Support/LowLevelTypeImpl.h --------------------------- -*- C++ -*-==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +/// Implement a low-level type suitable for MachineInstr level instruction +/// selection. +/// +/// For a type attached to a MachineInstr, we only care about 2 details: total +/// size and the number of vector lanes (if any). 
Accordingly, there are 4 +/// possible valid type-kinds: +/// +/// * `sN` for scalars and aggregates +/// * `` for vectors, which must have at least 2 elements. +/// * `pN` for pointers +/// +/// Other information required for correct selection is expected to be carried +/// by the opcode, or non-type flags. For example the distinction between G_ADD +/// and G_FADD for int/float or fast-math flags. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_LOWLEVELTYPEIMPL_H +#define LLVM_SUPPORT_LOWLEVELTYPEIMPL_H + +#include +#include "llvm/ADT/DenseMapInfo.h" +#include "llvm/CodeGen/MachineValueType.h" + +namespace llvm { + +class DataLayout; +class Type; +class raw_ostream; + +class LLT { +public: + enum TypeKind : uint16_t { + Invalid, + Scalar, + Pointer, + Vector, + }; + + /// Get a low-level scalar or aggregate "bag of bits". + static LLT scalar(unsigned SizeInBits) { + assert(SizeInBits > 0 && "invalid scalar size"); + return LLT{Scalar, 1, SizeInBits}; + } + + /// Get a low-level pointer in the given address space (defaulting to 0). + static LLT pointer(uint16_t AddressSpace, unsigned SizeInBits) { + return LLT{Pointer, AddressSpace, SizeInBits}; + } + + /// Get a low-level vector of some number of elements and element width. + /// \p NumElements must be at least 2. + static LLT vector(uint16_t NumElements, unsigned ScalarSizeInBits) { + assert(NumElements > 1 && "invalid number of vector elements"); + return LLT{Vector, NumElements, ScalarSizeInBits}; + } + + /// Get a low-level vector of some number of elements and element type. + static LLT vector(uint16_t NumElements, LLT ScalarTy) { + assert(NumElements > 1 && "invalid number of vector elements"); + assert(ScalarTy.isScalar() && "invalid vector element type"); + return LLT{Vector, NumElements, ScalarTy.getSizeInBits()}; + } + + explicit LLT(TypeKind Kind, uint16_t NumElements, unsigned SizeInBits) + : SizeInBits(SizeInBits), ElementsOrAddrSpace(NumElements), Kind(Kind) { + assert((Kind != Vector || ElementsOrAddrSpace > 1) && + "invalid number of vector elements"); + } + + explicit LLT() : SizeInBits(0), ElementsOrAddrSpace(0), Kind(Invalid) {} + + explicit LLT(MVT VT); + + bool isValid() const { return Kind != Invalid; } + + bool isScalar() const { return Kind == Scalar; } + + bool isPointer() const { return Kind == Pointer; } + + bool isVector() const { return Kind == Vector; } + + /// Returns the number of elements in a vector LLT. Must only be called on + /// vector types. + uint16_t getNumElements() const { + assert(isVector() && "cannot get number of elements on scalar/aggregate"); + return ElementsOrAddrSpace; + } + + /// Returns the total size of the type. Must only be called on sized types. + unsigned getSizeInBits() const { + if (isPointer() || isScalar()) + return SizeInBits; + return SizeInBits * ElementsOrAddrSpace; + } + + unsigned getScalarSizeInBits() const { + return SizeInBits; + } + + unsigned getAddressSpace() const { + assert(isPointer() && "cannot get address space of non-pointer type"); + return ElementsOrAddrSpace; + } + + /// Returns the vector's element type. Only valid for vector types. + LLT getElementType() const { + assert(isVector() && "cannot get element type of scalar/aggregate"); + return scalar(SizeInBits); + } + + /// Get a low-level type with half the size of the original, by halving the + /// size of the scalar type involved. For example `s32` will become `s16`, + /// `<2 x s32>` will become `<2 x s16>`. 
+ LLT halfScalarSize() const { + assert(!isPointer() && getScalarSizeInBits() > 1 && + getScalarSizeInBits() % 2 == 0 && "cannot half size of this type"); + return LLT{Kind, ElementsOrAddrSpace, SizeInBits / 2}; + } + + /// Get a low-level type with twice the size of the original, by doubling the + /// size of the scalar type involved. For example `s32` will become `s64`, + /// `<2 x s32>` will become `<2 x s64>`. + LLT doubleScalarSize() const { + assert(!isPointer() && "cannot change size of this type"); + return LLT{Kind, ElementsOrAddrSpace, SizeInBits * 2}; + } + + /// Get a low-level type with half the size of the original, by halving the + /// number of vector elements of the scalar type involved. The source must be + /// a vector type with an even number of elements. For example `<4 x s32>` + /// will become `<2 x s32>`, `<2 x s32>` will become `s32`. + LLT halfElements() const { + assert(isVector() && ElementsOrAddrSpace % 2 == 0 && + "cannot half odd vector"); + if (ElementsOrAddrSpace == 2) + return scalar(SizeInBits); + + return LLT{Vector, static_cast(ElementsOrAddrSpace / 2), + SizeInBits}; + } + + /// Get a low-level type with twice the size of the original, by doubling the + /// number of vector elements of the scalar type involved. The source must be + /// a vector type. For example `<2 x s32>` will become `<4 x s32>`. Doubling + /// the number of elements in sN produces <2 x sN>. + LLT doubleElements() const { + assert(!isPointer() && "cannot double elements in pointer"); + return LLT{Vector, static_cast(ElementsOrAddrSpace * 2), + SizeInBits}; + } + + void print(raw_ostream &OS) const; + + bool operator==(const LLT &RHS) const { + return Kind == RHS.Kind && SizeInBits == RHS.SizeInBits && + ElementsOrAddrSpace == RHS.ElementsOrAddrSpace; + } + + bool operator!=(const LLT &RHS) const { return !(*this == RHS); } + + friend struct DenseMapInfo; +private: + unsigned SizeInBits; + uint16_t ElementsOrAddrSpace; + TypeKind Kind; +}; + +inline raw_ostream& operator<<(raw_ostream &OS, const LLT &Ty) { + Ty.print(OS); + return OS; +} + +template<> struct DenseMapInfo { + static inline LLT getEmptyKey() { + return LLT{LLT::Invalid, 0, -1u}; + } + static inline LLT getTombstoneKey() { + return LLT{LLT::Invalid, 0, -2u}; + } + static inline unsigned getHashValue(const LLT &Ty) { + uint64_t Val = ((uint64_t)Ty.SizeInBits << 32) | + ((uint64_t)Ty.ElementsOrAddrSpace << 16) | (uint64_t)Ty.Kind; + return DenseMapInfo::getHashValue(Val); + } + static bool isEqual(const LLT &LHS, const LLT &RHS) { + return LHS == RHS; + } +}; + +} + +#endif // LLVM_SUPPORT_LOWLEVELTYPEIMPL_H diff --git a/include/llvm/module.modulemap b/include/llvm/module.modulemap index 71cc11b7c9a..59b1f162103 100644 --- a/include/llvm/module.modulemap +++ b/include/llvm/module.modulemap @@ -284,12 +284,12 @@ module LLVM_Utils { header "Support/ConvertUTF.h" export * } -} -module LLVM_CodeGen_MachineValueType { - requires cplusplus - header "CodeGen/MachineValueType.h" - export * + module LLVM_CodeGen_MachineValueType { + requires cplusplus + header "CodeGen/MachineValueType.h" + export * + } } // This is used for a $src == $build compilation. Otherwise we use diff --git a/lib/CodeGen/GlobalISel/IRTranslator.cpp b/lib/CodeGen/GlobalISel/IRTranslator.cpp index 817428bfb03..da6fca67c8c 100644 --- a/lib/CodeGen/GlobalISel/IRTranslator.cpp +++ b/lib/CodeGen/GlobalISel/IRTranslator.cpp @@ -82,7 +82,8 @@ unsigned IRTranslator::getOrCreateVReg(const Value &Val) { // we need to concat together to produce the value. 
assert(Val.getType()->isSized() && "Don't know how to create an empty vreg"); - unsigned VReg = MRI->createGenericVirtualRegister(LLT{*Val.getType(), *DL}); + unsigned VReg = + MRI->createGenericVirtualRegister(getLLTForType(*Val.getType(), *DL)); ValReg = VReg; if (auto CV = dyn_cast(&Val)) { @@ -245,7 +246,7 @@ bool IRTranslator::translateSwitch(const User &U, const unsigned SwCondValue = getOrCreateVReg(*SwInst.getCondition()); const BasicBlock *OrigBB = SwInst.getParent(); - LLT LLTi1 = LLT(*Type::getInt1Ty(U.getContext()), *DL); + LLT LLTi1 = getLLTForType(*Type::getInt1Ty(U.getContext()), *DL); for (auto &CaseIt : SwInst.cases()) { const unsigned CaseValueReg = getOrCreateVReg(*CaseIt.getCaseValue()); const unsigned Tst = MRI->createGenericVirtualRegister(LLTi1); @@ -301,7 +302,7 @@ bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) { unsigned Res = getOrCreateVReg(LI); unsigned Addr = getOrCreateVReg(*LI.getPointerOperand()); - LLT VTy{*LI.getType(), *DL}, PTy{*LI.getPointerOperand()->getType(), *DL}; + MIRBuilder.buildLoad( Res, Addr, *MF->getMachineMemOperand(MachinePointerInfo(LI.getPointerOperand()), @@ -319,8 +320,6 @@ bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) { unsigned Val = getOrCreateVReg(*SI.getValueOperand()); unsigned Addr = getOrCreateVReg(*SI.getPointerOperand()); - LLT VTy{*SI.getValueOperand()->getType(), *DL}, - PTy{*SI.getPointerOperand()->getType(), *DL}; MIRBuilder.buildStore( Val, Addr, @@ -397,7 +396,8 @@ bool IRTranslator::translateSelect(const User &U, bool IRTranslator::translateBitCast(const User &U, MachineIRBuilder &MIRBuilder) { // If we're bitcasting to the source type, we can reuse the source vreg. - if (LLT{*U.getOperand(0)->getType(), *DL} == LLT{*U.getType(), *DL}) { + if (getLLTForType(*U.getOperand(0)->getType(), *DL) == + getLLTForType(*U.getType(), *DL)) { // Get the source vreg now, to avoid invalidating ValToVReg. 
unsigned SrcReg = getOrCreateVReg(*U.getOperand(0)); unsigned &Reg = ValToVReg[&U]; @@ -428,7 +428,7 @@ bool IRTranslator::translateGetElementPtr(const User &U, Value &Op0 = *U.getOperand(0); unsigned BaseReg = getOrCreateVReg(Op0); - LLT PtrTy{*Op0.getType(), *DL}; + LLT PtrTy = getLLTForType(*Op0.getType(), *DL); unsigned PtrSize = DL->getPointerSizeInBits(PtrTy.getAddressSpace()); LLT OffsetTy = LLT::scalar(PtrSize); @@ -494,7 +494,7 @@ bool IRTranslator::translateGetElementPtr(const User &U, bool IRTranslator::translateMemfunc(const CallInst &CI, MachineIRBuilder &MIRBuilder, unsigned ID) { - LLT SizeTy{*CI.getArgOperand(2)->getType(), *DL}; + LLT SizeTy = getLLTForType(*CI.getArgOperand(2)->getType(), *DL); Type *DstTy = CI.getArgOperand(0)->getType(); if (cast(DstTy)->getAddressSpace() != 0 || SizeTy.getSizeInBits() != DL->getPointerSizeInBits(0)) @@ -551,7 +551,7 @@ void IRTranslator::getStackGuard(unsigned DstReg, bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op, MachineIRBuilder &MIRBuilder) { - LLT Ty{*CI.getOperand(0)->getType(), *DL}; + LLT Ty = getLLTForType(*CI.getOperand(0)->getType(), *DL); LLT s1 = LLT::scalar(1); unsigned Width = Ty.getSizeInBits(); unsigned Res = MRI->createGenericVirtualRegister(Ty); @@ -694,7 +694,7 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID, getStackGuard(getOrCreateVReg(CI), MIRBuilder); return true; case Intrinsic::stackprotector: { - LLT PtrTy{*CI.getArgOperand(0)->getType(), *DL}; + LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL); unsigned GuardVal = MRI->createGenericVirtualRegister(PtrTy); getStackGuard(GuardVal, MIRBuilder); @@ -844,7 +844,7 @@ bool IRTranslator::translateLandingPad(const User &U, SmallVector Tys; for (Type *Ty : cast(LP.getType())->elements()) - Tys.push_back(LLT{*Ty, *DL}); + Tys.push_back(getLLTForType(*Ty, *DL)); assert(Tys.size() == 2 && "Only two-valued landingpads are supported"); // Mark exception register as live in. @@ -907,7 +907,7 @@ bool IRTranslator::translateAlloca(const User &U, MIRBuilder.buildConstant(TySize, -DL->getTypeAllocSize(Ty)); MIRBuilder.buildMul(AllocSize, NumElts, TySize); - LLT PtrTy = LLT{*AI.getType(), *DL}; + LLT PtrTy = getLLTForType(*AI.getType(), *DL); auto &TLI = *MF->getSubtarget().getTargetLowering(); unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore(); diff --git a/lib/CodeGen/LowLevelType.cpp b/lib/CodeGen/LowLevelType.cpp index d74b7306e0f..c4b9068fa90 100644 --- a/lib/CodeGen/LowLevelType.cpp +++ b/lib/CodeGen/LowLevelType.cpp @@ -1,4 +1,4 @@ -//===-- llvm/CodeGen/GlobalISel/LowLevelType.cpp --------------------------===// +//===-- llvm/CodeGen/LowLevelType.cpp -------------------------------------===// // // The LLVM Compiler Infrastructure // @@ -18,54 +18,21 @@ #include "llvm/Support/raw_ostream.h" using namespace llvm; -LLT::LLT(Type &Ty, const DataLayout &DL) { +LLT llvm::getLLTForType(Type &Ty, const DataLayout &DL) { if (auto VTy = dyn_cast(&Ty)) { - SizeInBits = VTy->getElementType()->getPrimitiveSizeInBits(); - ElementsOrAddrSpace = VTy->getNumElements(); - Kind = ElementsOrAddrSpace == 1 ? 
Scalar : Vector; + auto NumElements = VTy->getNumElements(); + auto ScalarSizeInBits = VTy->getElementType()->getPrimitiveSizeInBits(); + if (NumElements == 1) + return LLT::scalar(ScalarSizeInBits); + return LLT::vector(NumElements, ScalarSizeInBits); } else if (auto PTy = dyn_cast(&Ty)) { - Kind = Pointer; - SizeInBits = DL.getTypeSizeInBits(&Ty); - ElementsOrAddrSpace = PTy->getAddressSpace(); + return LLT::pointer(PTy->getAddressSpace(), DL.getTypeSizeInBits(&Ty)); } else if (Ty.isSized()) { // Aggregates are no different from real scalars as far as GlobalISel is // concerned. - Kind = Scalar; - SizeInBits = DL.getTypeSizeInBits(&Ty); - ElementsOrAddrSpace = 1; + auto SizeInBits = DL.getTypeSizeInBits(&Ty); assert(SizeInBits != 0 && "invalid zero-sized type"); - } else { - Kind = Invalid; - SizeInBits = ElementsOrAddrSpace = 0; + return LLT::scalar(SizeInBits); } -} - -LLT::LLT(MVT VT) { - if (VT.isVector()) { - SizeInBits = VT.getVectorElementType().getSizeInBits(); - ElementsOrAddrSpace = VT.getVectorNumElements(); - Kind = ElementsOrAddrSpace == 1 ? Scalar : Vector; - } else if (VT.isValid()) { - // Aggregates are no different from real scalars as far as GlobalISel is - // concerned. - Kind = Scalar; - SizeInBits = VT.getSizeInBits(); - ElementsOrAddrSpace = 1; - assert(SizeInBits != 0 && "invalid zero-sized type"); - } else { - Kind = Invalid; - SizeInBits = ElementsOrAddrSpace = 0; - } -} - -void LLT::print(raw_ostream &OS) const { - if (isVector()) - OS << "<" << ElementsOrAddrSpace << " x s" << SizeInBits << ">"; - else if (isPointer()) - OS << "p" << getAddressSpace(); - else if (isValid()) { - assert(isScalar() && "unexpected type"); - OS << "s" << getScalarSizeInBits(); - } else - llvm_unreachable("trying to print an invalid type"); + return LLT(); } diff --git a/lib/Support/CMakeLists.txt b/lib/Support/CMakeLists.txt index ae2715567c5..a46167e8714 100644 --- a/lib/Support/CMakeLists.txt +++ b/lib/Support/CMakeLists.txt @@ -71,6 +71,7 @@ add_llvm_library(LLVMSupport LineIterator.cpp Locale.cpp LockFileManager.cpp + LowLevelType.cpp ManagedStatic.cpp MathExtras.cpp MemoryBuffer.cpp diff --git a/lib/Support/LowLevelType.cpp b/lib/Support/LowLevelType.cpp new file mode 100644 index 00000000000..4290d69cd19 --- /dev/null +++ b/lib/Support/LowLevelType.cpp @@ -0,0 +1,47 @@ +//===-- llvm/Support/LowLevelType.cpp -------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +/// \file This file implements the more header-heavy bits of the LLT class to +/// avoid polluting users' namespaces. +// +//===----------------------------------------------------------------------===// + +#include "llvm/Support/LowLevelTypeImpl.h" +#include "llvm/Support/raw_ostream.h" +using namespace llvm; + +LLT::LLT(MVT VT) { + if (VT.isVector()) { + SizeInBits = VT.getVectorElementType().getSizeInBits(); + ElementsOrAddrSpace = VT.getVectorNumElements(); + Kind = ElementsOrAddrSpace == 1 ? Scalar : Vector; + } else if (VT.isValid()) { + // Aggregates are no different from real scalars as far as GlobalISel is + // concerned. 
+ Kind = Scalar; + SizeInBits = VT.getSizeInBits(); + ElementsOrAddrSpace = 1; + assert(SizeInBits != 0 && "invalid zero-sized type"); + } else { + Kind = Invalid; + SizeInBits = ElementsOrAddrSpace = 0; + } +} + +void LLT::print(raw_ostream &OS) const { + if (isVector()) + OS << "<" << ElementsOrAddrSpace << " x s" << SizeInBits << ">"; + else if (isPointer()) + OS << "p" << getAddressSpace(); + else if (isValid()) { + assert(isScalar() && "unexpected type"); + OS << "s" << getScalarSizeInBits(); + } else + llvm_unreachable("trying to print an invalid type"); +} diff --git a/lib/Target/AArch64/AArch64CallLowering.cpp b/lib/Target/AArch64/AArch64CallLowering.cpp index 4ccd6b68397..ea831eff654 100644 --- a/lib/Target/AArch64/AArch64CallLowering.cpp +++ b/lib/Target/AArch64/AArch64CallLowering.cpp @@ -196,8 +196,8 @@ void AArch64CallLowering::splitToValueTypes( // FIXME: set split flags if they're actually used (e.g. i128 on AAPCS). Type *SplitTy = SplitVT.getTypeForEVT(Ctx); SplitArgs.push_back( - ArgInfo{MRI.createGenericVirtualRegister(LLT{*SplitTy, DL}), SplitTy, - OrigArg.Flags, OrigArg.IsFixed}); + ArgInfo{MRI.createGenericVirtualRegister(getLLTForType(*SplitTy, DL)), + SplitTy, OrigArg.Flags, OrigArg.IsFixed}); } for (unsigned i = 0; i < Offsets.size(); ++i) diff --git a/lib/Target/AMDGPU/AMDGPUCallLowering.cpp b/lib/Target/AMDGPU/AMDGPUCallLowering.cpp index ae5fb358154..ce70d150e52 100644 --- a/lib/Target/AMDGPU/AMDGPUCallLowering.cpp +++ b/lib/Target/AMDGPU/AMDGPUCallLowering.cpp @@ -50,7 +50,7 @@ unsigned AMDGPUCallLowering::lowerParameterPtr(MachineIRBuilder &MIRBuilder, const Function &F = *MF.getFunction(); const DataLayout &DL = F.getParent()->getDataLayout(); PointerType *PtrTy = PointerType::get(ParamTy, AMDGPUAS::CONSTANT_ADDRESS); - LLT PtrType(*PtrTy, DL); + LLT PtrType = getLLTForType(*PtrTy, DL); unsigned DstReg = MRI.createGenericVirtualRegister(PtrType); unsigned KernArgSegmentPtr = TRI->getPreloadedValue(MF, SIRegisterInfo::KERNARG_SEGMENT_PTR); diff --git a/lib/Target/X86/X86CallLowering.cpp b/lib/Target/X86/X86CallLowering.cpp index dbb74249634..4da5d0d61d8 100644 --- a/lib/Target/X86/X86CallLowering.cpp +++ b/lib/Target/X86/X86CallLowering.cpp @@ -58,8 +58,9 @@ void X86CallLowering::splitToValueTypes(const ArgInfo &OrigArg, Type *PartTy = PartVT.getTypeForEVT(Context); for (unsigned i = 0; i < NumParts; ++i) { - ArgInfo Info = ArgInfo{MRI.createGenericVirtualRegister(LLT{*PartTy, DL}), - PartTy, OrigArg.Flags}; + ArgInfo Info = + ArgInfo{MRI.createGenericVirtualRegister(getLLTForType(*PartTy, DL)), + PartTy, OrigArg.Flags}; SplitArgs.push_back(Info); PerformArgSplit(Info.Reg, PartVT.getSizeInBits() * i); } diff --git a/unittests/CodeGen/LowLevelTypeTest.cpp b/unittests/CodeGen/LowLevelTypeTest.cpp index 4ea181c1c9d..67113005a46 100644 --- a/unittests/CodeGen/LowLevelTypeTest.cpp +++ b/unittests/CodeGen/LowLevelTypeTest.cpp @@ -68,7 +68,7 @@ TEST(LowLevelTypeTest, Scalar) { // Test Type->LLT conversion. Type *IRTy = IntegerType::get(C, S); - EXPECT_EQ(Ty, LLT(*IRTy, DL)); + EXPECT_EQ(Ty, getLLTForType(*IRTy, DL)); } } @@ -160,7 +160,7 @@ TEST(LowLevelTypeTest, Vector) { // Test Type->LLT conversion. Type *IRSTy = IntegerType::get(C, S); Type *IRTy = VectorType::get(IRSTy, Elts); - EXPECT_EQ(VTy, LLT(*IRTy, DL)); + EXPECT_EQ(VTy, getLLTForType(*IRTy, DL)); } } } @@ -188,7 +188,7 @@ TEST(LowLevelTypeTest, Pointer) { // Test Type->LLT conversion. 
     Type *IRTy = PointerType::get(IntegerType::get(C, 8), AS);
-    EXPECT_EQ(Ty, LLT(*IRTy, DL));
+    EXPECT_EQ(Ty, getLLTForType(*IRTy, DL));
   }
 }
 
diff --git a/utils/TableGen/GlobalISelEmitter.cpp b/utils/TableGen/GlobalISelEmitter.cpp
index 238a50a94c6..3df189bcf6e 100644
--- a/utils/TableGen/GlobalISelEmitter.cpp
+++ b/utils/TableGen/GlobalISelEmitter.cpp
@@ -36,6 +36,7 @@
 #include "llvm/CodeGen/MachineValueType.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Error.h"
+#include "llvm/Support/LowLevelTypeImpl.h"
 #include "llvm/Support/ScopedPrinter.h"
 #include "llvm/TableGen/Error.h"
 #include "llvm/TableGen/Record.h"
@@ -58,22 +59,38 @@ static cl::opt<bool> WarnOnSkippedPatterns(
 
 //===- Helper functions ---------------------------------------------------===//
 
+/// This class stands in for LLT wherever we want to tablegen-erate an
+/// equivalent at compiler run-time.
+class LLTCodeGen {
+private:
+  LLT Ty;
+
+public:
+  LLTCodeGen(const LLT &Ty) : Ty(Ty) {}
+
+  void emitCxxConstructorCall(raw_ostream &OS) const {
+    if (Ty.isScalar()) {
+      OS << "LLT::scalar(" << Ty.getSizeInBits() << ")";
+      return;
+    }
+    if (Ty.isVector()) {
+      OS << "LLT::vector(" << Ty.getNumElements() << ", " << Ty.getSizeInBits()
+         << ")";
+      return;
+    }
+    llvm_unreachable("Unhandled LLT");
+  }
+};
+
 /// Convert an MVT to an equivalent LLT if possible, or the invalid LLT() for
 /// MVTs that don't map cleanly to an LLT (e.g., iPTR, *any, ...).
-static Optional<std::string> MVTToLLT(MVT::SimpleValueType SVT) {
-  std::string TyStr;
-  raw_string_ostream OS(TyStr);
+static Optional<LLTCodeGen> MVTToLLT(MVT::SimpleValueType SVT) {
   MVT VT(SVT);
-  if (VT.isVector() && VT.getVectorNumElements() != 1) {
-    OS << "LLT::vector(" << VT.getVectorNumElements() << ", "
-       << VT.getScalarSizeInBits() << ")";
-  } else if (VT.isInteger() || VT.isFloatingPoint()) {
-    OS << "LLT::scalar(" << VT.getSizeInBits() << ")";
-  } else {
-    return None;
-  }
-  OS.flush();
-  return TyStr;
+  if (VT.isVector() && VT.getVectorNumElements() != 1)
+    return LLTCodeGen(LLT::vector(VT.getVectorNumElements(), VT.getScalarSizeInBits()));
+  if (VT.isInteger() || VT.isFloatingPoint())
+    return LLTCodeGen(LLT::scalar(VT.getSizeInBits()));
+  return None;
 }
 
 static bool isTrivialOperatorNode(const TreePatternNode *N) {
@@ -167,10 +184,10 @@ public:
 /// Generates code to check that an operand is a particular LLT.
 class LLTOperandMatcher : public OperandPredicateMatcher {
 protected:
-  std::string Ty;
+  LLTCodeGen Ty;
 
 public:
-  LLTOperandMatcher(std::string Ty)
+  LLTOperandMatcher(const LLTCodeGen &Ty)
       : OperandPredicateMatcher(OPM_LLT), Ty(Ty) {}
 
   static bool classof(const OperandPredicateMatcher *P) {
@@ -179,7 +196,9 @@ public:
 
   void emitCxxPredicateExpr(raw_ostream &OS,
                             StringRef OperandExpr) const override {
-    OS << "MRI.getType(" << OperandExpr << ".getReg()) == (" << Ty << ")";
+    OS << "MRI.getType(" << OperandExpr << ".getReg()) == (";
+    Ty.emitCxxConstructorCall(OS);
+    OS << ")";
   }
 };
 
-- 
2.11.0
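
[Illustrative sketch, not part of the patch] The change has two visible effects at call sites: in-tree code now writes getLLTForType(*Ty, *DL) instead of constructing LLT{*Ty, *DL} directly, and the TableGen emitter carries a real LLT (wrapped in LLTCodeGen) from which it regenerates the C++ constructor call, rather than passing a pre-rendered string around. The stand-alone sketch below mirrors that second idea against the new Support-level header; emitConstructorCall and the main() driver are hypothetical stand-ins (LLTCodeGen itself is local to utils/TableGen/GlobalISelEmitter.cpp and not a public API), while LLT, its factory functions, and raw_ostream come from the tree as patched.

// Sketch only: assumes an LLVM source tree containing this commit.
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

// Hypothetical stand-in for LLTCodeGen::emitCxxConstructorCall: print the C++
// expression that would rebuild Ty at compiler run-time. Because the emitter
// now holds an LLT rather than a string, it can inspect the type (kind, sizes)
// before deciding what to print.
static void emitConstructorCall(raw_ostream &OS, const LLT &Ty) {
  if (Ty.isScalar()) {
    OS << "LLT::scalar(" << Ty.getSizeInBits() << ")";
    return;
  }
  if (Ty.isVector()) {
    OS << "LLT::vector(" << Ty.getNumElements() << ", "
       << Ty.getScalarSizeInBits() << ")";
    return;
  }
  if (Ty.isPointer()) {
    OS << "LLT::pointer(" << Ty.getAddressSpace() << ", "
       << Ty.getSizeInBits() << ")";
    return;
  }
  llvm_unreachable("unhandled LLT kind");
}

int main() {
  // Each line prints the generated constructor call next to the LLT's own
  // printed form (via the operator<< defined in LowLevelTypeImpl.h).
  for (LLT Ty : {LLT::scalar(32), LLT::vector(4, 16), LLT::pointer(0, 64)}) {
    emitConstructorCall(outs(), Ty);
    outs() << "   // printed form: " << Ty << "\n";
  }
  return 0;
}

Built against this revision, the loop should print lines such as "LLT::vector(4, 16)   // printed form: <4 x s16>", which is the pairing the emitter relies on: the same LLT value drives both the run-time check and the generated source.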