/// getMemsetValue - Vectorized representation of the memset value
/// operand.
static SDOperand getMemsetValue(SDOperand Value, MVT::ValueType VT,
SelectionDAG &DAG) {
- MVT::ValueType CurVT = VT;
+ unsigned NumBits = MVT::isVector(VT) ?
+ MVT::getSizeInBits(MVT::getVectorElementType(VT)) : MVT::getSizeInBits(VT);
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
- uint64_t Val = C->getValue() & 255;
+ APInt Val = APInt(NumBits, C->getValue() & 255);
unsigned Shift = 8;
- while (CurVT != MVT::i8) {
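+ // Replicate the byte across the value by repeated doubling, e.g.
+ // 0xAB -> 0xABAB -> 0xABABABAB for a 32-bit element.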
+ for (unsigned i = NumBits; i > 8; i >>= 1) {
Val = (Val << Shift) | Val;
Shift <<= 1;
- CurVT = (MVT::ValueType)((unsigned)CurVT - 1);
- }
- return DAG.getConstant(Val, VT);
- } else {
- Value = DAG.getNode(ISD::ZERO_EXTEND, VT, Value);
- unsigned Shift = 8;
- while (CurVT != MVT::i8) {
- Value =
- DAG.getNode(ISD::OR, VT,
- DAG.getNode(ISD::SHL, VT, Value,
- DAG.getConstant(Shift, MVT::i8)), Value);
- Shift <<= 1;
- CurVT = (MVT::ValueType)((unsigned)CurVT - 1);
}
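+ // If the element type is FP (e.g. f32 when memset is lowered to v4f32
+ // stores), reinterpret the replicated bit pattern as an FP constant.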
+ if (MVT::isInteger(VT))
+ return DAG.getConstant(Val, VT);
+ return DAG.getConstantFP(APFloat(Val), VT);
+ }
- return Value;
+ Value = DAG.getNode(ISD::ZERO_EXTEND, VT, Value);
+ unsigned Shift = 8;
+ for (unsigned i = NumBits; i > 8; i >>= 1) {
+ Value = DAG.getNode(ISD::OR, VT,
+ DAG.getNode(ISD::SHL, VT, Value,
+ DAG.getConstant(Shift, MVT::i8)), Value);
+ Shift <<= 1;
}
+
+ return Value;
}
/// getMemsetStringVal - Similar to getMemsetValue, except this is only used
/// when a memcpy is turned into a memset because the source is a constant
/// string ptr.
-static SDOperand getMemsetStringVal(MVT::ValueType VT,
- SelectionDAG &DAG,
+static SDOperand getMemsetStringVal(MVT::ValueType VT, SelectionDAG &DAG,
const TargetLowering &TLI,
std::string &Str, unsigned Offset) {
+ assert(!MVT::isVector(VT) && "Can't handle vector type here!");
+ unsigned NumBits = MVT::getSizeInBits(VT);
+ unsigned MSB = NumBits / 8;
uint64_t Val = 0;
- unsigned MSB = MVT::getSizeInBits(VT) / 8;
if (TLI.isLittleEndian())
Offset = Offset + MSB - 1;
for (unsigned i = 0; i != MSB; ++i) {
}
/// getMemBasePlusOffset - Returns base and offset node for the
+/// given base pointer and constant offset.
static SDOperand getMemBasePlusOffset(SDOperand Base, unsigned Offset,
SelectionDAG &DAG) {
MVT::ValueType VT = Base.getValueType();
return DAG.getNode(ISD::ADD, VT, Base, DAG.getConstant(Offset, VT));
}
+/// isMemSrcFromString - Returns true if memcpy source is a string constant.
+///
+static bool isMemSrcFromString(SDOperand Src, std::string &Str,
+ uint64_t &SrcOff) {
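+ // Accept either a direct GlobalAddress or (add GlobalAddress, Constant),
+ // i.e. a constant offset into a global.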
+ unsigned SrcDelta = 0;
+ GlobalAddressSDNode *G = NULL;
+ if (Src.getOpcode() == ISD::GlobalAddress)
+ G = cast<GlobalAddressSDNode>(Src);
+ else if (Src.getOpcode() == ISD::ADD &&
+ Src.getOperand(0).getOpcode() == ISD::GlobalAddress &&
+ Src.getOperand(1).getOpcode() == ISD::Constant) {
+ G = cast<GlobalAddressSDNode>(Src.getOperand(0));
+ SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getValue();
+ }
+ if (!G)
+ return false;
+
+ GlobalVariable *GV = dyn_cast<GlobalVariable>(G->getGlobal());
+ if (GV && GV->isConstant()) {
+ Str = GV->getStringValue(false);
+ if (!Str.empty()) {
+ SrcOff += SrcDelta;
+ return true;
+ }
+ }
+
+ return false;
+}
+
/// MeetsMaxMemopRequirement - Determines if the number of memory ops required
/// to replace the memset / memcpy is below the threshold. It also returns the
/// types of the sequence of memory ops to perform memset / memcpy.
-static bool MeetsMaxMemopRequirement(std::vector<MVT::ValueType> &MemOps,
- unsigned Limit, uint64_t Size,
- unsigned Align,
- const TargetLowering &TLI) {
- MVT::ValueType VT;
-
- if (TLI.allowsUnalignedMemoryAccesses()) {
- VT = MVT::i64;
- } else {
- switch (Align & 7) {
- case 0:
- VT = MVT::i64;
- break;
- case 4:
- VT = MVT::i32;
- break;
- case 2:
- VT = MVT::i16;
- break;
- default:
- VT = MVT::i8;
- break;
+static
+bool MeetsMaxMemopRequirement(std::vector<MVT::ValueType> &MemOps,
+ SDOperand Dst, SDOperand Src,
+ unsigned Limit, uint64_t Size, unsigned &Align,
+ SelectionDAG &DAG,
+ const TargetLowering &TLI) {
+ bool AllowUnalign = TLI.allowsUnalignedMemoryAccesses();
+
+ std::string Str;
+ uint64_t SrcOff = 0;
+ bool isSrcStr = isMemSrcFromString(Src, Str, SrcOff);
+ bool isSrcConst = isa<ConstantSDNode>(Src);
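+ // Ask the target for its preferred memory-op type, e.g. a 128-bit vector
+ // type when wide moves are profitable; MVT::iAny means no preference.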
+ MVT::ValueType VT = TLI.getOptimalMemOpType(Size, Align, isSrcConst, isSrcStr);
+ if (VT != MVT::iAny) {
+ unsigned NewAlign = (unsigned)
+ TLI.getTargetData()->getABITypeAlignment(MVT::getTypeForValueType(VT));
+ // If source is a string constant, this will require an unaligned load.
+ if (NewAlign > Align && (isSrcConst || AllowUnalign)) {
+ if (Dst.getOpcode() != ISD::FrameIndex) {
+ // Can't change destination alignment. It requires an unaligned store.
+ if (AllowUnalign)
+ VT = MVT::iAny;
+ } else {
+ int FI = cast<FrameIndexSDNode>(Dst)->getIndex();
+ MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
+ if (MFI->isFixedObjectIndex(FI)) {
+ // Can't change destination alignment. It requires an unaligned store.
+ if (AllowUnalign)
+ VT = MVT::iAny;
+ } else {
+ // Give the stack frame object a larger alignment.
+ MFI->setObjectAlignment(FI, NewAlign);
+ Align = NewAlign;
+ }
+ }
}
}
- MVT::ValueType LVT = MVT::i64;
- while (!TLI.isTypeLegal(LVT))
- LVT = (MVT::ValueType)((unsigned)LVT - 1);
- assert(MVT::isInteger(LVT));
+ if (VT == MVT::iAny) {
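+ // No target preference: pick the widest type the known alignment allows,
+ // then clamp it to the largest legal integer type.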
+ if (AllowUnalign) {
+ VT = MVT::i64;
+ } else {
+ switch (Align & 7) {
+ case 0: VT = MVT::i64; break;
+ case 4: VT = MVT::i32; break;
+ case 2: VT = MVT::i16; break;
+ default: VT = MVT::i8; break;
+ }
+ }
+
+ MVT::ValueType LVT = MVT::i64;
+ while (!TLI.isTypeLegal(LVT))
+ LVT = (MVT::ValueType)((unsigned)LVT - 1);
+ assert(MVT::isInteger(LVT));
- if (VT > LVT)
- VT = LVT;
+ if (VT > LVT)
+ VT = LVT;
+ }
unsigned NumMemOps = 0;
while (Size != 0) {
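+ // Shrink VT until it fits the remaining bytes, e.g. Size == 20 with
+ // VT == v4i32 emits one 16-byte op followed by a single i32 op.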
unsigned VTSize = MVT::getSizeInBits(VT) / 8;
while (VTSize > Size) {
- VT = (MVT::ValueType)((unsigned)VT - 1);
- VTSize >>= 1;
+ // For now, only use non-vector loads / stores for the left-over pieces.
+ if (MVT::isVector(VT)) {
+ VT = MVT::i64;
+ while (!TLI.isTypeLegal(VT))
+ VT = (MVT::ValueType)((unsigned)VT - 1);
+ VTSize = MVT::getSizeInBits(VT) / 8;
+ } else {
+ VT = (MVT::ValueType)((unsigned)VT - 1);
+ VTSize >>= 1;
+ }
}
- assert(MVT::isInteger(VT));
if (++NumMemOps > Limit)
return false;
static SDOperand getMemcpyLoadsAndStores(SelectionDAG &DAG,
SDOperand Chain, SDOperand Dst,
SDOperand Src, uint64_t Size,
- unsigned Align,
- bool AlwaysInline,
+ unsigned Align, bool AlwaysInline,
const Value *DstSV, uint64_t DstSVOff,
const Value *SrcSV, uint64_t SrcSVOff){
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
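+ // Note: a Limit of -1 wraps around to UINT64_MAX, i.e. effectively no
+ // cap on the number of memory ops when the copy must be inlined.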
uint64_t Limit = -1;
if (!AlwaysInline)
Limit = TLI.getMaxStoresPerMemcpy();
- if (!MeetsMaxMemopRequirement(MemOps, Limit, Size, Align, TLI))
+ unsigned DstAlign = Align; // Destination alignment can change.
+ if (!MeetsMaxMemopRequirement(MemOps, Dst, Src, Limit, Size, DstAlign,
+ DAG, TLI))
return SDOperand();
- SmallVector<SDOperand, 8> OutChains;
-
- unsigned NumMemOps = MemOps.size();
- unsigned SrcDelta = 0;
- GlobalAddressSDNode *G = NULL;
std::string Str;
- bool CopyFromStr = false;
uint64_t SrcOff = 0, DstOff = 0;
+ bool CopyFromStr = isMemSrcFromString(Src, Str, SrcOff);
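+ // If the source is a constant string, its bytes can be materialized as
+ // immediates, avoiding the loads entirely.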
- if (Src.getOpcode() == ISD::GlobalAddress)
- G = cast<GlobalAddressSDNode>(Src);
- else if (Src.getOpcode() == ISD::ADD &&
- Src.getOperand(0).getOpcode() == ISD::GlobalAddress &&
- Src.getOperand(1).getOpcode() == ISD::Constant) {
- G = cast<GlobalAddressSDNode>(Src.getOperand(0));
- SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getValue();
- }
- if (G) {
- GlobalVariable *GV = dyn_cast<GlobalVariable>(G->getGlobal());
- if (GV && GV->isConstant()) {
- Str = GV->getStringValue(false);
- if (!Str.empty()) {
- CopyFromStr = true;
- SrcOff += SrcDelta;
- }
- }
- }
-
+ SmallVector<SDOperand, 8> OutChains;
+ unsigned NumMemOps = MemOps.size();
for (unsigned i = 0; i < NumMemOps; i++) {
MVT::ValueType VT = MemOps[i];
unsigned VTSize = MVT::getSizeInBits(VT) / 8;
SDOperand Value, Store;
- if (CopyFromStr) {
+ if (CopyFromStr && !MVT::isVector(VT)) {
+ // It's unlikely a store of a vector immediate can be done in a single
+ // instruction. It would require a load from a constantpool first.
+ // FIXME: Handle cases where store of vector immediate is done in a
+ // single instruction.
Value = getMemsetStringVal(VT, DAG, TLI, Str, SrcOff);
- Store =
- DAG.getStore(Chain, Value,
- getMemBasePlusOffset(Dst, DstOff, DAG),
- DstSV, DstSVOff + DstOff);
+ Store = DAG.getStore(Chain, Value,
+ getMemBasePlusOffset(Dst, DstOff, DAG),
+ DstSV, DstSVOff + DstOff);
} else {
Value = DAG.getLoad(VT, Chain,
getMemBasePlusOffset(Src, SrcOff, DAG),
SrcSV, SrcSVOff + SrcOff, false, Align);
- Store =
- DAG.getStore(Chain, Value,
- getMemBasePlusOffset(Dst, DstOff, DAG),
- DstSV, DstSVOff + DstOff, false, Align);
+ Store = DAG.getStore(Chain, Value,
+ getMemBasePlusOffset(Dst, DstOff, DAG),
+ DstSV, DstSVOff + DstOff, false, DstAlign);
}
OutChains.push_back(Store);
SrcOff += VTSize;
// Expand memset to a series of load/store ops if the size operand
// falls below a certain threshold.
std::vector<MVT::ValueType> MemOps;
- if (!MeetsMaxMemopRequirement(MemOps, TLI.getMaxStoresPerMemset(),
- Size, Align, TLI))
+ if (!MeetsMaxMemopRequirement(MemOps, Dst, Src, TLI.getMaxStoresPerMemset(),
+ Size, Align, DAG, TLI))
return SDOperand();
SmallVector<SDOperand, 8> OutChains;
return Align;
}
+/// getOptimalMemOpType - Returns the target-specific optimal type for load
+/// and store operations as a result of memset, memcpy, and memmove lowering.
+/// It returns MVT::iAny if SelectionDAG should be responsible for
+/// determining it.
+MVT::ValueType
+X86TargetLowering::getOptimalMemOpType(uint64_t Size, unsigned Align,
+ bool isSrcConst, bool isSrcStr) const {
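+ // A constant or string source favors 16-byte SSE moves for copies of at
+ // least 16 bytes; v4i32 needs SSE2 while v4f32 only needs SSE1.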
+ if ((isSrcConst || isSrcStr) && Subtarget->hasSSE2() && Size >= 16)
+ return MVT::v4i32;
+ if ((isSrcConst || isSrcStr) && Subtarget->hasSSE1() && Size >= 16)
+ return MVT::v4f32;
+ if (Subtarget->is64Bit() && Size >= 8)
+ return MVT::i64;
+ return MVT::i32;
+}
+
/// getPICJumpTableRelocBase - Returns relocation base for the given PIC
/// jumptable.
SDOperand X86TargetLowering::getPICJumpTableRelocBase(SDOperand Table,
/// getZeroVector - Returns a vector of specified type with all zero elements.
///
-static SDOperand getZeroVector(MVT::ValueType VT, SelectionDAG &DAG) {
+static SDOperand getZeroVector(MVT::ValueType VT, bool HasSSE2,
+ SelectionDAG &DAG) {
assert(MVT::isVector(VT) && "Expected a vector type");
- // Always build zero vectors as <4 x i32> or <2 x i32> bitcasted to their dest
- // type. This ensures they get CSE'd.
+ // Always build zero vectors as <4 x i32>, <4 x f32> (SSE1) or <2 x i32>
+ // bitcasted to their dest type. This ensures they get CSE'd.
- SDOperand Cst = DAG.getTargetConstant(0, MVT::i32);
SDOperand Vec;
- if (MVT::getSizeInBits(VT) == 64) // MMX
+ if (MVT::getSizeInBits(VT) == 64) { // MMX
+ SDOperand Cst = DAG.getTargetConstant(0, MVT::i32);
Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, Cst, Cst);
- else // SSE
+ } else if (HasSSE2) { // SSE2
+ SDOperand Cst = DAG.getTargetConstant(0, MVT::i32);
Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Cst, Cst, Cst, Cst);
+ } else { // SSE1
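+ // SSE1 has no 128-bit integer vector ops, so build the zero as v4f32
+ // (typically a single xorps) and bitcast to the requested type.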
+ SDOperand Cst = DAG.getTargetConstantFP(+0.0, MVT::f32);
+ Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4f32, Cst, Cst, Cst, Cst);
+ }
return DAG.getNode(ISD::BIT_CONVERT, VT, Vec);
}
V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1, Mask);
NumElems >>= 1;
}
- Mask = getZeroVector(MVT::v4i32, DAG);
+ Mask = getZeroVector(MVT::v4i32, true, DAG);
}
V1 = DAG.getNode(ISD::BIT_CONVERT, PVT, V1);
/// element of V2 is swizzled into the zero/undef vector, landing at element
/// Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
static SDOperand getShuffleVectorZeroOrUndef(SDOperand V2, unsigned Idx,
- bool isZero, SelectionDAG &DAG) {
+ bool isZero, bool HasSSE2,
+ SelectionDAG &DAG) {
MVT::ValueType VT = V2.getValueType();
- SDOperand V1 = isZero ? getZeroVector(VT, DAG) : DAG.getNode(ISD::UNDEF, VT);
+ SDOperand V1 = isZero
+ ? getZeroVector(VT, HasSSE2, DAG) : DAG.getNode(ISD::UNDEF, VT);
unsigned NumElems = MVT::getVectorNumElements(V2.getValueType());
MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
MVT::ValueType EVT = MVT::getVectorElementType(MaskVT);
bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
if (ThisIsNonZero && First) {
if (NumZero)
- V = getZeroVector(MVT::v8i16, DAG);
+ V = getZeroVector(MVT::v8i16, true, DAG);
else
V = DAG.getNode(ISD::UNDEF, MVT::v8i16);
First = false;
if (isNonZero) {
if (First) {
if (NumZero)
- V = getZeroVector(MVT::v8i16, DAG);
+ V = getZeroVector(MVT::v8i16, true, DAG);
else
V = DAG.getNode(ISD::UNDEF, MVT::v8i16);
First = false;
if (ISD::isBuildVectorAllOnes(Op.Val))
return getOnesVector(Op.getValueType(), DAG);
- return getZeroVector(Op.getValueType(), DAG);
+ return getZeroVector(Op.getValueType(), Subtarget->hasSSE2(), DAG);
}
MVT::ValueType VT = Op.getValueType();
// convert it to a vector with movd (S2V+shuffle to zero extend).
Item = DAG.getNode(ISD::TRUNCATE, MVT::i32, Item);
Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, VecVT, Item);
- Item = getShuffleVectorZeroOrUndef(Item, 0, true, DAG);
+ Item = getShuffleVectorZeroOrUndef(Item, 0, true,
+ Subtarget->hasSSE2(), DAG);
// Now we have our 32-bit value zero extended in the low element of
// a vector. If Idx != 0, swizzle it into place.
(EVT != MVT::i64 || Subtarget->is64Bit())) {
Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Item);
// Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
- return getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, DAG);
+ return getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0,
+ Subtarget->hasSSE2(), DAG);
}
if (IsAllConstants) // Otherwise, it's better to do a constpool load.
Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Item);
// Turn it into a shuffle of zero and zero-extended scalar to vector.
- Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, DAG);
+ Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0,
+ Subtarget->hasSSE2(), DAG);
MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT);
SmallVector<SDOperand, 8> MaskVec;
unsigned Idx = CountTrailingZeros_32(NonZeros);
SDOperand V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT,
Op.getOperand(Idx));
- return getShuffleVectorZeroOrUndef(V2, Idx, true, DAG);
+ return getShuffleVectorZeroOrUndef(V2, Idx, true,
+ Subtarget->hasSSE2(), DAG);
}
return SDOperand();
}
for (unsigned i = 0; i < 4; ++i) {
bool isZero = !(NonZeros & (1 << i));
if (isZero)
- V[i] = getZeroVector(VT, DAG);
+ V[i] = getZeroVector(VT, Subtarget->hasSSE2(), DAG);
else
V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i));
}
return DAG.getNode(ISD::UNDEF, VT);
if (isZeroShuffle(Op.Val))
- return getZeroVector(VT, DAG);
+ return getZeroVector(VT, Subtarget->hasSSE2(), DAG);
if (isIdentityMask(PermMask.Val))
return V1;