From f3db8f562ffc584376bdd06ea37a2228e1e9dd9c Mon Sep 17 00:00:00 2001
From: Simon Pilgrim
Date: Mon, 21 Nov 2016 12:05:49 +0000
Subject: [PATCH] [X86][SSE] Allow PACKSS to be used to truncate any type of
 all/none sign bits input

At the moment we only use truncateVectorCompareWithPACKSS on direct vector
comparison results, which are just one example of an input with known
all/none sign bits.

This change drops the direct matching of a SETCC opcode and instead queries
SelectionDAG::ComputeNumSignBits, accepting any input whose sign bit is known
to be splatted across every scalar element.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@287535 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/X86ISelLowering.cpp | 32 ++++++++++----------
 test/CodeGen/X86/packss.ll         | 62 +++++++++++++++-----------------
 2 files changed, 41 insertions(+), 53 deletions(-)

diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 5e84ce29793..d98b3c35c06 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -14664,8 +14664,8 @@ static SDValue truncateVectorCompareWithPACKSS(EVT DstVT, SDValue In,
                                                const SDLoc &DL,
                                                SelectionDAG &DAG,
                                                const X86Subtarget &Subtarget) {
-  // AVX512 has fast truncate.
-  if (Subtarget.hasAVX512())
+  // Requires SSE2 but AVX512 has fast truncate.
+  if (!Subtarget.hasSSE2() || Subtarget.hasAVX512())
     return SDValue();
 
   EVT SrcVT = In.getValueType();
@@ -31020,22 +31020,20 @@ static SDValue combineVectorTruncation(SDNode *N, SelectionDAG &DAG,
   return SDValue();
 }
 
-/// This function transforms vector truncation of comparison results from
+/// This function transforms vector truncation of 'all or none' bits values.
 /// vXi16/vXi32/vXi64 to vXi8/vXi16/vXi32 into X86ISD::PACKSS operations.
-static SDValue combineVectorCompareTruncation(SDNode *N, SDLoc &DL,
-                                              SelectionDAG &DAG,
-                                              const X86Subtarget &Subtarget) {
-  // AVX512 has fast truncate.
-  if (Subtarget.hasAVX512())
+static SDValue combineVectorSignBitsTruncation(SDNode *N, SDLoc &DL,
+                                               SelectionDAG &DAG,
+                                               const X86Subtarget &Subtarget) {
+  // Requires SSE2 but AVX512 has fast truncate.
+  if (!Subtarget.hasSSE2() || Subtarget.hasAVX512())
     return SDValue();
 
   if (!N->getValueType(0).isVector() || !N->getValueType(0).isSimple())
     return SDValue();
 
-  // TODO: we should be able to support sources other than compares as long
-  // as we are saturating+packing zero/all bits only.
   SDValue In = N->getOperand(0);
-  if (In.getOpcode() != ISD::SETCC || !In.getValueType().isSimple())
+  if (!In.getValueType().isSimple())
     return SDValue();
 
   MVT VT = N->getValueType(0).getSimpleVT();
@@ -31044,9 +31042,11 @@ static SDValue combineVectorCompareTruncation(SDNode *N, SDLoc &DL,
   MVT InVT = In.getValueType().getSimpleVT();
   MVT InSVT = InVT.getScalarType();
 
-  assert(DAG.getTargetLoweringInfo().getBooleanContents(InVT) ==
-             TargetLoweringBase::ZeroOrNegativeOneBooleanContent &&
-         "Expected comparison result to be zero/all bits");
+  // Use PACKSS if the input is a splatted sign bit.
+  // e.g. Comparison result, sext_in_reg, etc.
+  unsigned NumSignBits = DAG.ComputeNumSignBits(In);
+  if (NumSignBits != InSVT.getSizeInBits())
+    return SDValue();
 
   // Check we have a truncation suited for PACKSS.
   if (!VT.is128BitVector() && !VT.is256BitVector())
@@ -31077,8 +31077,8 @@ static SDValue combineTruncate(SDNode *N, SelectionDAG &DAG,
     return DAG.getNode(X86ISD::MMX_MOVD2W, DL, MVT::i32, BCSrc);
  }
 
-  // Try to truncate vector comparison results with PACKSS.
-  if (SDValue V = combineVectorCompareTruncation(N, DL, DAG, Subtarget))
+  // Try to truncate extended sign bits with PACKSS.
+  if (SDValue V = combineVectorSignBitsTruncation(N, DL, DAG, Subtarget))
     return V;
 
   return combineVectorTruncation(N, DAG, Subtarget);
diff --git a/test/CodeGen/X86/packss.ll b/test/CodeGen/X86/packss.ll
index 72842c6976c..44ecb40a7a4 100644
--- a/test/CodeGen/X86/packss.ll
+++ b/test/CodeGen/X86/packss.ll
@@ -7,39 +7,39 @@
 define <4 x i32> @trunc_ashr_v4i64(<4 x i64> %a) nounwind {
 ; X32-SSE-LABEL: trunc_ashr_v4i64:
 ; X32-SSE:       # BB#0:
-; X32-SSE-NEXT:    psrad $31, %xmm0
 ; X32-SSE-NEXT:    psrad $31, %xmm1
-; X32-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
-; X32-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
-; X32-SSE-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; X32-SSE-NEXT:    psrad $31, %xmm0
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; X32-SSE-NEXT:    packsswb %xmm1, %xmm0
 ; X32-SSE-NEXT:    retl
 ;
 ; X64-SSE-LABEL: trunc_ashr_v4i64:
 ; X64-SSE:       # BB#0:
-; X64-SSE-NEXT:    psrad $31, %xmm0
 ; X64-SSE-NEXT:    psrad $31, %xmm1
-; X64-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
-; X64-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
-; X64-SSE-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; X64-SSE-NEXT:    psrad $31, %xmm0
+; X64-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; X64-SSE-NEXT:    packsswb %xmm1, %xmm0
 ; X64-SSE-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: trunc_ashr_v4i64:
 ; X64-AVX1:       # BB#0:
-; X64-AVX1-NEXT:    vpsrad $31, %xmm0, %xmm1
-; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; X64-AVX1-NEXT:    vpsrad $31, %xmm1, %xmm1
+; X64-AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
 ; X64-AVX1-NEXT:    vpsrad $31, %xmm0, %xmm0
-; X64-AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
-; X64-AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
-; X64-AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
+; X64-AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; X64-AVX1-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vzeroupper
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: trunc_ashr_v4i64:
 ; X64-AVX2:       # BB#0:
 ; X64-AVX2-NEXT:    vpsrad $31, %ymm0, %ymm0
-; X64-AVX2-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[1,3,2,3,5,7,6,7]
-; X64-AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-AVX2-NEXT:    # kill: %XMM0 %XMM0 %YMM0
+; X64-AVX2-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
+; X64-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; X64-AVX2-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
 ; X64-AVX2-NEXT:    vzeroupper
 ; X64-AVX2-NEXT:    retq
   %1 = ashr <4 x i64> %a, <i64 63, i64 63, i64 63, i64 63>
@@ -50,44 +50,32 @@ define <4 x i32> @trunc_ashr_v4i64(<4 x i64> %a) nounwind {
 define <8 x i16> @trunc_ashr_v8i32(<8 x i32> %a) nounwind {
 ; X32-SSE-LABEL: trunc_ashr_v8i32:
 ; X32-SSE:       # BB#0:
-; X32-SSE-NEXT:    psrad $31, %xmm0
 ; X32-SSE-NEXT:    psrad $31, %xmm1
-; X32-SSE-NEXT:    pslld $16, %xmm1
-; X32-SSE-NEXT:    psrad $16, %xmm1
-; X32-SSE-NEXT:    pslld $16, %xmm0
-; X32-SSE-NEXT:    psrad $16, %xmm0
-; X32-SSE-NEXT:    packssdw %xmm1, %xmm0
+; X32-SSE-NEXT:    psrad $31, %xmm0
+; X32-SSE-NEXT:    packsswb %xmm1, %xmm0
 ; X32-SSE-NEXT:    retl
 ;
 ; X64-SSE-LABEL: trunc_ashr_v8i32:
 ; X64-SSE:       # BB#0:
-; X64-SSE-NEXT:    psrad $31, %xmm0
 ; X64-SSE-NEXT:    psrad $31, %xmm1
-; X64-SSE-NEXT:    pslld $16, %xmm1
-; X64-SSE-NEXT:    psrad $16, %xmm1
-; X64-SSE-NEXT:    pslld $16, %xmm0
-; X64-SSE-NEXT:    psrad $16, %xmm0
-; X64-SSE-NEXT:    packssdw %xmm1, %xmm0
+; X64-SSE-NEXT:    psrad $31, %xmm0
+; X64-SSE-NEXT:    packsswb %xmm1, %xmm0
 ; X64-SSE-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: trunc_ashr_v8i32:
 ; X64-AVX1:       # BB#0:
-; X64-AVX1-NEXT:    vpsrad $31, %xmm0, %xmm1
-; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; X64-AVX1-NEXT:    vpsrad $31, %xmm1, %xmm1
 ; X64-AVX1-NEXT:    vpsrad $31, %xmm0, %xmm0
-; X64-AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
-; X64-AVX1-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
-; X64-AVX1-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
-; X64-AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; X64-AVX1-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vzeroupper
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: trunc_ashr_v8i32:
 ; X64-AVX2:       # BB#0:
 ; X64-AVX2-NEXT:    vpsrad $31, %ymm0, %ymm0
-; X64-AVX2-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
-; X64-AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-AVX2-NEXT:    # kill: %XMM0 %XMM0 %YMM0
+; X64-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; X64-AVX2-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
 ; X64-AVX2-NEXT:    vzeroupper
 ; X64-AVX2-NEXT:    retq
   %1 = ashr <8 x i32> %a, <i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>
-- 
2.11.0
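
For illustration: the relaxed combine no longer requires a SETCC source, only
that ComputeNumSignBits proves every scalar element is a splatted sign bit.
Below is a minimal sketch in LLVM IR, assuming the shl/ashr pair (a
sign_extend_inreg pattern) survives to the truncate combine; the function
@trunc_sext_in_reg is hypothetical and is not one of the packss.ll tests:

  define <8 x i16> @trunc_sext_in_reg(<8 x i32> %a) nounwind {
    ; Splat bit 0 of each lane across all 32 bits, so ComputeNumSignBits
    ; reports 32 sign bits per element (each lane is all-zeros or all-ones).
    %1 = shl <8 x i32> %a, <i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>
    %2 = ashr <8 x i32> %1, <i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>
    ; With all/none sign bits proven, this truncation can now lower via
    ; PACKSS instead of the old shift+pack shuffle sequence.
    %3 = trunc <8 x i32> %2 to <8 x i16>
    ret <8 x i16> %3
  }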