From: Craig Topper
Date: Sat, 21 Jan 2017 06:59:38 +0000 (+0000)
Subject: [X86] Don't allow commuting to form phsub operations.
X-Git-Tag: android-x86-7.1-r4~21630
X-Git-Url: http://git.osdn.net/view?a=commitdiff_plain;h=932a0813854ff6c9b49d7c94bd1c4004312031f1;p=android-x86%2Fexternal-llvm.git

[X86] Don't allow commuting to form phsub operations.

Fixes PR31714.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@292713 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 0d65c277725..d1e4846928c 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -33864,11 +33864,11 @@ static SDValue combineSub(SDNode *N, SelectionDAG &DAG,
     }
   }
 
-  // Try to synthesize horizontal adds from adds of shuffles.
+  // Try to synthesize horizontal subs from subs of shuffles.
   EVT VT = N->getValueType(0);
   if (((Subtarget.hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
        (Subtarget.hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
-      isHorizontalBinOp(Op0, Op1, true))
+      isHorizontalBinOp(Op0, Op1, false))
     return DAG.getNode(X86ISD::HSUB, SDLoc(N), VT, Op0, Op1);
 
   return OptimizeConditionalInDecrement(N, DAG);
diff --git a/test/CodeGen/X86/phaddsub.ll b/test/CodeGen/X86/phaddsub.ll
index 9d09b855add..08015258867 100644
--- a/test/CodeGen/X86/phaddsub.ll
+++ b/test/CodeGen/X86/phaddsub.ll
@@ -229,12 +229,31 @@ define <4 x i32> @phsubd4(<4 x i32> %x) {
 define <8 x i16> @phsubw1_reverse(<8 x i16> %x, <8 x i16> %y) {
 ; SSSE3-LABEL: phsubw1_reverse:
 ; SSSE3:       # BB#0:
-; SSSE3-NEXT:    phsubw %xmm1, %xmm0
+; SSSE3-NEXT:    movdqa {{.*#+}} xmm3 = [2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15]
+; SSSE3-NEXT:    movdqa %xmm1, %xmm4
+; SSSE3-NEXT:    pshufb %xmm3, %xmm4
+; SSSE3-NEXT:    movdqa %xmm0, %xmm2
+; SSSE3-NEXT:    pshufb %xmm3, %xmm2
+; SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0]
+; SSSE3-NEXT:    movdqa {{.*#+}} xmm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; SSSE3-NEXT:    pshufb %xmm3, %xmm1
+; SSSE3-NEXT:    pshufb %xmm3, %xmm0
+; SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSSE3-NEXT:    psubw %xmm0, %xmm2
+; SSSE3-NEXT:    movdqa %xmm2, %xmm0
 ; SSSE3-NEXT:    retq
 ;
 ; AVX-LABEL: phsubw1_reverse:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vphsubw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vmovdqa {{.*#+}} xmm2 = [2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15]
+; AVX-NEXT:    vpshufb %xmm2, %xmm1, %xmm3
+; AVX-NEXT:    vpshufb %xmm2, %xmm0, %xmm2
+; AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; AVX-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
+; AVX-NEXT:    vpshufb %xmm3, %xmm0, %xmm0
+; AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX-NEXT:    vpsubw %xmm0, %xmm2, %xmm0
 ; AVX-NEXT:    retq
   %a = shufflevector <8 x i16> %x, <8 x i16> %y, <8 x i32>
   %b = shufflevector <8 x i16> %x, <8 x i16> %y, <8 x i32>
@@ -245,12 +264,18 @@ define <8 x i16> @phsubw1_reverse(<8 x i16> %x, <8 x i16> %y) {
 define <4 x i32> @phsubd1_reverse(<4 x i32> %x, <4 x i32> %y) {
 ; SSSE3-LABEL: phsubd1_reverse:
 ; SSSE3:       # BB#0:
-; SSSE3-NEXT:    phsubd %xmm1, %xmm0
+; SSSE3-NEXT:    movaps %xmm0, %xmm2
+; SSSE3-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,3],xmm1[1,3]
+; SSSE3-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; SSSE3-NEXT:    psubd %xmm0, %xmm2
+; SSSE3-NEXT:    movdqa %xmm2, %xmm0
 ; SSSE3-NEXT:    retq
 ;
 ; AVX-LABEL: phsubd1_reverse:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vphsubd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vshufps {{.*#+}} xmm2 = xmm0[1,3],xmm1[1,3]
+; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; AVX-NEXT:    vpsubd %xmm0, %xmm2, %xmm0
 ; AVX-NEXT:    retq
   %a = shufflevector <4 x i32> %x, <4 x i32> %y, <4 x i32>
   %b = shufflevector <4 x i32> %x, <4 x i32> %y, <4 x i32>
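
Why the IsCommutative argument matters here: phsubw/phsubd subtract the odd lane of each pair from the even lane, while the *_reverse tests above subtract in the opposite order, and swapping the operands of an integer sub negates every lane. A minimal scalar sketch of the two patterns follows; it is illustrative only, and phsubw_model/reverse_pattern are made-up names, not LLVM APIs or intrinsics.

#include <array>
#include <cstdint>

// Scalar model of PHSUBW: lane i of the result is the even element of pair i
// minus the odd element; the low four pairs come from the first operand and
// the high four pairs from the second operand.
static std::array<int16_t, 8> phsubw_model(const std::array<int16_t, 8> &x,
                                           const std::array<int16_t, 8> &y) {
  std::array<int16_t, 8> r{};
  for (int i = 0; i < 4; ++i) {
    r[i] = static_cast<int16_t>(x[2 * i] - x[2 * i + 1]);
    r[i + 4] = static_cast<int16_t>(y[2 * i] - y[2 * i + 1]);
  }
  return r;
}

// Scalar model of the sub-of-shuffles in phsubw1_reverse: odd minus even.
// Every lane is the negation of phsubw_model's lane, so the DAG combine must
// not commute the sub's operands to force a match (hence the false argument).
static std::array<int16_t, 8> reverse_pattern(const std::array<int16_t, 8> &x,
                                              const std::array<int16_t, 8> &y) {
  std::array<int16_t, 8> r{};
  for (int i = 0; i < 4; ++i) {
    r[i] = static_cast<int16_t>(x[2 * i + 1] - x[2 * i]);
    r[i + 4] = static_cast<int16_t>(y[2 * i + 1] - y[2 * i]);
  }
  return r;
}

For example, with x beginning <1, 2, ...> the first pattern produces -1 in lane 0 while the second produces 1, which is why emitting phsubw %xmm1, %xmm0 for the reverse tests (the pre-patch output) was wrong.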