From d9bc309e9c08dd6b1db732c6b3d32ebbe6683851 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim
Date: Tue, 25 Oct 2016 22:01:09 +0000
Subject: [PATCH] [DAGCombiner] Enable (urem x, (shl pow2, y)) -> (and x, (add (shl pow2, y), -1)) combine for splatted vectors

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@285129 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/CodeGen/SelectionDAG/DAGCombiner.cpp |  6 ++--
 test/CodeGen/X86/combine-urem.ll         | 53 +++++---------
 2 files changed, 11 insertions(+), 48 deletions(-)

diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index f8953f0fb49..81fb69f517f 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -2482,9 +2482,9 @@ SDValue DAGCombiner::visitREM(SDNode *N) {
   }
   // fold (urem x, (shl pow2, y)) -> (and x, (add (shl pow2, y), -1))
   if (N1.getOpcode() == ISD::SHL) {
-    ConstantSDNode *SHC = getAsNonOpaqueConstant(N1.getOperand(0));
-    if (SHC && SHC->getAPIntValue().isPowerOf2()) {
-      APInt NegOne = APInt::getAllOnesValue(VT.getSizeInBits());
+    ConstantSDNode *SHC = isConstOrConstSplat(N1.getOperand(0));
+    if (SHC && !SHC->isOpaque() && SHC->getAPIntValue().isPowerOf2()) {
+      APInt NegOne = APInt::getAllOnesValue(VT.getScalarSizeInBits());
       SDValue Add =
           DAG.getNode(ISD::ADD, DL, VT, N1, DAG.getConstant(NegOne, DL, VT));
       AddToWorklist(Add.getNode());
diff --git a/test/CodeGen/X86/combine-urem.ll b/test/CodeGen/X86/combine-urem.ll
index c043db5b542..0dc10164a5c 100644
--- a/test/CodeGen/X86/combine-urem.ll
+++ b/test/CodeGen/X86/combine-urem.ll
@@ -88,57 +88,20 @@ define <4 x i32> @combine_vec_urem_by_shl_pow2a(<4 x i32> %x, <4 x i32> %y) {
 ; SSE: # BB#0:
 ; SSE-NEXT: pslld $23, %xmm1
 ; SSE-NEXT: paddd {{.*}}(%rip), %xmm1
-; SSE-NEXT: cvttps2dq %xmm1, %xmm2
-; SSE-NEXT: pslld $2, %xmm2
-; SSE-NEXT: pextrd $1, %xmm0, %eax
-; SSE-NEXT: pextrd $1, %xmm2, %ecx
-; SSE-NEXT: xorl %edx, %edx
-; SSE-NEXT: divl %ecx
-; SSE-NEXT: movl %edx, %ecx
-; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: movd %xmm2, %esi
-; SSE-NEXT: xorl %edx, %edx
-; SSE-NEXT: divl %esi
-; SSE-NEXT: movd %edx, %xmm1
-; SSE-NEXT: pinsrd $1, %ecx, %xmm1
-; SSE-NEXT: pextrd $2, %xmm0, %eax
-; SSE-NEXT: pextrd $2, %xmm2, %ecx
-; SSE-NEXT: xorl %edx, %edx
-; SSE-NEXT: divl %ecx
-; SSE-NEXT: pinsrd $2, %edx, %xmm1
-; SSE-NEXT: pextrd $3, %xmm0, %eax
-; SSE-NEXT: pextrd $3, %xmm2, %ecx
-; SSE-NEXT: xorl %edx, %edx
-; SSE-NEXT: divl %ecx
-; SSE-NEXT: pinsrd $3, %edx, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: cvttps2dq %xmm1, %xmm1
+; SSE-NEXT: pslld $2, %xmm1
+; SSE-NEXT: pcmpeqd %xmm2, %xmm2
+; SSE-NEXT: paddd %xmm1, %xmm2
+; SSE-NEXT: pand %xmm2, %xmm0
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: combine_vec_urem_by_shl_pow2a:
 ; AVX: # BB#0:
 ; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
 ; AVX-NEXT: vpsllvd %xmm1, %xmm2, %xmm1
-; AVX-NEXT: vpextrd $1, %xmm1, %ecx
-; AVX-NEXT: vpextrd $1, %xmm0, %eax
-; AVX-NEXT: xorl %edx, %edx
-; AVX-NEXT: divl %ecx
-; AVX-NEXT: movl %edx, %ecx
-; AVX-NEXT: vmovd %xmm1, %esi
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: xorl %edx, %edx
-; AVX-NEXT: divl %esi
-; AVX-NEXT: vmovd %edx, %xmm2
-; AVX-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2
-; AVX-NEXT: vpextrd $2, %xmm1, %ecx
-; AVX-NEXT: vpextrd $2, %xmm0, %eax
-; AVX-NEXT: xorl %edx, %edx
-; AVX-NEXT: divl %ecx
-; AVX-NEXT: vpinsrd $2, %edx, %xmm2, %xmm2
-; AVX-NEXT: vpextrd $3, %xmm1, %ecx
-; AVX-NEXT: vpextrd $3, %xmm0, %eax
-; AVX-NEXT: xorl %edx, %edx
-; AVX-NEXT: divl %ecx
-; AVX-NEXT: vpinsrd $3, %edx, %xmm2, %xmm0
+; AVX-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX-NEXT: vpaddd %xmm2, %xmm1, %xmm1
+; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: retq
   %1 = shl <4 x i32> <i32 4, i32 4, i32 4, i32 4>, %y
   %2 = urem <4 x i32> %x, %1
-- 
2.11.0
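
Note on the fold itself: the DAG-side change only makes the existing scalar combine look through splatted vector constants (isConstOrConstSplat plus an explicit opaque-constant check) and builds the all-ones value at the scalar bit width, so the same mask trick now applies per element and the scalarized divl sequences in the old check lines disappear. The following is a minimal standalone C++ sketch of the underlying identity, not LLVM code, and the function names are illustrative only: if p is a power of two then (p << y) is also a power of two, so x % (p << y) == x & ((p << y) - 1).

// Sketch (illustrative names, not LLVM code) of the identity behind the fold.
#include <cassert>
#include <cstdint>

// Reference form: unsigned remainder by (p << y).
static uint32_t urem_by_shl(uint32_t x, uint32_t p, uint32_t y) {
  return x % (p << y);
}

// Folded form: AND with ((p << y) - 1); valid only when p is a power of two.
static uint32_t urem_by_shl_folded(uint32_t x, uint32_t p, uint32_t y) {
  return x & ((p << y) - 1);
}

int main() {
  // The updated test splats the power-of-two base 4 across a <4 x i32>
  // vector; here the same equivalence is checked one scalar lane at a time.
  for (uint32_t y = 0; y < 8; ++y)
    for (uint32_t x = 0; x < 4096; ++x)
      assert(urem_by_shl(x, 4, y) == urem_by_shl_folded(x, 4, y));
  return 0;
}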