From 7e6fcc775f56cdeeae061f6f8071f5c103087330 Mon Sep 17 00:00:00 2001
From: Benjamin Kramer
Date: Wed, 27 Dec 2017 13:31:50 +0000
Subject: [PATCH] [X86] Fix vmul combine for AVX1 targets.

v8i32 is legal on AVX1, but it doesn't have pmuludq for it.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@321490 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/X86ISelLowering.cpp |  4 ++++
 test/CodeGen/X86/combine-pmuldq.ll | 44 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 48 insertions(+)

diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index ae5100e9bb4..7d2bfd421e4 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -32431,6 +32431,10 @@ static SDValue combineVMUL(SDNode *N, SelectionDAG &DAG,
   if (VT.getScalarType() != MVT::i64)
     return SDValue();
 
+  // Don't try to lower 256 bit integer vectors on AVX1 targets.
+  if (!Subtarget.hasAVX2() && VT.getVectorNumElements() > 2)
+    return SDValue();
+
   MVT MulVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() * 2);
 
   SDValue LHS = N->getOperand(0);
diff --git a/test/CodeGen/X86/combine-pmuldq.ll b/test/CodeGen/X86/combine-pmuldq.ll
index 421c948efab..0c7b8d6f4c5 100644
--- a/test/CodeGen/X86/combine-pmuldq.ll
+++ b/test/CodeGen/X86/combine-pmuldq.ll
@@ -1,5 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefix=AVX --check-prefix=AVX512VL
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefix=AVX --check-prefix=AVX512DQVL
@@ -142,3 +143,46 @@ define <4 x i64> @combine_shuffle_zero_pmuludq_256(<8 x i32> %a0, <8 x i32> %a1)
   %5 = mul <4 x i64> %3, %4
   ret <4 x i64> %5
 }
+
+define <8 x i64> @combine_zext_pmuludq_256(<8 x i32> %a) {
+; SSE-LABEL: combine_zext_pmuludq_256:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; SSE-NEXT:    pmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; SSE-NEXT:    pmovzxdq {{.*#+}} xmm4 = xmm2[0],zero,xmm2[1],zero
+; SSE-NEXT:    pmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
+; SSE-NEXT:    pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; SSE-NEXT:    movdqa {{.*#+}} xmm1 = [715827883,715827883]
+; SSE-NEXT:    pmuludq %xmm1, %xmm0
+; SSE-NEXT:    pmuludq %xmm1, %xmm2
+; SSE-NEXT:    pmuludq %xmm1, %xmm4
+; SSE-NEXT:    pmuludq %xmm1, %xmm3
+; SSE-NEXT:    movdqa %xmm4, %xmm1
+; SSE-NEXT:    retq
+;
+; AVX2-LABEL: combine_zext_pmuludq_256:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX2-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [715827883,715827883,715827883,715827883]
+; AVX2-NEXT:    vpmuludq %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpmuludq %ymm2, %ymm1, %ymm1
+; AVX2-NEXT:    retq
+;
+; AVX512VL-LABEL: combine_zext_pmuludq_256:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
+; AVX512VL-NEXT:    vpmuludq {{.*}}(%rip){1to8}, %zmm0, %zmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512DQVL-LABEL: combine_zext_pmuludq_256:
+; AVX512DQVL:       # %bb.0:
+; AVX512DQVL-NEXT:    vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
+; AVX512DQVL-NEXT:    vpmuludq {{.*}}(%rip){1to8}, %zmm0, %zmm0
+; AVX512DQVL-NEXT:    retq
+  %1 = zext <8 x i32> %a to <8 x i64>
+  %2 = mul nuw nsw <8 x i64> %1, <i64 715827883, i64 715827883, i64 715827883, i64 715827883, i64 715827883, i64 715827883, i64 715827883, i64 715827883>
+  ret <8 x i64> %2
+}
-- 
2.11.0
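
For readers without the surrounding LLVM code in view: PMULUDQ multiplies the unsigned low 32 bits of each 64-bit lane into a full 64-bit product, and the combine patched above rewrites a 64-bit multiply of zero-extended 32-bit values into it; plain AVX1 has no 256-bit integer PMULUDQ, hence the new bail-out for i64 vectors wider than 2 elements. Below is a minimal standalone C++ sketch of those two facts, placed after the signature so `git am` ignores it; pmuludqLane and combineIsLegal are hypothetical helper names for illustration, not the LLVM API.

#include <cstdint>
#include <cstdio>
#include <vector>

// Scalar model of one PMULUDQ lane: multiply the unsigned low 32 bits of
// each 64-bit lane, producing a full 64-bit product.
static uint64_t pmuludqLane(uint64_t a, uint64_t b) {
  return static_cast<uint64_t>(static_cast<uint32_t>(a)) *
         static_cast<uint64_t>(static_cast<uint32_t>(b));
}

// Mirror of the guard added by the patch: without AVX2 there is no 256-bit
// integer PMULUDQ, so the combine must bail out beyond 2 x i64 lanes.
static bool combineIsLegal(bool hasAVX2, unsigned numI64Elts) {
  return hasAVX2 || numI64Elts <= 2;
}

int main() {
  const uint64_t splat = 715827883; // the multiplier used in the new test
  std::vector<uint64_t> v4i64 = {5, 6, 7, 8};
  printf("v4i64 combine, AVX1 only: %s\n",
         combineIsLegal(false, static_cast<unsigned>(v4i64.size())) ? "ok" : "bail");
  printf("v2i64 combine, AVX1 only: %s\n",
         combineIsLegal(false, 2) ? "ok" : "bail");
  for (uint64_t lane : v4i64)
    printf("pmuludq(%llu, %llu) = %llu\n", (unsigned long long)lane,
           (unsigned long long)splat, (unsigned long long)pmuludqLane(lane, splat));
  return 0;
}

The v2i64 case stays legal because 128-bit PMULUDQ has existed since SSE2; only the 256-bit form requires AVX2.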