From dd17bc5daa1ceab1a6c3d75ac0b6d4c10350561c Mon Sep 17 00:00:00 2001
From: Craig Topper
Date: Sun, 5 Jun 2016 05:35:11 +0000
Subject: [PATCH] [AVX512] Fix PANDN combining for v4i32/v8i32 when VLX is
 enabled.

v4i32/v8i32 ANDs aren't promoted to v2i64/v4i64 when VLX is enabled.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@271826 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/X86ISelLowering.cpp |  3 ++-
 test/CodeGen/X86/avx512vl-logic.ll | 48 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 50 insertions(+), 1 deletion(-)

diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 5a7c2406861..96e0c5564fc 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -27117,7 +27117,8 @@ static SDValue combineANDXORWithAllOnesIntoANDNP(SDNode *N, SelectionDAG &DAG) {
   SDValue N1 = N->getOperand(1);
   SDLoc DL(N);
 
-  if (VT != MVT::v2i64 && VT != MVT::v4i64)
+  if (VT != MVT::v2i64 && VT != MVT::v4i64 &&
+      VT != MVT::v4i32 && VT != MVT::v8i32) // Legal with VLX
     return SDValue();
 
   // Canonicalize XOR to the left.
diff --git a/test/CodeGen/X86/avx512vl-logic.ll b/test/CodeGen/X86/avx512vl-logic.ll
index 02cb8f97865..d6e1a7dd539 100644
--- a/test/CodeGen/X86/avx512vl-logic.ll
+++ b/test/CodeGen/X86/avx512vl-logic.ll
@@ -13,6 +13,18 @@ entry:
   ret <8 x i32> %x
 }
 
+; CHECK-LABEL: vpandnd256
+; CHECK: vpandnd %ymm
+; CHECK: ret
+define <8 x i32> @vpandnd256(<8 x i32> %a, <8 x i32> %b) nounwind uwtable readnone ssp {
+entry:
+  ; Force the execution domain with an add.
+  %a2 = add <8 x i32> %a, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %b2 = xor <8 x i32> %a, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+  %x = and <8 x i32> %a2, %b2
+  ret <8 x i32> %x
+}
+
 ; CHECK-LABEL: vpord256
 ; CHECK: vpord %ymm
 ; CHECK: ret
@@ -46,6 +58,18 @@ entry:
   ret <4 x i64> %x
 }
 
+; CHECK-LABEL: vpandnq256
+; CHECK: vpandnq %ymm
+; CHECK: ret
+define <4 x i64> @vpandnq256(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
+entry:
+  ; Force the execution domain with an add.
+  %a2 = add <4 x i64> %a, <i64 1, i64 1, i64 1, i64 1>
+  %b2 = xor <4 x i64> %b, <i64 -1, i64 -1, i64 -1, i64 -1>
+  %x = and <4 x i64> %a2, %b2
+  ret <4 x i64> %x
+}
+
 ; CHECK-LABEL: vporq256
 ; CHECK: vporq %ymm
 ; CHECK: ret
@@ -81,6 +105,18 @@ entry:
   ret <4 x i32> %x
 }
 
+; CHECK-LABEL: vpandnd128
+; CHECK: vpandnd %xmm
+; CHECK: ret
+define <4 x i32> @vpandnd128(<4 x i32> %a, <4 x i32> %b) nounwind uwtable readnone ssp {
+entry:
+  ; Force the execution domain with an add.
+  %a2 = add <4 x i32> %a, <i32 1, i32 1, i32 1, i32 1>
+  %b2 = xor <4 x i32> %b, <i32 -1, i32 -1, i32 -1, i32 -1>
+  %x = and <4 x i32> %a2, %b2
+  ret <4 x i32> %x
+}
+
 ; CHECK-LABEL: vpord128
 ; CHECK: vpord %xmm
 ; CHECK: ret
@@ -114,6 +150,18 @@ entry:
   ret <2 x i64> %x
 }
 
+; CHECK-LABEL: vpandnq128
+; CHECK: vpandnq %xmm
+; CHECK: ret
+define <2 x i64> @vpandnq128(<2 x i64> %a, <2 x i64> %b) nounwind uwtable readnone ssp {
+entry:
+  ; Force the execution domain with an add.
+  %a2 = add <2 x i64> %a, <i64 1, i64 1>
+  %b2 = xor <2 x i64> %b, <i64 -1, i64 -1>
+  %x = and <2 x i64> %a2, %b2
+  ret <2 x i64> %x
+}
+
 ; CHECK-LABEL: vporq128
 ; CHECK: vporq %xmm
 ; CHECK: ret
-- 
2.11.0