From dba67a4fdb6b7b7dce9b959b6d7d2963b1c642b6 Mon Sep 17 00:00:00 2001
From: Craig Topper
Date: Sun, 8 May 2016 21:33:53 +0000
Subject: [PATCH] [AVX512] Add VLX 128/256-bit SET0 operations that encode to
 128/256-bit EVEX encoded VPXORD so all 32 registers can be used.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@268884 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/X86InstrAVX512.td            |  8 ++++++
 lib/Target/X86/X86InstrInfo.cpp             |  4 +++
 lib/Target/X86/X86InstrSSE.td               |  5 ++--
 test/CodeGen/X86/avx512-arith.ll            | 10 ++++----
 test/CodeGen/X86/avx512-calling-conv.ll     | 15 +++++++----
 test/CodeGen/X86/avx512-vec-cmp.ll          | 10 ++++----
 test/CodeGen/X86/avx512vbmivl-intrinsics.ll |  8 +++---
 test/CodeGen/X86/avx512vl-intrinsics.ll     | 16 ++++++------
 test/CodeGen/X86/avx512vl-vbroadcast.ll     | 12 ++++-----
 test/CodeGen/X86/fma_patterns.ll            |  5 ++--
 test/CodeGen/X86/masked_memop.ll            | 40 ++++++++++++++---------------
 test/CodeGen/X86/vector-shuffle-128-v2.ll   | 33 ++++++++++++++++--------
 test/CodeGen/X86/vector-tzcnt-128.ll        | 14 +++++-----
 test/CodeGen/X86/vector-tzcnt-256.ll        | 16 ++++++------
 14 files changed, 114 insertions(+), 82 deletions(-)

diff --git a/lib/Target/X86/X86InstrAVX512.td b/lib/Target/X86/X86InstrAVX512.td
index a0d503a1e3d..242b21dc177 100644
--- a/lib/Target/X86/X86InstrAVX512.td
+++ b/lib/Target/X86/X86InstrAVX512.td
@@ -483,6 +483,14 @@ def AVX512_512_SET0 : I<0, Pseudo, (outs VR512:$dst), (ins), "",
                [(set VR512:$dst, (v16i32 immAllZerosV))]>;
 }
 
+let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
+    isPseudo = 1, Predicates = [HasVLX] in {
+def AVX512_128_SET0 : I<0, Pseudo, (outs VR128X:$dst), (ins), "",
+                   [(set VR128X:$dst, (v4i32 immAllZerosV))]>;
+def AVX512_256_SET0 : I<0, Pseudo, (outs VR256X:$dst), (ins), "",
+                   [(set VR256X:$dst, (v8i32 immAllZerosV))]>;
+}
+
 //===----------------------------------------------------------------------===//
 // AVX-512 - VECTOR INSERT
 //
diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index 035c114c579..7208f5a2f5a 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -5512,6 +5512,10 @@ bool X86InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
   case X86::AVX_SET0:
     assert(HasAVX && "AVX not supported");
     return Expand2AddrUndef(MIB, get(X86::VXORPSYrr));
+  case X86::AVX512_128_SET0:
+    return Expand2AddrUndef(MIB, get(X86::VPXORDZ128rr));
+  case X86::AVX512_256_SET0:
+    return Expand2AddrUndef(MIB, get(X86::VPXORDZ256rr));
   case X86::AVX512_512_SET0:
     return Expand2AddrUndef(MIB, get(X86::VPXORDZrr));
   case X86::V_SETALLONES:
diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td
index 3e98eb88048..814247899ce 100644
--- a/lib/Target/X86/X86InstrSSE.td
+++ b/lib/Target/X86/X86InstrSSE.td
@@ -472,11 +472,12 @@ let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
 // We set canFoldAsLoad because this can be converted to a constant-pool
 // load of an all-zeros value if folding it would be beneficial.
 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
-    isPseudo = 1, SchedRW = [WriteZero] in {
+    isPseudo = 1, Predicates = [NoVLX], SchedRW = [WriteZero] in {
 def V_SET0 : I<0, Pseudo, (outs VR128:$dst), (ins), "",
                [(set VR128:$dst, (v4f32 immAllZerosV))]>;
 }
 
+let Predicates = [NoVLX] in
 def : Pat<(v4i32 immAllZerosV), (V_SET0)>;
@@ -485,7 +486,7 @@ def : Pat<(v4i32 immAllZerosV), (V_SET0)>;
 // at the rename stage without using any execution unit, so SET0PSY
 // and SET0PDY can be used for vector int instructions without penalty
 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
-    isPseudo = 1, Predicates = [HasAVX], SchedRW = [WriteZero] in {
+    isPseudo = 1, Predicates = [HasAVX, NoVLX], SchedRW = [WriteZero] in {
 def AVX_SET0 : I<0, Pseudo, (outs VR256:$dst), (ins), "",
                [(set VR256:$dst, (v8i32 immAllZerosV))]>;
 }
diff --git a/test/CodeGen/X86/avx512-arith.ll b/test/CodeGen/X86/avx512-arith.ll
index acf9caa2342..515f571b1a4 100644
--- a/test/CodeGen/X86/avx512-arith.ll
+++ b/test/CodeGen/X86/avx512-arith.ll
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; NOTE: Assertions have been autogenerated by update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx512f | FileCheck --check-prefix=CHECK --check-prefix=AVX512F %s
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx512vl | FileCheck --check-prefix=CHECK --check-prefix=AVX512VL %s
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx512bw | FileCheck --check-prefix=CHECK --check-prefix=AVX512BW %s
@@ -682,7 +682,7 @@ define <8 x double> @test_mask_vminpd(<8 x double> %dst, <8 x double> %i,
 ;
 ; AVX512VL-LABEL: test_mask_vminpd:
 ; AVX512VL:       ## BB#0:
-; AVX512VL-NEXT:    vpxor %ymm4, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpxord %ymm4, %ymm4, %ymm4
 ; AVX512VL-NEXT:    vpcmpneqd %ymm4, %ymm3, %k1
 ; AVX512VL-NEXT:    vminpd %zmm2, %zmm1, %zmm0 {%k1}
 ; AVX512VL-NEXT:    retq
@@ -703,7 +703,7 @@ define <8 x double> @test_mask_vminpd(<8 x double> %dst, <8 x double> %i,
 ;
 ; SKX-LABEL: test_mask_vminpd:
 ; SKX:       ## BB#0:
-; SKX-NEXT:    vpxor %ymm4, %ymm4, %ymm4
+; SKX-NEXT:    vpxord %ymm4, %ymm4, %ymm4
 ; SKX-NEXT:    vpcmpneqd %ymm4, %ymm3, %k1
 ; SKX-NEXT:    vminpd %zmm2, %zmm1, %zmm0 {%k1}
 ; SKX-NEXT:    retq
@@ -742,7 +742,7 @@ define <8 x double> @test_mask_vmaxpd(<8 x double> %dst, <8 x double> %i,
 ;
 ; AVX512VL-LABEL: test_mask_vmaxpd:
 ; AVX512VL:       ## BB#0:
-; AVX512VL-NEXT:    vpxor %ymm4, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpxord %ymm4, %ymm4, %ymm4
 ; AVX512VL-NEXT:    vpcmpneqd %ymm4, %ymm3, %k1
 ; AVX512VL-NEXT:    vmaxpd %zmm2, %zmm1, %zmm0 {%k1}
 ; AVX512VL-NEXT:    retq
@@ -763,7 +763,7 @@ define <8 x double> @test_mask_vmaxpd(<8 x double> %dst, <8 x double> %i,
 ;
 ; SKX-LABEL: test_mask_vmaxpd:
 ; SKX:       ## BB#0:
-; SKX-NEXT:    vpxor %ymm4, %ymm4, %ymm4
+; SKX-NEXT:    vpxord %ymm4, %ymm4, %ymm4
 ; SKX-NEXT:    vpcmpneqd %ymm4, %ymm3, %k1
 ; SKX-NEXT:    vmaxpd %zmm2, %zmm1, %zmm0 {%k1}
 ; SKX-NEXT:    retq
diff --git a/test/CodeGen/X86/avx512-calling-conv.ll b/test/CodeGen/X86/avx512-calling-conv.ll
index 49a28319e48..9e14ec026bc 100644
--- a/test/CodeGen/X86/avx512-calling-conv.ll
+++ b/test/CodeGen/X86/avx512-calling-conv.ll
@@ -1,13 +1,18 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; NOTE: Assertions have been autogenerated by update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl | FileCheck %s --check-prefix=ALL_X64 --check-prefix=KNL
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=skx | FileCheck %s --check-prefix=ALL_X64 --check-prefix=SKX
 ; RUN: llc < %s -mtriple=i686-apple-darwin -mcpu=knl | FileCheck %s --check-prefix=KNL_X32
 
 define <16 x i1> @test1() {
-; ALL_X64-LABEL: test1:
-; ALL_X64:       ## BB#0:
-; ALL_X64-NEXT:    vxorps %xmm0, %xmm0, %xmm0
-; ALL_X64-NEXT:    retq
+; KNL-LABEL: test1:
+; KNL:       ## BB#0:
+; KNL-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: test1:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpxord %xmm0, %xmm0, %xmm0
+; SKX-NEXT:    retq
 ;
 ; KNL_X32-LABEL: test1:
 ; KNL_X32:       ## BB#0:
diff --git a/test/CodeGen/X86/avx512-vec-cmp.ll b/test/CodeGen/X86/avx512-vec-cmp.ll
index 0727a0956a6..9099ae011be 100644
--- a/test/CodeGen/X86/avx512-vec-cmp.ll
+++ b/test/CodeGen/X86/avx512-vec-cmp.ll
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; NOTE: Assertions have been autogenerated by update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl | FileCheck %s --check-prefix=KNL
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=skx | FileCheck %s --check-prefix=SKX
 
@@ -127,7 +127,7 @@ define <4 x float> @test7(<4 x float> %a, <4 x float> %b) {
 ;
 ; SKX-LABEL: test7:
 ; SKX:       ## BB#0:
-; SKX-NEXT:    vxorps %xmm2, %xmm2, %xmm2
+; SKX-NEXT:    vpxord %xmm2, %xmm2, %xmm2
 ; SKX-NEXT:    vcmpltps %xmm2, %xmm0, %k1
 ; SKX-NEXT:    vmovaps %xmm0, %xmm1 {%k1}
 ; SKX-NEXT:    vmovaps %zmm1, %zmm0
@@ -148,7 +148,7 @@ define <2 x double> @test8(<2 x double> %a, <2 x double> %b) {
 ;
 ; SKX-LABEL: test8:
 ; SKX:       ## BB#0:
-; SKX-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
+; SKX-NEXT:    vpxord %xmm2, %xmm2, %xmm2
 ; SKX-NEXT:    vcmpltpd %xmm2, %xmm0, %k1
 ; SKX-NEXT:    vmovapd %xmm0, %xmm1 {%k1}
 ; SKX-NEXT:    vmovaps %zmm1, %zmm0
@@ -969,7 +969,7 @@ define <4 x i32> @test44(<4 x i16> %x, <4 x i16> %y) #0 {
 ;
 ; SKX-LABEL: test44:
 ; SKX:       ## BB#0:
-; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; SKX-NEXT:    vpxord %xmm2, %xmm2, %xmm2
 ; SKX-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
 ; SKX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7]
 ; SKX-NEXT:    vpcmpeqd %xmm1, %xmm0, %k0
@@ -992,7 +992,7 @@ define <2 x i64> @test45(<2 x i16> %x, <2 x i16> %y) #0 {
 ;
 ; SKX-LABEL: test45:
 ; SKX:       ## BB#0:
-; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; SKX-NEXT:    vpxord %xmm2, %xmm2, %xmm2
 ; SKX-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3],xmm1[4],xmm2[5,6,7]
 ; SKX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3],xmm0[4],xmm2[5,6,7]
 ; SKX-NEXT:    vpcmpeqq %xmm1, %xmm0, %k1
diff --git a/test/CodeGen/X86/avx512vbmivl-intrinsics.ll b/test/CodeGen/X86/avx512vbmivl-intrinsics.ll
index 8b11bbd55e4..6f7abca0a72 100644
--- a/test/CodeGen/X86/avx512vbmivl-intrinsics.ll
+++ b/test/CodeGen/X86/avx512vbmivl-intrinsics.ll
@@ -89,7 +89,7 @@ define <16 x i8>@test_int_x86_avx512_mask_vpermi2var_qi_128(<16 x i8> %x0, <16 x
 ; CHECK-NEXT:    vmovaps %zmm1, %zmm3 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xd9]
 ; CHECK-NEXT:    vpermt2b %xmm2, %xmm0, %xmm3 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x7d,0xda]
 ; CHECK-NEXT:    vpermt2b %xmm2, %xmm0, %xmm1 ## encoding: [0x62,0xf2,0x7d,0x08,0x7d,0xca]
-; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## encoding: [0xc5,0xd9,0xef,0xe4]
+; CHECK-NEXT:    vpxord %xmm4, %xmm4, %xmm4 ## encoding: [0x62,0xf1,0x5d,0x08,0xef,0xe4]
 ; CHECK-NEXT:    vpermt2b %xmm2, %xmm0, %xmm4 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x7d,0xe2]
 ; CHECK-NEXT:    vpaddb %xmm4, %xmm3, %xmm0 ## encoding: [0x62,0xf1,0x65,0x08,0xfc,0xc4]
 ; CHECK-NEXT:    vpaddb %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7d,0x08,0xfc,0xc1]
@@ -111,7 +111,7 @@ define <32 x i8>@test_int_x86_avx512_mask_vpermi2var_qi_256(<32 x i8> %x0, <32 x
 ; CHECK-NEXT:    vmovaps %zmm1, %zmm3 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xd9]
 ; CHECK-NEXT:    vpermt2b %ymm2, %ymm0, %ymm3 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x7d,0xda]
 ; CHECK-NEXT:    vpermt2b %ymm2, %ymm0, %ymm1 ## encoding: [0x62,0xf2,0x7d,0x28,0x7d,0xca]
-; CHECK-NEXT:    vpxor %ymm4, %ymm4, %ymm4 ## encoding: [0xc5,0xdd,0xef,0xe4]
+; CHECK-NEXT:    vpxord %ymm4, %ymm4, %ymm4 ## encoding: [0x62,0xf1,0x5d,0x28,0xef,0xe4]
 ; CHECK-NEXT:    vpermt2b %ymm2, %ymm0, %ymm4 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x7d,0xe2]
 ; CHECK-NEXT:    vpaddb %ymm4, %ymm3, %ymm0 ## encoding: [0x62,0xf1,0x65,0x28,0xfc,0xc4]
 ; CHECK-NEXT:    vpaddb %ymm1, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0x7d,0x28,0xfc,0xc1]
@@ -133,7 +133,7 @@ define <16 x i8>@test_int_x86_avx512_mask_vpermt2var_qi_128(<16 x i8> %x0, <16 x
 ; CHECK-NEXT:    vmovaps %zmm1, %zmm3 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xd9]
 ; CHECK-NEXT:    vpermt2b %xmm2, %xmm0, %xmm3 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x7d,0xda]
 ; CHECK-NEXT:    vpermt2b %xmm2, %xmm0, %xmm1 ## encoding: [0x62,0xf2,0x7d,0x08,0x7d,0xca]
-; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## encoding: [0xc5,0xd9,0xef,0xe4]
+; CHECK-NEXT:    vpxord %xmm4, %xmm4, %xmm4 ## encoding: [0x62,0xf1,0x5d,0x08,0xef,0xe4]
 ; CHECK-NEXT:    vpermt2b %xmm2, %xmm0, %xmm4 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x7d,0xe2]
 ; CHECK-NEXT:    vpaddb %xmm4, %xmm3, %xmm0 ## encoding: [0x62,0xf1,0x65,0x08,0xfc,0xc4]
 ; CHECK-NEXT:    vpaddb %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7d,0x08,0xfc,0xc1]
@@ -155,7 +155,7 @@ define <32 x i8>@test_int_x86_avx512_mask_vpermt2var_qi_256(<32 x i8> %x0, <32 x
 ; CHECK-NEXT:    vmovaps %zmm1, %zmm3 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xd9]
 ; CHECK-NEXT:    vpermt2b %ymm2, %ymm0, %ymm3 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x7d,0xda]
 ; CHECK-NEXT:    vpermt2b %ymm2, %ymm0, %ymm1 ## encoding: [0x62,0xf2,0x7d,0x28,0x7d,0xca]
-; CHECK-NEXT:    vpxor %ymm4, %ymm4, %ymm4 ## encoding: [0xc5,0xdd,0xef,0xe4]
+; CHECK-NEXT:    vpxord %ymm4, %ymm4, %ymm4 ## encoding: [0x62,0xf1,0x5d,0x28,0xef,0xe4]
 ; CHECK-NEXT:    vpermt2b %ymm2, %ymm0, %ymm4 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x7d,0xe2]
 ; CHECK-NEXT:    vpaddb %ymm4, %ymm3, %ymm0 ## encoding: [0x62,0xf1,0x65,0x28,0xfc,0xc4]
 ; CHECK-NEXT:    vpaddb %ymm1, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0x7d,0x28,0xfc,0xc1]
diff --git a/test/CodeGen/X86/avx512vl-intrinsics.ll b/test/CodeGen/X86/avx512vl-intrinsics.ll
index 2a21ccd9e7c..1d8e5cc6805 100644
--- a/test/CodeGen/X86/avx512vl-intrinsics.ll
+++ b/test/CodeGen/X86/avx512vl-intrinsics.ll
@@ -7929,7 +7929,7 @@ define <2 x double>@test_int_x86_avx512_mask_fixupimm_pd_128(<2 x double> %x0, <
 ; CHECK: kmovw %edi, %k1
 ; CHECK: vmovaps %zmm0, %zmm3
 ; CHECK: vfixupimmpd $5, %xmm2, %xmm1, %xmm3 {%k1}
-; CHECK: vpxor %xmm4, %xmm4, %xmm4
+; CHECK: vpxord %xmm4, %xmm4, %xmm4
 ; CHECK: vfixupimmpd $4, %xmm2, %xmm1, %xmm4 {%k1} {z}
 ; CHECK: vfixupimmpd $3, %xmm2, %xmm1, %xmm0
 ; CHECK: vaddpd %xmm4, %xmm3, %xmm1
@@ -7950,7 +7950,7 @@ define <2 x double>@test_int_x86_avx512_maskz_fixupimm_pd_128(<2 x double> %x0,
 ; CHECK: kmovw %edi, %k1
 ; CHECK: vmovaps %zmm0, %zmm3
 ; CHECK: vfixupimmpd $5, %xmm2, %xmm1, %xmm3 {%k1} {z}
-; CHECK: vpxor %xmm2, %xmm2, %xmm2
+; CHECK: vpxord %xmm2, %xmm2, %xmm2
 ; CHECK: vfixupimmpd $3, %xmm2, %xmm1, %xmm0 {%k1} {z}
 ; CHECK: vaddpd %xmm0, %xmm3, %xmm0
   %res = call <2 x double> @llvm.x86.avx512.maskz.fixupimm.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x i64> %x2, i32 5, i8 %x4)
@@ -7968,7 +7968,7 @@ define <4 x double>@test_int_x86_avx512_mask_fixupimm_pd_256(<4 x double> %x0, <
 ; CHECK: kmovw %edi, %k1
 ; CHECK: vmovaps %zmm0, %zmm3
 ; CHECK: vfixupimmpd $4, %ymm2, %ymm1, %ymm3 {%k1}
-; CHECK: vpxor %ymm4, %ymm4, %ymm4
+; CHECK: vpxord %ymm4, %ymm4, %ymm4
 ; CHECK: vfixupimmpd $5, %ymm2, %ymm1, %ymm4 {%k1} {z}
 ; CHECK: vfixupimmpd $3, %ymm2, %ymm1, %ymm0
 ; CHECK: vaddpd %ymm4, %ymm3, %ymm1
@@ -7989,7 +7989,7 @@ define <4 x double>@test_int_x86_avx512_maskz_fixupimm_pd_256(<4 x double> %x0,
 ; CHECK: kmovw %edi, %k1
 ; CHECK: vmovaps %zmm0, %zmm3
 ; CHECK: vfixupimmpd $5, %ymm2, %ymm1, %ymm3 {%k1} {z}
-; CHECK: vpxor %ymm4, %ymm4, %ymm4
+; CHECK: vpxord %ymm4, %ymm4, %ymm4
 ; CHECK: vmovaps %zmm0, %zmm5
 ; CHECK: vfixupimmpd $4, %ymm4, %ymm1, %ymm5 {%k1} {z}
 ; CHECK: vfixupimmpd $3, %ymm2, %ymm1, %ymm0
@@ -8013,7 +8013,7 @@ define <4 x float>@test_int_x86_avx512_mask_fixupimm_ps_128(<4 x float> %x0, <4
 ; CHECK: vfixupimmps $5, %xmm2, %xmm1, %xmm3 {%k1}
 ; CHECK: vmovaps %zmm0, %zmm4
 ; CHECK: vfixupimmps $5, %xmm2, %xmm1, %xmm4
-; CHECK: vpxor %xmm2, %xmm2, %xmm2
+; CHECK: vpxord %xmm2, %xmm2, %xmm2
 ; CHECK: vfixupimmps $5, %xmm2, %xmm1, %xmm0 {%k1}
 ; CHECK: vaddps %xmm0, %xmm3, %xmm0
 ; CHECK: vaddps %xmm4, %xmm0, %xmm0
@@ -8035,7 +8035,7 @@ define <4 x float>@test_int_x86_avx512_maskz_fixupimm_ps_128(<4 x float> %x0, <4
 ; CHECK: vfixupimmps $5, %xmm2, %xmm1, %xmm3 {%k1} {z}
 ; CHECK: vmovaps %zmm0, %zmm4
 ; CHECK: vfixupimmps $5, %xmm2, %xmm1, %xmm4
-; CHECK: vpxor %xmm2, %xmm2, %xmm2
+; CHECK: vpxord %xmm2, %xmm2, %xmm2
 ; CHECK: vfixupimmps $5, %xmm2, %xmm1, %xmm0 {%k1} {z}
 ; CHECK: vaddps %xmm0, %xmm3, %xmm0
 ; CHECK: vaddps %xmm4, %xmm0, %xmm0
@@ -8057,7 +8057,7 @@ define <8 x float>@test_int_x86_avx512_mask_fixupimm_ps_256(<8 x float> %x0, <8
 ; CHECK: vfixupimmps $5, %ymm2, %ymm1, %ymm3 {%k1}
 ; CHECK: vmovaps %zmm0, %zmm4
 ; CHECK: vfixupimmps $5, %ymm2, %ymm1, %ymm4
-; CHECK: vpxor %ymm2, %ymm2, %ymm2
+; CHECK: vpxord %ymm2, %ymm2, %ymm2
 ; CHECK: vfixupimmps $5, %ymm2, %ymm1, %ymm0 {%k1}
 ; CHECK: vaddps %ymm0, %ymm3, %ymm0
 ; CHECK: vaddps %ymm4, %ymm0, %ymm0
@@ -8079,7 +8079,7 @@ define <8 x float>@test_int_x86_avx512_maskz_fixupimm_ps_256(<8 x float> %x0, <8
 ; CHECK: vfixupimmps $5, %ymm2, %ymm1, %ymm3 {%k1} {z}
 ; CHECK: vmovaps %zmm0, %zmm4
 ; CHECK: vfixupimmps $5, %ymm2, %ymm1, %ymm4
-; CHECK: vpxor %ymm2, %ymm2, %ymm2
+; CHECK: vpxord %ymm2, %ymm2, %ymm2
 ; CHECK: vfixupimmps $5, %ymm2, %ymm1, %ymm0 {%k1} {z}
 ; CHECK: vaddps %ymm0, %ymm3, %ymm0
 ; CHECK: vaddps %ymm4, %ymm0, %ymm0
diff --git a/test/CodeGen/X86/avx512vl-vbroadcast.ll b/test/CodeGen/X86/avx512vl-vbroadcast.ll
index 5ffa0b2cba8..1132a0077b4 100644
--- a/test/CodeGen/X86/avx512vl-vbroadcast.ll
+++ b/test/CodeGen/X86/avx512vl-vbroadcast.ll
@@ -73,7 +73,7 @@ define <8 x float> @_inreg8xfloat(float %a) {
 define <8 x float> @_ss8xfloat_mask(<8 x float> %i, float %a, <8 x i32> %mask1) {
 ; CHECK-LABEL: _ss8xfloat_mask:
 ; CHECK:       # BB#0:
-; CHECK-NEXT:    vpxor %ymm3, %ymm3, %ymm3
+; CHECK-NEXT:    vpxord %ymm3, %ymm3, %ymm3
 ; CHECK-NEXT:    vpcmpneqd %ymm3, %ymm2, %k1
 ; CHECK-NEXT:    vbroadcastss %xmm1, %ymm0 {%k1}
 ; CHECK-NEXT:    retq
@@ -87,7 +87,7 @@ define <8 x float> @_ss8xfloat_mask(<8 x float> %i, float %a, <8 x i32> %mask1
 define <8 x float> @_ss8xfloat_maskz(float %a, <8 x i32> %mask1) {
 ; CHECK-LABEL: _ss8xfloat_maskz:
 ; CHECK:       # BB#0:
-; CHECK-NEXT:    vpxor %ymm2, %ymm2, %ymm2
+; CHECK-NEXT:    vpxord %ymm2, %ymm2, %ymm2
 ; CHECK-NEXT:    vpcmpneqd %ymm2, %ymm1, %k1
 ; CHECK-NEXT:    vbroadcastss %xmm0, %ymm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -111,7 +111,7 @@ define <4 x float> @_inreg4xfloat(float %a) {
 define <4 x float> @_ss4xfloat_mask(<4 x float> %i, float %a, <4 x i32> %mask1) {
 ; CHECK-LABEL: _ss4xfloat_mask:
 ; CHECK:       # BB#0:
-; CHECK-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; CHECK-NEXT:    vpxord %xmm3, %xmm3, %xmm3
 ; CHECK-NEXT:    vpcmpneqd %xmm3, %xmm2, %k1
 ; CHECK-NEXT:    vbroadcastss %xmm1, %xmm0 {%k1}
 ; CHECK-NEXT:    retq
@@ -125,7 +125,7 @@ define <4 x float> @_ss4xfloat_mask(<4 x float> %i, float %a, <4 x i32> %mask1
 define <4 x float> @_ss4xfloat_maskz(float %a, <4 x i32> %mask1) {
 ; CHECK-LABEL: _ss4xfloat_maskz:
 ; CHECK:       # BB#0:
-; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; CHECK-NEXT:    vpxord %xmm2, %xmm2, %xmm2
 ; CHECK-NEXT:    vpcmpneqd %xmm2, %xmm1, %k1
 ; CHECK-NEXT:    vbroadcastss %xmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -149,7 +149,7 @@ define <4 x double> @_inreg4xdouble(double %a) {
 define <4 x double> @_ss4xdouble_mask(<4 x double> %i, double %a, <4 x i32> %mask1) {
 ; CHECK-LABEL: _ss4xdouble_mask:
 ; CHECK:       # BB#0:
-; CHECK-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; CHECK-NEXT:    vpxord %xmm3, %xmm3, %xmm3
 ; CHECK-NEXT:    vpcmpneqd %xmm3, %xmm2, %k1
 ; CHECK-NEXT:    vbroadcastsd %xmm1, %ymm0 {%k1}
 ; CHECK-NEXT:    retq
@@ -163,7 +163,7 @@ define <4 x double> @_ss4xdouble_mask(<4 x double> %i, double %a, <4 x i32> %m
 define <4 x double> @_ss4xdouble_maskz(double %a, <4 x i32> %mask1) {
 ; CHECK-LABEL: _ss4xdouble_maskz:
 ; CHECK:       # BB#0:
-; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; CHECK-NEXT:    vpxord %xmm2, %xmm2, %xmm2
 ; CHECK-NEXT:    vpcmpneqd %xmm2, %xmm1, %k1
 ; CHECK-NEXT:    vbroadcastsd %xmm0, %ymm0 {%k1} {z}
 ; CHECK-NEXT:    retq
diff --git a/test/CodeGen/X86/fma_patterns.ll b/test/CodeGen/X86/fma_patterns.ll
index 76a4acf00f9..5457c9fb59f 100644
--- a/test/CodeGen/X86/fma_patterns.ll
+++ b/test/CodeGen/X86/fma_patterns.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by update_llc_test_checks.py
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+fma -fp-contract=fast | FileCheck %s --check-prefix=ALL --check-prefix=FMA
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+fma4,+fma -fp-contract=fast | FileCheck %s --check-prefix=ALL --check-prefix=FMA4
@@ -1150,7 +1151,7 @@ define <4 x float> @test_v4f32_fneg_fmul(<4 x float> %x, <4 x float> %y) #0 {
 ;
 ; AVX512-LABEL: test_v4f32_fneg_fmul:
 ; AVX512:       # BB#0:
-; AVX512-NEXT:    vxorps %xmm2, %xmm2, %xmm2
+; AVX512-NEXT:    vpxord %xmm2, %xmm2, %xmm2
 ; AVX512-NEXT:    vfnmsub213ps %xmm2, %xmm1, %xmm0
 ; AVX512-NEXT:    retq
   %m = fmul nsz <4 x float> %x, %y
@@ -1173,7 +1174,7 @@ define <4 x double> @test_v4f64_fneg_fmul(<4 x double> %x, <4 x double> %y) #0 {
 ;
 ; AVX512-LABEL: test_v4f64_fneg_fmul:
 ; AVX512:       # BB#0:
-; AVX512-NEXT:    vxorps %ymm2, %ymm2, %ymm2
+; AVX512-NEXT:    vpxord %ymm2, %ymm2, %ymm2
 ; AVX512-NEXT:    vfnmsub213pd %ymm2, %ymm1, %ymm0
 ; AVX512-NEXT:    retq
   %m = fmul nsz <4 x double> %x, %y
diff --git a/test/CodeGen/X86/masked_memop.ll b/test/CodeGen/X86/masked_memop.ll
index da74cf64eaa..fbe37f460c1 100644
--- a/test/CodeGen/X86/masked_memop.ll
+++ b/test/CodeGen/X86/masked_memop.ll
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; NOTE: Assertions have been autogenerated by update_llc_test_checks.py
 ; RUN: llc -mtriple=x86_64-apple-darwin -mattr=avx < %s | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
 ; RUN: llc -mtriple=x86_64-apple-darwin -mattr=avx2 < %s | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
 ; RUN: llc -mtriple=x86_64-apple-darwin -mattr=avx512f < %s | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX512F
@@ -204,7 +204,7 @@ define <8 x double> @test5(<8 x i32> %trigger, <8 x double>* %addr, <8 x double>
 ;
 ; SKX-LABEL: test5:
 ; SKX:       ## BB#0:
-; SKX-NEXT:    vpxor %ymm2, %ymm2, %ymm2
+; SKX-NEXT:    vpxord %ymm2, %ymm2, %ymm2
 ; SKX-NEXT:    vpcmpeqd %ymm2, %ymm0, %k1
 ; SKX-NEXT:    vmovupd (%rdi), %zmm1 {%k1}
 ; SKX-NEXT:    vmovaps %zmm1, %zmm0
@@ -233,7 +233,7 @@ define <2 x double> @test6(<2 x i64> %trigger, <2 x double>* %addr, <2 x double>
 ;
 ; SKX-LABEL: test6:
 ; SKX:       ## BB#0:
-; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; SKX-NEXT:    vpxord %xmm2, %xmm2, %xmm2
 ; SKX-NEXT:    vpcmpeqq %xmm2, %xmm0, %k1
 ; SKX-NEXT:    vmovupd (%rdi), %xmm1 {%k1}
 ; SKX-NEXT:    vmovaps %zmm1, %zmm0
@@ -262,7 +262,7 @@ define <4 x float> @test7(<4 x i32> %trigger, <4 x float>* %addr, <4 x float> %d
 ;
 ; SKX-LABEL: test7:
 ; SKX:       ## BB#0:
-; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; SKX-NEXT:    vpxord %xmm2, %xmm2, %xmm2
 ; SKX-NEXT:    vpcmpeqd %xmm2, %xmm0, %k1
 ; SKX-NEXT:    vmovups (%rdi), %xmm1 {%k1}
 ; SKX-NEXT:    vmovaps %zmm1, %zmm0
@@ -299,7 +299,7 @@ define <4 x i32> @test8(<4 x i32> %trigger, <4 x i32>* %addr, <4 x i32> %dst) {
 ;
 ; SKX-LABEL: test8:
 ; SKX:       ## BB#0:
-; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; SKX-NEXT:    vpxord %xmm2, %xmm2, %xmm2
 ; SKX-NEXT:    vpcmpeqd %xmm2, %xmm0, %k1
 ; SKX-NEXT:    vmovdqu32 (%rdi), %xmm1 {%k1}
 ; SKX-NEXT:    vmovaps %zmm1, %zmm0
@@ -333,7 +333,7 @@ define void @test9(<4 x i32> %trigger, <4 x i32>* %addr, <4 x i32> %val) {
 ;
 ; SKX-LABEL: test9:
 ; SKX:       ## BB#0:
-; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; SKX-NEXT:    vpxord %xmm2, %xmm2, %xmm2
 ; SKX-NEXT:    vpcmpeqd %xmm2, %xmm0, %k1
 ; SKX-NEXT:    vmovdqu32 %xmm1, (%rdi) {%k1}
 ; SKX-NEXT:    retq
@@ -375,7 +375,7 @@ define <4 x double> @test10(<4 x i32> %trigger, <4 x double>* %addr, <4 x double
 ;
 ; SKX-LABEL: test10:
 ; SKX:       ## BB#0:
-; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; SKX-NEXT:    vpxord %xmm2, %xmm2, %xmm2
 ; SKX-NEXT:    vpcmpeqd %xmm2, %xmm0, %k1
 ; SKX-NEXT:    vmovapd (%rdi), %ymm1 {%k1}
 ; SKX-NEXT:    vmovaps %zmm1, %zmm0
@@ -415,7 +415,7 @@ define <4 x double> @test10b(<4 x i32> %trigger, <4 x double>* %addr, <4 x doubl
 ;
 ; SKX-LABEL: test10b:
 ; SKX:       ## BB#0:
-; SKX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; SKX-NEXT:    vpxord %xmm1, %xmm1, %xmm1
 ; SKX-NEXT:    vpcmpeqd %xmm1, %xmm0, %k1
 ; SKX-NEXT:    vmovapd (%rdi), %ymm0 {%k1} {z}
 ; SKX-NEXT:    retq
@@ -456,7 +456,7 @@ define <8 x float> @test11a(<8 x i32> %trigger, <8 x float>* %addr, <8 x float>
 ;
 ; SKX-LABEL: test11a:
 ; SKX:       ## BB#0:
-; SKX-NEXT:    vpxor %ymm2, %ymm2, %ymm2
+; SKX-NEXT:    vpxord %ymm2, %ymm2, %ymm2
 ; SKX-NEXT:    vpcmpeqd %ymm2, %ymm0, %k1
 ; SKX-NEXT:    vmovaps (%rdi), %ymm1 {%k1}
 ; SKX-NEXT:    vmovaps %zmm1, %zmm0
@@ -624,7 +624,7 @@ define void @test12(<8 x i32> %trigger, <8 x i32>* %addr, <8 x i32> %val) {
 ;
 ; SKX-LABEL: test12:
 ; SKX:       ## BB#0:
-; SKX-NEXT:    vpxor %ymm2, %ymm2, %ymm2
+; SKX-NEXT:    vpxord %ymm2, %ymm2, %ymm2
 ; SKX-NEXT:    vpcmpeqd %ymm2, %ymm0, %k1
 ; SKX-NEXT:    vmovdqu32 %ymm1, (%rdi) {%k1}
 ; SKX-NEXT:    retq
@@ -704,7 +704,7 @@ define void @test14(<2 x i32> %trigger, <2 x float>* %addr, <2 x float> %val) {
 ;
 ; SKX-LABEL: test14:
 ; SKX:       ## BB#0:
-; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; SKX-NEXT:    vpxord %xmm2, %xmm2, %xmm2
 ; SKX-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
 ; SKX-NEXT:    vpcmpeqq %xmm2, %xmm0, %k0
 ; SKX-NEXT:    kshiftlw $14, %k0, %k0
@@ -752,7 +752,7 @@ define void @test15(<2 x i32> %trigger, <2 x i32>* %addr, <2 x i32> %val) {
 ;
 ; SKX-LABEL: test15:
 ; SKX:       ## BB#0:
-; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; SKX-NEXT:    vpxord %xmm2, %xmm2, %xmm2
 ; SKX-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
 ; SKX-NEXT:    vpcmpeqq %xmm2, %xmm0, %k1
 ; SKX-NEXT:    vpmovqd %xmm1, (%rdi) {%k1}
@@ -798,7 +798,7 @@ define <2 x float> @test16(<2 x i32> %trigger, <2 x float>* %addr, <2 x float> %
 ;
 ; SKX-LABEL: test16:
 ; SKX:       ## BB#0:
-; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; SKX-NEXT:    vpxord %xmm2, %xmm2, %xmm2
 ; SKX-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
 ; SKX-NEXT:    vpcmpeqq %xmm2, %xmm0, %k0
 ; SKX-NEXT:    kshiftlw $14, %k0, %k0
@@ -853,7 +853,7 @@ define <2 x i32> @test17(<2 x i32> %trigger, <2 x i32>* %addr, <2 x i32> %dst) {
 ;
 ; SKX-LABEL: test17:
 ; SKX:       ## BB#0:
-; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; SKX-NEXT:    vpxord %xmm2, %xmm2, %xmm2
 ; SKX-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
 ; SKX-NEXT:    vpcmpeqq %xmm2, %xmm0, %k0
 ; SKX-NEXT:    kshiftlw $14, %k0, %k0
@@ -900,7 +900,7 @@ define <2 x float> @test18(<2 x i32> %trigger, <2 x float>* %addr) {
 ;
 ; SKX-LABEL: test18:
 ; SKX:       ## BB#0:
-; SKX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; SKX-NEXT:    vpxord %xmm1, %xmm1, %xmm1
 ; SKX-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
 ; SKX-NEXT:    vpcmpeqq %xmm1, %xmm0, %k0
 ; SKX-NEXT:    kshiftlw $14, %k0, %k0
@@ -1317,12 +1317,12 @@ define <4 x i32> @load_one_mask_bit_set1(<4 x i32>* %addr, <4 x i32> %val) {
 define <4 x float> @load_one_mask_bit_set2(<4 x float>* %addr, <4 x float> %val) {
 ; AVX-LABEL: load_one_mask_bit_set2:
 ; AVX:       ## BB#0:
-; AVX-NEXT:    vinsertps $32, 8(%rdi), %xmm0, %xmm0 ## xmm0 = xmm0[0,1],mem[0],xmm0[3]
+; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: load_one_mask_bit_set2:
 ; AVX512:       ## BB#0:
-; AVX512-NEXT:    vinsertps $32, 8(%rdi), %xmm0, %xmm0 ## xmm0 = xmm0[0,1],mem[0],xmm0[3]
+; AVX512-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
 ; AVX512-NEXT:    retq
   %res = call <4 x float> @llvm.masked.load.v4f32(<4 x float>* %addr, i32 4, <4 x i1> <i1 false, i1 false, i1 true, i1 false>, <4 x float> %val)
   ret <4 x float> %res
@@ -1368,21 +1368,21 @@ define <4 x double> @load_one_mask_bit_set4(<4 x double>* %addr, <4 x double> %v
 ; AVX-LABEL: load_one_mask_bit_set4:
 ; AVX:       ## BB#0:
 ; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX-NEXT:    vmovhpd 24(%rdi), %xmm1, %xmm1 ## xmm1 = xmm1[0],mem[0]
+; AVX-NEXT:    vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
 ; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512F-LABEL: load_one_mask_bit_set4:
 ; AVX512F:       ## BB#0:
 ; AVX512F-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX512F-NEXT:    vmovhpd 24(%rdi), %xmm1, %xmm1 ## xmm1 = xmm1[0],mem[0]
+; AVX512F-NEXT:    vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
 ; AVX512F-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 ;
 ; SKX-LABEL: load_one_mask_bit_set4:
 ; SKX:       ## BB#0:
 ; SKX-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; SKX-NEXT:    vmovhpd 24(%rdi), %xmm1, %xmm1 ## xmm1 = xmm1[0],mem[0]
+; SKX-NEXT:    vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
 ; SKX-NEXT:    vinsertf32x4 $1, %xmm1, %ymm0, %ymm0
 ; SKX-NEXT:    retq
   %res = call <4 x double> @llvm.masked.load.v4f64(<4 x double>* %addr, i32 4, <4 x i1> <i1 false, i1 false, i1 false, i1 true>, <4 x double> %val)
diff --git a/test/CodeGen/X86/vector-shuffle-128-v2.ll b/test/CodeGen/X86/vector-shuffle-128-v2.ll
index d1d4f72da1e..be76233a088 100644
--- a/test/CodeGen/X86/vector-shuffle-128-v2.ll
+++ b/test/CodeGen/X86/vector-shuffle-128-v2.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by update_llc_test_checks.py
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE3
@@ -762,7 +763,7 @@ define <2 x i64> @shuffle_v2i64_z1(<2 x i64> %a) {
 ;
 ; AVX512VL-LABEL: shuffle_v2i64_z1:
 ; AVX512VL:       # BB#0:
-; AVX512VL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpxord %xmm1, %xmm1, %xmm1
 ; AVX512VL-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
 ; AVX512VL-NEXT:    retq
   %shuffle = shufflevector <2 x i64> %a, <2 x i64> zeroinitializer, <2 x i32> <i32 2, i32 1>
@@ -804,7 +805,7 @@ define <2 x double> @shuffle_v2f64_1z(<2 x double> %a) {
 ;
 ; AVX512VL-LABEL: shuffle_v2f64_1z:
 ; AVX512VL:       # BB#0:
-; AVX512VL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpxord %xmm1, %xmm1, %xmm1
 ; AVX512VL-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
 ; AVX512VL-NEXT:    retq
   %shuffle = shufflevector <2 x double> %a, <2 x double> zeroinitializer, <2 x i32> <i32 1, i32 3>
@@ -833,7 +834,7 @@ define <2 x double> @shuffle_v2f64_z0(<2 x double> %a) {
 ;
 ; AVX512VL-LABEL: shuffle_v2f64_z0:
 ; AVX512VL:       # BB#0:
-; AVX512VL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpxord %xmm1, %xmm1, %xmm1
 ; AVX512VL-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
 ; AVX512VL-NEXT:    retq
   %shuffle = shufflevector <2 x double> %a, <2 x double> zeroinitializer, <2 x i32> <i32 2, i32 0>
@@ -865,11 +866,23 @@ define <2 x double> @shuffle_v2f64_z1(<2 x double> %a) {
 ; SSE41-NEXT:    blendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; SSE41-NEXT:    retq
 ;
-; AVX-LABEL: shuffle_v2f64_z1:
-; AVX:       # BB#0:
-; AVX-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
-; AVX-NEXT:    vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
-; AVX-NEXT:    retq
+; AVX1-LABEL: shuffle_v2f64_z1:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
+; AVX1-NEXT:    vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: shuffle_v2f64_z1:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
+; AVX2-NEXT:    vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; AVX2-NEXT:    retq
+;
+; AVX512VL-LABEL: shuffle_v2f64_z1:
+; AVX512VL:       # BB#0:
+; AVX512VL-NEXT:    vpxord %xmm1, %xmm1, %xmm1
+; AVX512VL-NEXT:    vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; AVX512VL-NEXT:    retq
   %shuffle = shufflevector <2 x double> %a, <2 x double> zeroinitializer, <2 x i32> <i32 2, i32 1>
   ret <2 x double> %shuffle
 }
@@ -895,7 +908,7 @@ define <2 x double> @shuffle_v2f64_bitcast_1z(<2 x double> %a) {
 ;
 ; AVX512VL-LABEL: shuffle_v2f64_bitcast_1z:
 ; AVX512VL:       # BB#0:
-; AVX512VL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpxord %xmm1, %xmm1, %xmm1
 ; AVX512VL-NEXT:    vshufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0]
 ; AVX512VL-NEXT:    retq
   %shuffle64 = shufflevector <2 x double> %a, <2 x double> zeroinitializer, <2 x i32> <i32 1, i32 2>
@@ -947,7 +960,7 @@ define <2 x i64> @shuffle_v2i64_bitcast_z123(<2 x i64> %x) {
 ;
 ; AVX512VL-LABEL: shuffle_v2i64_bitcast_z123:
 ; AVX512VL:       # BB#0:
-; AVX512VL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpxord %xmm1, %xmm1, %xmm1
 ; AVX512VL-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
 ; AVX512VL-NEXT:    retq
   %bitcast32 = bitcast <2 x i64> %x to <4 x float>
diff --git a/test/CodeGen/X86/vector-tzcnt-128.ll b/test/CodeGen/X86/vector-tzcnt-128.ll
index ab75d00ea30..54b7d7bd54d 100644
--- a/test/CodeGen/X86/vector-tzcnt-128.ll
+++ b/test/CodeGen/X86/vector-tzcnt-128.ll
@@ -192,7 +192,7 @@ define <2 x i64> @testv2i64u(<2 x i64> %in) nounwind {
 ;
 ; AVX512CDVL-LABEL: testv2i64u:
 ; AVX512CDVL:       # BB#0:
-; AVX512CDVL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512CDVL-NEXT:    vpxord %xmm1, %xmm1, %xmm1
 ; AVX512CDVL-NEXT:    vpsubq %xmm0, %xmm1, %xmm1
 ; AVX512CDVL-NEXT:    vpandq %xmm1, %xmm0, %xmm0
 ; AVX512CDVL-NEXT:    vplzcntq %xmm0, %xmm0
@@ -388,7 +388,7 @@ define <4 x i32> @testv4i32(<4 x i32> %in) nounwind {
 ;
 ; AVX512CDVL-LABEL: testv4i32:
 ; AVX512CDVL:       # BB#0:
-; AVX512CDVL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512CDVL-NEXT:    vpxord %xmm1, %xmm1, %xmm1
 ; AVX512CDVL-NEXT:    vpsubd %xmm0, %xmm1, %xmm2
 ; AVX512CDVL-NEXT:    vpandd %xmm2, %xmm0, %xmm0
 ; AVX512CDVL-NEXT:    vpsubd {{.*}}(%rip){1to4}, %xmm0, %xmm0
@@ -611,7 +611,7 @@ define <4 x i32> @testv4i32u(<4 x i32> %in) nounwind {
 ;
 ; AVX512CDVL-LABEL: testv4i32u:
 ; AVX512CDVL:       # BB#0:
-; AVX512CDVL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512CDVL-NEXT:    vpxord %xmm1, %xmm1, %xmm1
 ; AVX512CDVL-NEXT:    vpsubd %xmm0, %xmm1, %xmm1
 ; AVX512CDVL-NEXT:    vpandd %xmm1, %xmm0, %xmm0
 ; AVX512CDVL-NEXT:    vplzcntd %xmm0, %xmm0
@@ -794,7 +794,7 @@ define <8 x i16> @testv8i16(<8 x i16> %in) nounwind {
 ;
 ; AVX512CDVL-LABEL: testv8i16:
 ; AVX512CDVL:       # BB#0:
-; AVX512CDVL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512CDVL-NEXT:    vpxord %xmm1, %xmm1, %xmm1
 ; AVX512CDVL-NEXT:    vpsubw %xmm0, %xmm1, %xmm1
 ; AVX512CDVL-NEXT:    vpandq %xmm1, %xmm0, %xmm0
 ; AVX512CDVL-NEXT:    vpsubw {{.*}}(%rip), %xmm0, %xmm0
@@ -992,7 +992,7 @@ define <8 x i16> @testv8i16u(<8 x i16> %in) nounwind {
 ;
 ; AVX512CDVL-LABEL: testv8i16u:
 ; AVX512CDVL:       # BB#0:
-; AVX512CDVL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512CDVL-NEXT:    vpxord %xmm1, %xmm1, %xmm1
 ; AVX512CDVL-NEXT:    vpsubw %xmm0, %xmm1, %xmm1
 ; AVX512CDVL-NEXT:    vpandq %xmm1, %xmm0, %xmm0
 ; AVX512CDVL-NEXT:    vpsubw {{.*}}(%rip), %xmm0, %xmm0
@@ -1168,7 +1168,7 @@ define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
 ;
 ; AVX512CDVL-LABEL: testv16i8:
 ; AVX512CDVL:       # BB#0:
-; AVX512CDVL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512CDVL-NEXT:    vpxord %xmm1, %xmm1, %xmm1
 ; AVX512CDVL-NEXT:    vpsubb %xmm0, %xmm1, %xmm1
 ; AVX512CDVL-NEXT:    vpandq %xmm1, %xmm0, %xmm0
 ; AVX512CDVL-NEXT:    vpsubb {{.*}}(%rip), %xmm0, %xmm0
@@ -1334,7 +1334,7 @@ define <16 x i8> @testv16i8u(<16 x i8> %in) nounwind {
 ;
 ; AVX512CDVL-LABEL: testv16i8u:
 ; AVX512CDVL:       # BB#0:
-; AVX512CDVL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512CDVL-NEXT:    vpxord %xmm1, %xmm1, %xmm1
 ; AVX512CDVL-NEXT:    vpsubb %xmm0, %xmm1, %xmm1
 ; AVX512CDVL-NEXT:    vpandq %xmm1, %xmm0, %xmm0
 ; AVX512CDVL-NEXT:    vpsubb {{.*}}(%rip), %xmm0, %xmm0
diff --git a/test/CodeGen/X86/vector-tzcnt-256.ll b/test/CodeGen/X86/vector-tzcnt-256.ll
index b7d964406b0..286bc50ec72 100644
--- a/test/CodeGen/X86/vector-tzcnt-256.ll
+++ b/test/CodeGen/X86/vector-tzcnt-256.ll
@@ -56,7 +56,7 @@ define <4 x i64> @testv4i64(<4 x i64> %in) nounwind {
 ;
 ; AVX512CDVL-LABEL: testv4i64:
 ; AVX512CDVL:       # BB#0:
-; AVX512CDVL-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX512CDVL-NEXT:    vpxord %ymm1, %ymm1, %ymm1
 ; AVX512CDVL-NEXT:    vpsubq %ymm0, %ymm1, %ymm2
 ; AVX512CDVL-NEXT:    vpandq %ymm2, %ymm0, %ymm0
 ; AVX512CDVL-NEXT:    vpsubq {{.*}}(%rip){1to4}, %ymm0, %ymm0
@@ -144,7 +144,7 @@ define <4 x i64> @testv4i64u(<4 x i64> %in) nounwind {
 ;
 ; AVX512CDVL-LABEL: testv4i64u:
 ; AVX512CDVL:       # BB#0:
-; AVX512CDVL-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX512CDVL-NEXT:    vpxord %ymm1, %ymm1, %ymm1
 ; AVX512CDVL-NEXT:    vpsubq %ymm0, %ymm1, %ymm1
 ; AVX512CDVL-NEXT:    vpandq %ymm1, %ymm0, %ymm0
 ; AVX512CDVL-NEXT:    vplzcntq %ymm0, %ymm0
@@ -229,7 +229,7 @@ define <8 x i32> @testv8i32(<8 x i32> %in) nounwind {
 ;
 ; AVX512CDVL-LABEL: testv8i32:
 ; AVX512CDVL:       # BB#0:
-; AVX512CDVL-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX512CDVL-NEXT:    vpxord %ymm1, %ymm1, %ymm1
 ; AVX512CDVL-NEXT:    vpsubd %ymm0, %ymm1, %ymm2
 ; AVX512CDVL-NEXT:    vpandd %ymm2, %ymm0, %ymm0
 ; AVX512CDVL-NEXT:    vpsubd {{.*}}(%rip){1to8}, %ymm0, %ymm0
@@ -337,7 +337,7 @@ define <8 x i32> @testv8i32u(<8 x i32> %in) nounwind {
 ;
 ; AVX512CDVL-LABEL: testv8i32u:
 ; AVX512CDVL:       # BB#0:
-; AVX512CDVL-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX512CDVL-NEXT:    vpxord %ymm1, %ymm1, %ymm1
 ; AVX512CDVL-NEXT:    vpsubd %ymm0, %ymm1, %ymm1
 ; AVX512CDVL-NEXT:    vpandd %ymm1, %ymm0, %ymm0
 ; AVX512CDVL-NEXT:    vplzcntd %ymm0, %ymm0
@@ -415,7 +415,7 @@ define <16 x i16> @testv16i16(<16 x i16> %in) nounwind {
 ;
 ; AVX512CDVL-LABEL: testv16i16:
 ; AVX512CDVL:       # BB#0:
-; AVX512CDVL-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX512CDVL-NEXT:    vpxord %ymm1, %ymm1, %ymm1
 ; AVX512CDVL-NEXT:    vpsubw %ymm0, %ymm1, %ymm1
 ; AVX512CDVL-NEXT:    vpandq %ymm1, %ymm0, %ymm0
 ; AVX512CDVL-NEXT:    vpsubw {{.*}}(%rip), %ymm0, %ymm0
@@ -511,7 +511,7 @@ define <16 x i16> @testv16i16u(<16 x i16> %in) nounwind {
 ;
 ; AVX512CDVL-LABEL: testv16i16u:
 ; AVX512CDVL:       # BB#0:
-; AVX512CDVL-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX512CDVL-NEXT:    vpxord %ymm1, %ymm1, %ymm1
 ; AVX512CDVL-NEXT:    vpsubw %ymm0, %ymm1, %ymm1
 ; AVX512CDVL-NEXT:    vpandq %ymm1, %ymm0, %ymm0
 ; AVX512CDVL-NEXT:    vpsubw {{.*}}(%rip), %ymm0, %ymm0
@@ -598,7 +598,7 @@ define <32 x i8> @testv32i8(<32 x i8> %in) nounwind {
 ;
 ; AVX512CDVL-LABEL: testv32i8:
 ; AVX512CDVL:       # BB#0:
-; AVX512CDVL-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX512CDVL-NEXT:    vpxord %ymm1, %ymm1, %ymm1
 ; AVX512CDVL-NEXT:    vpsubb %ymm0, %ymm1, %ymm1
 ; AVX512CDVL-NEXT:    vpandq %ymm1, %ymm0, %ymm0
 ; AVX512CDVL-NEXT:    vpsubb {{.*}}(%rip), %ymm0, %ymm0
@@ -679,7 +679,7 @@ define <32 x i8> @testv32i8u(<32 x i8> %in) nounwind {
 ;
 ; AVX512CDVL-LABEL: testv32i8u:
 ; AVX512CDVL:       # BB#0:
-; AVX512CDVL-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX512CDVL-NEXT:    vpxord %ymm1, %ymm1, %ymm1
 ; AVX512CDVL-NEXT:    vpsubb %ymm0, %ymm1, %ymm1
 ; AVX512CDVL-NEXT:    vpandq %ymm1, %ymm0, %ymm0
 ; AVX512CDVL-NEXT:    vpsubb {{.*}}(%rip), %ymm0, %ymm0
-- 
2.11.0
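
A note for readers, not part of the upstream commit: the EVEX encoding matters
because the VEX-encoded zero idiom (vpxor) can only address registers
xmm0-xmm15, while the EVEX-encoded vpxord can address all 32 registers
(xmm0-xmm31) that AVX-512 provides. The reduced test below is an illustrative
sketch in the style of the tests updated above; the function name and RUN line
are hypothetical, but the expected zero idiom matches the SKX checks in this
patch (see test1 in avx512-calling-conv.ll):

; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=skx | FileCheck %s
define <4 x i32> @zero128() {
; CHECK-LABEL: zero128:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpxord %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    retq
  ret <4 x i32> zeroinitializer
}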