From bd33f31445e7b89db98c1edc062d7589a0a9a4c1 Mon Sep 17 00:00:00 2001 From: Simon Pilgrim Date: Wed, 14 Nov 2018 21:31:50 +0000 Subject: [PATCH] [X86][SSE] Add SSE2/SSE42 masked load/store tests Now that the load/store tests are split, running the tests on multiple (illegal) targets has far less impact git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@346896 91177308-0d34-0410-b5e6-96231b3b80d8 --- test/CodeGen/X86/masked_load.ll | 1644 ++++++++++++++++++++++++++++++++------ test/CodeGen/X86/masked_store.ll | 682 +++++++++++++--- 2 files changed, 1977 insertions(+), 349 deletions(-) diff --git a/test/CodeGen/X86/masked_load.ll b/test/CodeGen/X86/masked_load.ll index 334adca8079..ef67b94f70a 100644 --- a/test/CodeGen/X86/masked_load.ll +++ b/test/CodeGen/X86/masked_load.ll @@ -1,14 +1,21 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=x86_64-apple-darwin -mattr=avx < %s | FileCheck %s --check-prefix=AVX --check-prefix=AVX1 -; RUN: llc -mtriple=x86_64-apple-darwin -mattr=avx2 < %s | FileCheck %s --check-prefix=AVX --check-prefix=AVX2 -; RUN: llc -mtriple=x86_64-apple-darwin -mattr=avx512f < %s | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX512F -; RUN: llc -mtriple=x86_64-apple-darwin -mattr=avx512f,avx512bw,avx512vl < %s | FileCheck %s --check-prefix=AVX512 --check-prefix=SKX - -; To test for the case where masked load is not legal, we should add a run with a target -; that does not have AVX, but that case should probably be a separate test file using less tests -; because it takes over 1.2 seconds to codegen these tests on Haswell 4GHz if there's no maskmov. +; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=sse2 | FileCheck %s --check-prefixes=SSE,SSE2 +; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=sse4.2 | FileCheck %s --check-prefixes=SSE,SSE42 +; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=avx | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX1 +; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=avx2 | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX2 +; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=avx512f | FileCheck %s --check-prefixes=AVX,AVX512,AVX512F +; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=avx512f,avx512bw,avx512vl | FileCheck %s --check-prefixes=AVX,AVX512,AVX512VLBW define <1 x double> @load_v1f64_v1i64(<1 x i64> %trigger, <1 x double>* %addr, <1 x double> %dst) { +; SSE-LABEL: load_v1f64_v1i64: +; SSE: ## %bb.0: +; SSE-NEXT: testq %rdi, %rdi +; SSE-NEXT: jne LBB0_2 +; SSE-NEXT: ## %bb.1: ## %cond.load +; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero +; SSE-NEXT: LBB0_2: ## %else +; SSE-NEXT: retq +; ; AVX-LABEL: load_v1f64_v1i64: ; AVX: ## %bb.0: ; AVX-NEXT: testq %rdi, %rdi @@ -17,29 +24,59 @@ define <1 x double> @load_v1f64_v1i64(<1 x i64> %trigger, <1 x double>* %addr, < ; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero ; AVX-NEXT: LBB0_2: ## %else ; AVX-NEXT: retq -; -; AVX512-LABEL: load_v1f64_v1i64: -; AVX512: ## %bb.0: -; AVX512-NEXT: testq %rdi, %rdi -; AVX512-NEXT: jne LBB0_2 -; AVX512-NEXT: ## %bb.1: ## %cond.load -; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero -; AVX512-NEXT: LBB0_2: ## %else -; AVX512-NEXT: retq %mask = icmp eq <1 x i64> %trigger, zeroinitializer %res = call <1 x double> @llvm.masked.load.v1f64.p0v1f64(<1 x double>* %addr, i32 4, <1 x i1>%mask, <1 x double>%dst) ret <1 x double> %res } -declare <1 x double> @llvm.masked.load.v1f64.p0v1f64(<1 x double>*, i32, <1 x i1>, <1 x double>) define <2 x double> @load_v2f64_v2i64(<2 x i64> %trigger, 
<2 x double>* %addr, <2 x double> %dst) { -; AVX-LABEL: load_v2f64_v2i64: -; AVX: ## %bb.0: -; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2 -; AVX-NEXT: vpcmpeqq %xmm2, %xmm0, %xmm0 -; AVX-NEXT: vmaskmovpd (%rdi), %xmm0, %xmm2 -; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm1, %xmm0 -; AVX-NEXT: retq +; SSE2-LABEL: load_v2f64_v2i64: +; SSE2: ## %bb.0: +; SSE2-NEXT: pxor %xmm2, %xmm2 +; SSE2-NEXT: pcmpeqd %xmm0, %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,0,3,2] +; SSE2-NEXT: pand %xmm2, %xmm0 +; SSE2-NEXT: movd %xmm0, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB1_2 +; SSE2-NEXT: ## %bb.1: ## %cond.load +; SSE2-NEXT: movlpd {{.*#+}} xmm1 = mem[0],xmm1[1] +; SSE2-NEXT: LBB1_2: ## %else +; SSE2-NEXT: pextrw $4, %xmm0, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB1_4 +; SSE2-NEXT: ## %bb.3: ## %cond.load1 +; SSE2-NEXT: movhpd {{.*#+}} xmm1 = xmm1[0],mem[0] +; SSE2-NEXT: LBB1_4: ## %else2 +; SSE2-NEXT: movapd %xmm1, %xmm0 +; SSE2-NEXT: retq +; +; SSE42-LABEL: load_v2f64_v2i64: +; SSE42: ## %bb.0: +; SSE42-NEXT: pxor %xmm2, %xmm2 +; SSE42-NEXT: pcmpeqq %xmm0, %xmm2 +; SSE42-NEXT: pextrb $0, %xmm2, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB1_2 +; SSE42-NEXT: ## %bb.1: ## %cond.load +; SSE42-NEXT: movlpd {{.*#+}} xmm1 = mem[0],xmm1[1] +; SSE42-NEXT: LBB1_2: ## %else +; SSE42-NEXT: pextrb $8, %xmm2, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB1_4 +; SSE42-NEXT: ## %bb.3: ## %cond.load1 +; SSE42-NEXT: movhpd {{.*#+}} xmm1 = xmm1[0],mem[0] +; SSE42-NEXT: LBB1_4: ## %else2 +; SSE42-NEXT: movapd %xmm1, %xmm0 +; SSE42-NEXT: retq +; +; AVX1OR2-LABEL: load_v2f64_v2i64: +; AVX1OR2: ## %bb.0: +; AVX1OR2-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX1OR2-NEXT: vpcmpeqq %xmm2, %xmm0, %xmm0 +; AVX1OR2-NEXT: vmaskmovpd (%rdi), %xmm0, %xmm2 +; AVX1OR2-NEXT: vblendvpd %xmm0, %xmm2, %xmm1, %xmm0 +; AVX1OR2-NEXT: retq ; ; AVX512F-LABEL: load_v2f64_v2i64: ; AVX512F: ## %bb.0: @@ -53,24 +90,99 @@ define <2 x double> @load_v2f64_v2i64(<2 x i64> %trigger, <2 x double>* %addr, < ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; -; SKX-LABEL: load_v2f64_v2i64: -; SKX: ## %bb.0: -; SKX-NEXT: vptestnmq %xmm0, %xmm0, %k1 -; SKX-NEXT: vblendmpd (%rdi), %xmm1, %xmm0 {%k1} -; SKX-NEXT: retq +; AVX512VLBW-LABEL: load_v2f64_v2i64: +; AVX512VLBW: ## %bb.0: +; AVX512VLBW-NEXT: vptestnmq %xmm0, %xmm0, %k1 +; AVX512VLBW-NEXT: vblendmpd (%rdi), %xmm1, %xmm0 {%k1} +; AVX512VLBW-NEXT: retq %mask = icmp eq <2 x i64> %trigger, zeroinitializer %res = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %addr, i32 4, <2 x i1>%mask, <2 x double>%dst) ret <2 x double> %res } define <4 x float> @load_v4f32_v4i32(<4 x i32> %trigger, <4 x float>* %addr, <4 x float> %dst) { -; AVX-LABEL: load_v4f32_v4i32: -; AVX: ## %bb.0: -; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2 -; AVX-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm0 -; AVX-NEXT: vmaskmovps (%rdi), %xmm0, %xmm2 -; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm1, %xmm0 -; AVX-NEXT: retq +; SSE2-LABEL: load_v4f32_v4i32: +; SSE2: ## %bb.0: +; SSE2-NEXT: pxor %xmm2, %xmm2 +; SSE2-NEXT: pcmpeqd %xmm0, %xmm2 +; SSE2-NEXT: movd %xmm2, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB2_2 +; SSE2-NEXT: ## %bb.1: ## %cond.load +; SSE2-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero +; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm3[0],xmm1[1,2,3] +; SSE2-NEXT: LBB2_2: ## %else +; SSE2-NEXT: pextrw $2, %xmm2, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB2_4 +; SSE2-NEXT: ## %bb.3: ## %cond.load1 +; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero +; SSE2-NEXT: shufps {{.*#+}} xmm2 = 
xmm2[0,0],xmm1[0,0] +; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm1[2,3] +; SSE2-NEXT: movaps %xmm2, %xmm1 +; SSE2-NEXT: LBB2_4: ## %else2 +; SSE2-NEXT: xorps %xmm2, %xmm2 +; SSE2-NEXT: pcmpeqd %xmm2, %xmm0 +; SSE2-NEXT: pextrw $4, %xmm0, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB2_6 +; SSE2-NEXT: ## %bb.5: ## %cond.load4 +; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero +; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,0],xmm1[3,0] +; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0,2] +; SSE2-NEXT: LBB2_6: ## %else5 +; SSE2-NEXT: pextrw $6, %xmm0, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB2_8 +; SSE2-NEXT: ## %bb.7: ## %cond.load7 +; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[2,0] +; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0] +; SSE2-NEXT: LBB2_8: ## %else8 +; SSE2-NEXT: movaps %xmm1, %xmm0 +; SSE2-NEXT: retq +; +; SSE42-LABEL: load_v4f32_v4i32: +; SSE42: ## %bb.0: +; SSE42-NEXT: pxor %xmm2, %xmm2 +; SSE42-NEXT: pcmpeqd %xmm0, %xmm2 +; SSE42-NEXT: pextrb $0, %xmm2, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB2_2 +; SSE42-NEXT: ## %bb.1: ## %cond.load +; SSE42-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero +; SSE42-NEXT: blendps {{.*#+}} xmm1 = xmm3[0],xmm1[1,2,3] +; SSE42-NEXT: LBB2_2: ## %else +; SSE42-NEXT: pextrb $4, %xmm2, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB2_4 +; SSE42-NEXT: ## %bb.3: ## %cond.load1 +; SSE42-NEXT: insertps {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[2,3] +; SSE42-NEXT: LBB2_4: ## %else2 +; SSE42-NEXT: pxor %xmm2, %xmm2 +; SSE42-NEXT: pcmpeqd %xmm2, %xmm0 +; SSE42-NEXT: pextrb $8, %xmm0, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB2_6 +; SSE42-NEXT: ## %bb.5: ## %cond.load4 +; SSE42-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1],mem[0],xmm1[3] +; SSE42-NEXT: LBB2_6: ## %else5 +; SSE42-NEXT: pextrb $12, %xmm0, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB2_8 +; SSE42-NEXT: ## %bb.7: ## %cond.load7 +; SSE42-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],mem[0] +; SSE42-NEXT: LBB2_8: ## %else8 +; SSE42-NEXT: movaps %xmm1, %xmm0 +; SSE42-NEXT: retq +; +; AVX1OR2-LABEL: load_v4f32_v4i32: +; AVX1OR2: ## %bb.0: +; AVX1OR2-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX1OR2-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm0 +; AVX1OR2-NEXT: vmaskmovps (%rdi), %xmm0, %xmm2 +; AVX1OR2-NEXT: vblendvps %xmm0, %xmm2, %xmm1, %xmm0 +; AVX1OR2-NEXT: retq ; ; AVX512F-LABEL: load_v4f32_v4i32: ; AVX512F: ## %bb.0: @@ -84,17 +196,91 @@ define <4 x float> @load_v4f32_v4i32(<4 x i32> %trigger, <4 x float>* %addr, <4 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; -; SKX-LABEL: load_v4f32_v4i32: -; SKX: ## %bb.0: -; SKX-NEXT: vptestnmd %xmm0, %xmm0, %k1 -; SKX-NEXT: vblendmps (%rdi), %xmm1, %xmm0 {%k1} -; SKX-NEXT: retq +; AVX512VLBW-LABEL: load_v4f32_v4i32: +; AVX512VLBW: ## %bb.0: +; AVX512VLBW-NEXT: vptestnmd %xmm0, %xmm0, %k1 +; AVX512VLBW-NEXT: vblendmps (%rdi), %xmm1, %xmm0 {%k1} +; AVX512VLBW-NEXT: retq %mask = icmp eq <4 x i32> %trigger, zeroinitializer %res = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %addr, i32 4, <4 x i1>%mask, <4 x float>%dst) ret <4 x float> %res } define <4 x i32> @load_v4i32_v4i32(<4 x i32> %trigger, <4 x i32>* %addr, <4 x i32> %dst) { +; SSE2-LABEL: load_v4i32_v4i32: +; SSE2: ## %bb.0: +; SSE2-NEXT: pxor %xmm2, %xmm2 +; SSE2-NEXT: pcmpeqd %xmm0, %xmm2 +; SSE2-NEXT: movd %xmm2, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB3_2 +; SSE2-NEXT: ## %bb.1: ## %cond.load +; SSE2-NEXT: movss {{.*#+}} xmm3 = 
mem[0],zero,zero,zero +; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm3[0],xmm1[1,2,3] +; SSE2-NEXT: LBB3_2: ## %else +; SSE2-NEXT: pextrw $2, %xmm2, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB3_4 +; SSE2-NEXT: ## %bb.3: ## %cond.load1 +; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero +; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,0],xmm1[0,0] +; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm1[2,3] +; SSE2-NEXT: movaps %xmm2, %xmm1 +; SSE2-NEXT: LBB3_4: ## %else2 +; SSE2-NEXT: xorps %xmm2, %xmm2 +; SSE2-NEXT: pcmpeqd %xmm2, %xmm0 +; SSE2-NEXT: pextrw $4, %xmm0, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB3_6 +; SSE2-NEXT: ## %bb.5: ## %cond.load4 +; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero +; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,0],xmm1[3,0] +; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0,2] +; SSE2-NEXT: LBB3_6: ## %else5 +; SSE2-NEXT: pextrw $6, %xmm0, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB3_8 +; SSE2-NEXT: ## %bb.7: ## %cond.load7 +; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[2,0] +; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0] +; SSE2-NEXT: LBB3_8: ## %else8 +; SSE2-NEXT: movaps %xmm1, %xmm0 +; SSE2-NEXT: retq +; +; SSE42-LABEL: load_v4i32_v4i32: +; SSE42: ## %bb.0: +; SSE42-NEXT: pxor %xmm2, %xmm2 +; SSE42-NEXT: pcmpeqd %xmm0, %xmm2 +; SSE42-NEXT: pextrb $0, %xmm2, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB3_2 +; SSE42-NEXT: ## %bb.1: ## %cond.load +; SSE42-NEXT: pinsrd $0, (%rdi), %xmm1 +; SSE42-NEXT: LBB3_2: ## %else +; SSE42-NEXT: pextrb $4, %xmm2, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB3_4 +; SSE42-NEXT: ## %bb.3: ## %cond.load1 +; SSE42-NEXT: pinsrd $1, 4(%rdi), %xmm1 +; SSE42-NEXT: LBB3_4: ## %else2 +; SSE42-NEXT: pxor %xmm2, %xmm2 +; SSE42-NEXT: pcmpeqd %xmm2, %xmm0 +; SSE42-NEXT: pextrb $8, %xmm0, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB3_6 +; SSE42-NEXT: ## %bb.5: ## %cond.load4 +; SSE42-NEXT: pinsrd $2, 8(%rdi), %xmm1 +; SSE42-NEXT: LBB3_6: ## %else5 +; SSE42-NEXT: pextrb $12, %xmm0, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB3_8 +; SSE42-NEXT: ## %bb.7: ## %cond.load7 +; SSE42-NEXT: pinsrd $3, 12(%rdi), %xmm1 +; SSE42-NEXT: LBB3_8: ## %else8 +; SSE42-NEXT: movdqa %xmm1, %xmm0 +; SSE42-NEXT: retq +; ; AVX1-LABEL: load_v4i32_v4i32: ; AVX1: ## %bb.0: ; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 @@ -123,17 +309,85 @@ define <4 x i32> @load_v4i32_v4i32(<4 x i32> %trigger, <4 x i32>* %addr, <4 x i3 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; -; SKX-LABEL: load_v4i32_v4i32: -; SKX: ## %bb.0: -; SKX-NEXT: vptestnmd %xmm0, %xmm0, %k1 -; SKX-NEXT: vpblendmd (%rdi), %xmm1, %xmm0 {%k1} -; SKX-NEXT: retq +; AVX512VLBW-LABEL: load_v4i32_v4i32: +; AVX512VLBW: ## %bb.0: +; AVX512VLBW-NEXT: vptestnmd %xmm0, %xmm0, %k1 +; AVX512VLBW-NEXT: vpblendmd (%rdi), %xmm1, %xmm0 {%k1} +; AVX512VLBW-NEXT: retq %mask = icmp eq <4 x i32> %trigger, zeroinitializer %res = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %addr, i32 4, <4 x i1>%mask, <4 x i32>%dst) ret <4 x i32> %res } define <4 x double> @load_v4f64_v4i32(<4 x i32> %trigger, <4 x double>* %addr, <4 x double> %dst) { +; SSE2-LABEL: load_v4f64_v4i32: +; SSE2: ## %bb.0: +; SSE2-NEXT: pxor %xmm3, %xmm3 +; SSE2-NEXT: pcmpeqd %xmm0, %xmm3 +; SSE2-NEXT: movd %xmm3, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB4_2 +; SSE2-NEXT: ## %bb.1: ## %cond.load +; SSE2-NEXT: movlpd {{.*#+}} xmm1 = mem[0],xmm1[1] +; SSE2-NEXT: LBB4_2: ## %else +; SSE2-NEXT: 
pextrw $2, %xmm3, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB4_4 +; SSE2-NEXT: ## %bb.3: ## %cond.load1 +; SSE2-NEXT: movhpd {{.*#+}} xmm1 = xmm1[0],mem[0] +; SSE2-NEXT: LBB4_4: ## %else2 +; SSE2-NEXT: pxor %xmm3, %xmm3 +; SSE2-NEXT: pcmpeqd %xmm3, %xmm0 +; SSE2-NEXT: pextrw $4, %xmm0, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB4_6 +; SSE2-NEXT: ## %bb.5: ## %cond.load4 +; SSE2-NEXT: movlpd {{.*#+}} xmm2 = mem[0],xmm2[1] +; SSE2-NEXT: LBB4_6: ## %else5 +; SSE2-NEXT: pextrw $6, %xmm0, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB4_8 +; SSE2-NEXT: ## %bb.7: ## %cond.load7 +; SSE2-NEXT: movhpd {{.*#+}} xmm2 = xmm2[0],mem[0] +; SSE2-NEXT: LBB4_8: ## %else8 +; SSE2-NEXT: movapd %xmm1, %xmm0 +; SSE2-NEXT: movapd %xmm2, %xmm1 +; SSE2-NEXT: retq +; +; SSE42-LABEL: load_v4f64_v4i32: +; SSE42: ## %bb.0: +; SSE42-NEXT: pxor %xmm3, %xmm3 +; SSE42-NEXT: pcmpeqd %xmm0, %xmm3 +; SSE42-NEXT: pextrb $0, %xmm3, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB4_2 +; SSE42-NEXT: ## %bb.1: ## %cond.load +; SSE42-NEXT: movlpd {{.*#+}} xmm1 = mem[0],xmm1[1] +; SSE42-NEXT: LBB4_2: ## %else +; SSE42-NEXT: pextrb $4, %xmm3, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB4_4 +; SSE42-NEXT: ## %bb.3: ## %cond.load1 +; SSE42-NEXT: movhpd {{.*#+}} xmm1 = xmm1[0],mem[0] +; SSE42-NEXT: LBB4_4: ## %else2 +; SSE42-NEXT: pxor %xmm3, %xmm3 +; SSE42-NEXT: pcmpeqd %xmm3, %xmm0 +; SSE42-NEXT: pextrb $8, %xmm0, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB4_6 +; SSE42-NEXT: ## %bb.5: ## %cond.load4 +; SSE42-NEXT: movlpd {{.*#+}} xmm2 = mem[0],xmm2[1] +; SSE42-NEXT: LBB4_6: ## %else5 +; SSE42-NEXT: pextrb $12, %xmm0, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB4_8 +; SSE42-NEXT: ## %bb.7: ## %cond.load7 +; SSE42-NEXT: movhpd {{.*#+}} xmm2 = xmm2[0],mem[0] +; SSE42-NEXT: LBB4_8: ## %else8 +; SSE42-NEXT: movapd %xmm1, %xmm0 +; SSE42-NEXT: movapd %xmm2, %xmm1 +; SSE42-NEXT: retq +; ; AVX1-LABEL: load_v4f64_v4i32: ; AVX1: ## %bb.0: ; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 @@ -166,17 +420,89 @@ define <4 x double> @load_v4f64_v4i32(<4 x i32> %trigger, <4 x double>* %addr, < ; AVX512F-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512F-NEXT: retq ; -; SKX-LABEL: load_v4f64_v4i32: -; SKX: ## %bb.0: -; SKX-NEXT: vptestnmd %xmm0, %xmm0, %k1 -; SKX-NEXT: vblendmpd (%rdi), %ymm1, %ymm0 {%k1} -; SKX-NEXT: retq +; AVX512VLBW-LABEL: load_v4f64_v4i32: +; AVX512VLBW: ## %bb.0: +; AVX512VLBW-NEXT: vptestnmd %xmm0, %xmm0, %k1 +; AVX512VLBW-NEXT: vblendmpd (%rdi), %ymm1, %ymm0 {%k1} +; AVX512VLBW-NEXT: retq %mask = icmp eq <4 x i32> %trigger, zeroinitializer %res = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>* %addr, i32 32, <4 x i1>%mask, <4 x double>%dst) ret <4 x double> %res } define <4 x double> @load_zero_v4f64_v4i32(<4 x i32> %trigger, <4 x double>* %addr, <4 x double> %dst) { +; SSE2-LABEL: load_zero_v4f64_v4i32: +; SSE2: ## %bb.0: +; SSE2-NEXT: movdqa %xmm0, %xmm2 +; SSE2-NEXT: pxor %xmm0, %xmm0 +; SSE2-NEXT: movdqa %xmm2, %xmm3 +; SSE2-NEXT: pcmpeqd %xmm0, %xmm3 +; SSE2-NEXT: movd %xmm3, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: xorpd %xmm1, %xmm1 +; SSE2-NEXT: je LBB5_2 +; SSE2-NEXT: ## %bb.1: ## %cond.load +; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero +; SSE2-NEXT: xorpd %xmm1, %xmm1 +; SSE2-NEXT: LBB5_2: ## %else +; SSE2-NEXT: pextrw $2, %xmm3, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB5_4 +; SSE2-NEXT: ## %bb.3: ## %cond.load1 +; SSE2-NEXT: movhpd {{.*#+}} xmm0 = xmm0[0],mem[0] +; SSE2-NEXT: LBB5_4: ## %else2 +; SSE2-NEXT: pxor 
%xmm3, %xmm3 +; SSE2-NEXT: pcmpeqd %xmm3, %xmm2 +; SSE2-NEXT: pextrw $4, %xmm2, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB5_6 +; SSE2-NEXT: ## %bb.5: ## %cond.load4 +; SSE2-NEXT: movlpd {{.*#+}} xmm1 = mem[0],xmm1[1] +; SSE2-NEXT: LBB5_6: ## %else5 +; SSE2-NEXT: pextrw $6, %xmm2, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB5_8 +; SSE2-NEXT: ## %bb.7: ## %cond.load7 +; SSE2-NEXT: movhpd {{.*#+}} xmm1 = xmm1[0],mem[0] +; SSE2-NEXT: LBB5_8: ## %else8 +; SSE2-NEXT: retq +; +; SSE42-LABEL: load_zero_v4f64_v4i32: +; SSE42: ## %bb.0: +; SSE42-NEXT: movdqa %xmm0, %xmm2 +; SSE42-NEXT: pxor %xmm0, %xmm0 +; SSE42-NEXT: movdqa %xmm2, %xmm3 +; SSE42-NEXT: pcmpeqd %xmm0, %xmm3 +; SSE42-NEXT: pextrb $0, %xmm3, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: xorpd %xmm1, %xmm1 +; SSE42-NEXT: je LBB5_2 +; SSE42-NEXT: ## %bb.1: ## %cond.load +; SSE42-NEXT: movq {{.*#+}} xmm0 = mem[0],zero +; SSE42-NEXT: xorpd %xmm1, %xmm1 +; SSE42-NEXT: LBB5_2: ## %else +; SSE42-NEXT: pextrb $4, %xmm3, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB5_4 +; SSE42-NEXT: ## %bb.3: ## %cond.load1 +; SSE42-NEXT: movhpd {{.*#+}} xmm0 = xmm0[0],mem[0] +; SSE42-NEXT: LBB5_4: ## %else2 +; SSE42-NEXT: pxor %xmm3, %xmm3 +; SSE42-NEXT: pcmpeqd %xmm3, %xmm2 +; SSE42-NEXT: pextrb $8, %xmm2, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB5_6 +; SSE42-NEXT: ## %bb.5: ## %cond.load4 +; SSE42-NEXT: movlpd {{.*#+}} xmm1 = mem[0],xmm1[1] +; SSE42-NEXT: LBB5_6: ## %else5 +; SSE42-NEXT: pextrb $12, %xmm2, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB5_8 +; SSE42-NEXT: ## %bb.7: ## %cond.load7 +; SSE42-NEXT: movhpd {{.*#+}} xmm1 = xmm1[0],mem[0] +; SSE42-NEXT: LBB5_8: ## %else8 +; SSE42-NEXT: retq +; ; AVX1-LABEL: load_zero_v4f64_v4i32: ; AVX1: ## %bb.0: ; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 @@ -206,17 +532,163 @@ define <4 x double> @load_zero_v4f64_v4i32(<4 x i32> %trigger, <4 x double>* %ad ; AVX512F-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512F-NEXT: retq ; -; SKX-LABEL: load_zero_v4f64_v4i32: -; SKX: ## %bb.0: -; SKX-NEXT: vptestnmd %xmm0, %xmm0, %k1 -; SKX-NEXT: vmovapd (%rdi), %ymm0 {%k1} {z} -; SKX-NEXT: retq +; AVX512VLBW-LABEL: load_zero_v4f64_v4i32: +; AVX512VLBW: ## %bb.0: +; AVX512VLBW-NEXT: vptestnmd %xmm0, %xmm0, %k1 +; AVX512VLBW-NEXT: vmovapd (%rdi), %ymm0 {%k1} {z} +; AVX512VLBW-NEXT: retq %mask = icmp eq <4 x i32> %trigger, zeroinitializer %res = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>* %addr, i32 32, <4 x i1>%mask, <4 x double>zeroinitializer) ret <4 x double> %res } define <8 x float> @load_v8f32_v8i32(<8 x i32> %trigger, <8 x float>* %addr, <8 x float> %dst) { +; SSE2-LABEL: load_v8f32_v8i32: +; SSE2: ## %bb.0: +; SSE2-NEXT: pxor %xmm4, %xmm4 +; SSE2-NEXT: pcmpeqd %xmm0, %xmm4 +; SSE2-NEXT: movdqa %xmm4, %xmm5 +; SSE2-NEXT: packssdw %xmm0, %xmm5 +; SSE2-NEXT: movd %xmm5, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB6_2 +; SSE2-NEXT: ## %bb.1: ## %cond.load +; SSE2-NEXT: movss {{.*#+}} xmm5 = mem[0],zero,zero,zero +; SSE2-NEXT: movss {{.*#+}} xmm2 = xmm5[0],xmm2[1,2,3] +; SSE2-NEXT: LBB6_2: ## %else +; SSE2-NEXT: packssdw %xmm0, %xmm4 +; SSE2-NEXT: movd %xmm4, %eax +; SSE2-NEXT: shrl $16, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB6_4 +; SSE2-NEXT: ## %bb.3: ## %cond.load1 +; SSE2-NEXT: movss {{.*#+}} xmm4 = mem[0],zero,zero,zero +; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,0],xmm2[0,0] +; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[2,0],xmm2[2,3] +; SSE2-NEXT: movaps %xmm4, %xmm2 +; SSE2-NEXT: LBB6_4: ## %else2 +; SSE2-NEXT: 
xorps %xmm4, %xmm4 +; SSE2-NEXT: pcmpeqd %xmm4, %xmm0 +; SSE2-NEXT: pextrw $4, %xmm0, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB6_6 +; SSE2-NEXT: ## %bb.5: ## %cond.load4 +; SSE2-NEXT: movss {{.*#+}} xmm4 = mem[0],zero,zero,zero +; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,0],xmm2[3,0] +; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm4[0,2] +; SSE2-NEXT: LBB6_6: ## %else5 +; SSE2-NEXT: pextrw $6, %xmm0, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB6_8 +; SSE2-NEXT: ## %bb.7: ## %cond.load7 +; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm2[2,0] +; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm0[2,0] +; SSE2-NEXT: LBB6_8: ## %else8 +; SSE2-NEXT: xorps %xmm0, %xmm0 +; SSE2-NEXT: pcmpeqd %xmm1, %xmm0 +; SSE2-NEXT: pextrw $0, %xmm0, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB6_10 +; SSE2-NEXT: ## %bb.9: ## %cond.load10 +; SSE2-NEXT: movss {{.*#+}} xmm4 = mem[0],zero,zero,zero +; SSE2-NEXT: movss {{.*#+}} xmm3 = xmm4[0],xmm3[1,2,3] +; SSE2-NEXT: LBB6_10: ## %else11 +; SSE2-NEXT: pextrw $2, %xmm0, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB6_12 +; SSE2-NEXT: ## %bb.11: ## %cond.load13 +; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm3[0,0] +; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm3[2,3] +; SSE2-NEXT: movaps %xmm0, %xmm3 +; SSE2-NEXT: LBB6_12: ## %else14 +; SSE2-NEXT: xorps %xmm0, %xmm0 +; SSE2-NEXT: pcmpeqd %xmm0, %xmm1 +; SSE2-NEXT: pextrw $4, %xmm1, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB6_14 +; SSE2-NEXT: ## %bb.13: ## %cond.load16 +; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm3[3,0] +; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm0[0,2] +; SSE2-NEXT: LBB6_14: ## %else17 +; SSE2-NEXT: pextrw $6, %xmm1, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB6_16 +; SSE2-NEXT: ## %bb.15: ## %cond.load19 +; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm3[2,0] +; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm0[2,0] +; SSE2-NEXT: LBB6_16: ## %else20 +; SSE2-NEXT: movaps %xmm2, %xmm0 +; SSE2-NEXT: movaps %xmm3, %xmm1 +; SSE2-NEXT: retq +; +; SSE42-LABEL: load_v8f32_v8i32: +; SSE42: ## %bb.0: +; SSE42-NEXT: pxor %xmm4, %xmm4 +; SSE42-NEXT: pcmpeqd %xmm0, %xmm4 +; SSE42-NEXT: pextrb $0, %xmm4, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB6_2 +; SSE42-NEXT: ## %bb.1: ## %cond.load +; SSE42-NEXT: movss {{.*#+}} xmm5 = mem[0],zero,zero,zero +; SSE42-NEXT: blendps {{.*#+}} xmm2 = xmm5[0],xmm2[1,2,3] +; SSE42-NEXT: LBB6_2: ## %else +; SSE42-NEXT: pextrb $4, %xmm4, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB6_4 +; SSE42-NEXT: ## %bb.3: ## %cond.load1 +; SSE42-NEXT: insertps {{.*#+}} xmm2 = xmm2[0],mem[0],xmm2[2,3] +; SSE42-NEXT: LBB6_4: ## %else2 +; SSE42-NEXT: pxor %xmm4, %xmm4 +; SSE42-NEXT: pcmpeqd %xmm4, %xmm0 +; SSE42-NEXT: pextrb $8, %xmm0, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB6_6 +; SSE42-NEXT: ## %bb.5: ## %cond.load4 +; SSE42-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1],mem[0],xmm2[3] +; SSE42-NEXT: LBB6_6: ## %else5 +; SSE42-NEXT: pextrb $12, %xmm0, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB6_8 +; SSE42-NEXT: ## %bb.7: ## %cond.load7 +; SSE42-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],mem[0] +; SSE42-NEXT: LBB6_8: ## %else8 +; SSE42-NEXT: pxor %xmm0, %xmm0 +; SSE42-NEXT: pcmpeqd %xmm1, %xmm0 +; SSE42-NEXT: pextrb $0, %xmm0, %eax +; SSE42-NEXT: testb 
$1, %al +; SSE42-NEXT: je LBB6_10 +; SSE42-NEXT: ## %bb.9: ## %cond.load10 +; SSE42-NEXT: movd {{.*#+}} xmm4 = mem[0],zero,zero,zero +; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm4[0,1],xmm3[2,3,4,5,6,7] +; SSE42-NEXT: LBB6_10: ## %else11 +; SSE42-NEXT: pextrb $4, %xmm0, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB6_12 +; SSE42-NEXT: ## %bb.11: ## %cond.load13 +; SSE42-NEXT: insertps {{.*#+}} xmm3 = xmm3[0],mem[0],xmm3[2,3] +; SSE42-NEXT: LBB6_12: ## %else14 +; SSE42-NEXT: pxor %xmm0, %xmm0 +; SSE42-NEXT: pcmpeqd %xmm0, %xmm1 +; SSE42-NEXT: pextrb $8, %xmm1, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB6_14 +; SSE42-NEXT: ## %bb.13: ## %cond.load16 +; SSE42-NEXT: insertps {{.*#+}} xmm3 = xmm3[0,1],mem[0],xmm3[3] +; SSE42-NEXT: LBB6_14: ## %else17 +; SSE42-NEXT: pextrb $12, %xmm1, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB6_16 +; SSE42-NEXT: ## %bb.15: ## %cond.load19 +; SSE42-NEXT: insertps {{.*#+}} xmm3 = xmm3[0,1,2],mem[0] +; SSE42-NEXT: LBB6_16: ## %else20 +; SSE42-NEXT: movaps %xmm2, %xmm0 +; SSE42-NEXT: movaps %xmm3, %xmm1 +; SSE42-NEXT: retq +; ; AVX1-LABEL: load_v8f32_v8i32: ; AVX1: ## %bb.0: ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 @@ -247,17 +719,141 @@ define <8 x float> @load_v8f32_v8i32(<8 x i32> %trigger, <8 x float>* %addr, <8 ; AVX512F-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512F-NEXT: retq ; -; SKX-LABEL: load_v8f32_v8i32: -; SKX: ## %bb.0: -; SKX-NEXT: vptestnmd %ymm0, %ymm0, %k1 -; SKX-NEXT: vblendmps (%rdi), %ymm1, %ymm0 {%k1} -; SKX-NEXT: retq +; AVX512VLBW-LABEL: load_v8f32_v8i32: +; AVX512VLBW: ## %bb.0: +; AVX512VLBW-NEXT: vptestnmd %ymm0, %ymm0, %k1 +; AVX512VLBW-NEXT: vblendmps (%rdi), %ymm1, %ymm0 {%k1} +; AVX512VLBW-NEXT: retq %mask = icmp eq <8 x i32> %trigger, zeroinitializer %res = call <8 x float> @llvm.masked.load.v8f32.p0v8f32(<8 x float>* %addr, i32 32, <8 x i1>%mask, <8 x float>%dst) ret <8 x float> %res } define <8 x i32> @load_v8i32_v8i1(<8 x i1> %mask, <8 x i32>* %addr, <8 x i32> %dst) { +; SSE2-LABEL: load_v8i32_v8i1: +; SSE2: ## %bb.0: +; SSE2-NEXT: movd %xmm0, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB7_2 +; SSE2-NEXT: ## %bb.1: ## %cond.load +; SSE2-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero +; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm3[0],xmm1[1,2,3] +; SSE2-NEXT: LBB7_2: ## %else +; SSE2-NEXT: shrl $16, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB7_4 +; SSE2-NEXT: ## %bb.3: ## %cond.load1 +; SSE2-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero +; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,0],xmm1[0,0] +; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm1[2,3] +; SSE2-NEXT: movaps %xmm3, %xmm1 +; SSE2-NEXT: LBB7_4: ## %else2 +; SSE2-NEXT: pextrw $2, %xmm0, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB7_6 +; SSE2-NEXT: ## %bb.5: ## %cond.load4 +; SSE2-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero +; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,0],xmm1[3,0] +; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm3[0,2] +; SSE2-NEXT: LBB7_6: ## %else5 +; SSE2-NEXT: pextrw $3, %xmm0, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB7_8 +; SSE2-NEXT: ## %bb.7: ## %cond.load7 +; SSE2-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero +; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,0],xmm1[2,0] +; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,0] +; SSE2-NEXT: LBB7_8: ## %else8 +; SSE2-NEXT: pextrw $4, %xmm0, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB7_10 +; SSE2-NEXT: ## %bb.9: ## %cond.load10 +; SSE2-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero +; SSE2-NEXT: movss 
{{.*#+}} xmm2 = xmm3[0],xmm2[1,2,3] +; SSE2-NEXT: LBB7_10: ## %else11 +; SSE2-NEXT: pextrw $5, %xmm0, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB7_12 +; SSE2-NEXT: ## %bb.11: ## %cond.load13 +; SSE2-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero +; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,0],xmm2[0,0] +; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm2[2,3] +; SSE2-NEXT: movaps %xmm3, %xmm2 +; SSE2-NEXT: LBB7_12: ## %else14 +; SSE2-NEXT: pextrw $6, %xmm0, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB7_14 +; SSE2-NEXT: ## %bb.13: ## %cond.load16 +; SSE2-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero +; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,0],xmm2[3,0] +; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm3[0,2] +; SSE2-NEXT: LBB7_14: ## %else17 +; SSE2-NEXT: pextrw $7, %xmm0, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB7_16 +; SSE2-NEXT: ## %bb.15: ## %cond.load19 +; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm2[2,0] +; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm0[2,0] +; SSE2-NEXT: LBB7_16: ## %else20 +; SSE2-NEXT: movaps %xmm1, %xmm0 +; SSE2-NEXT: movaps %xmm2, %xmm1 +; SSE2-NEXT: retq +; +; SSE42-LABEL: load_v8i32_v8i1: +; SSE42: ## %bb.0: +; SSE42-NEXT: pextrb $0, %xmm0, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB7_2 +; SSE42-NEXT: ## %bb.1: ## %cond.load +; SSE42-NEXT: pinsrd $0, (%rdi), %xmm1 +; SSE42-NEXT: LBB7_2: ## %else +; SSE42-NEXT: pextrb $2, %xmm0, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB7_4 +; SSE42-NEXT: ## %bb.3: ## %cond.load1 +; SSE42-NEXT: pinsrd $1, 4(%rdi), %xmm1 +; SSE42-NEXT: LBB7_4: ## %else2 +; SSE42-NEXT: pextrb $4, %xmm0, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB7_6 +; SSE42-NEXT: ## %bb.5: ## %cond.load4 +; SSE42-NEXT: pinsrd $2, 8(%rdi), %xmm1 +; SSE42-NEXT: LBB7_6: ## %else5 +; SSE42-NEXT: pextrb $6, %xmm0, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB7_8 +; SSE42-NEXT: ## %bb.7: ## %cond.load7 +; SSE42-NEXT: pinsrd $3, 12(%rdi), %xmm1 +; SSE42-NEXT: LBB7_8: ## %else8 +; SSE42-NEXT: pextrb $8, %xmm0, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB7_10 +; SSE42-NEXT: ## %bb.9: ## %cond.load10 +; SSE42-NEXT: pinsrd $0, 16(%rdi), %xmm2 +; SSE42-NEXT: LBB7_10: ## %else11 +; SSE42-NEXT: pextrb $10, %xmm0, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB7_12 +; SSE42-NEXT: ## %bb.11: ## %cond.load13 +; SSE42-NEXT: pinsrd $1, 20(%rdi), %xmm2 +; SSE42-NEXT: LBB7_12: ## %else14 +; SSE42-NEXT: pextrb $12, %xmm0, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB7_14 +; SSE42-NEXT: ## %bb.13: ## %cond.load16 +; SSE42-NEXT: pinsrd $2, 24(%rdi), %xmm2 +; SSE42-NEXT: LBB7_14: ## %else17 +; SSE42-NEXT: pextrb $14, %xmm0, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB7_16 +; SSE42-NEXT: ## %bb.15: ## %cond.load19 +; SSE42-NEXT: pinsrd $3, 28(%rdi), %xmm2 +; SSE42-NEXT: LBB7_16: ## %else20 +; SSE42-NEXT: movdqa %xmm1, %xmm0 +; SSE42-NEXT: movdqa %xmm2, %xmm1 +; SSE42-NEXT: retq +; ; AVX1-LABEL: load_v8i32_v8i1: ; AVX1: ## %bb.0: ; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero @@ -290,17 +886,145 @@ define <8 x i32> @load_v8i32_v8i1(<8 x i1> %mask, <8 x i32>* %addr, <8 x i32> %d ; AVX512F-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512F-NEXT: retq ; -; SKX-LABEL: load_v8i32_v8i1: -; SKX: ## %bb.0: -; SKX-NEXT: vpsllw $15, %xmm0, %xmm0 -; SKX-NEXT: vpmovw2m %xmm0, %k1 -; SKX-NEXT: vpblendmd (%rdi), %ymm1, %ymm0 {%k1} -; SKX-NEXT: retq +; 
AVX512VLBW-LABEL: load_v8i32_v8i1: +; AVX512VLBW: ## %bb.0: +; AVX512VLBW-NEXT: vpsllw $15, %xmm0, %xmm0 +; AVX512VLBW-NEXT: vpmovw2m %xmm0, %k1 +; AVX512VLBW-NEXT: vpblendmd (%rdi), %ymm1, %ymm0 {%k1} +; AVX512VLBW-NEXT: retq %res = call <8 x i32> @llvm.masked.load.v8i32.p0v8i32(<8 x i32>* %addr, i32 4, <8 x i1>%mask, <8 x i32>%dst) ret <8 x i32> %res } define <8 x float> @load_zero_v8f32_v8i1(<8 x i1> %mask, <8 x float>* %addr) { +; SSE2-LABEL: load_zero_v8f32_v8i1: +; SSE2: ## %bb.0: +; SSE2-NEXT: movdqa %xmm0, %xmm2 +; SSE2-NEXT: movd %xmm0, %eax +; SSE2-NEXT: pxor %xmm0, %xmm0 +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: xorps %xmm1, %xmm1 +; SSE2-NEXT: je LBB8_2 +; SSE2-NEXT: ## %bb.1: ## %cond.load +; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-NEXT: xorps %xmm1, %xmm1 +; SSE2-NEXT: LBB8_2: ## %else +; SSE2-NEXT: shrl $16, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB8_4 +; SSE2-NEXT: ## %bb.3: ## %cond.load1 +; SSE2-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero +; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,0],xmm0[0,0] +; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm0[2,3] +; SSE2-NEXT: movaps %xmm3, %xmm0 +; SSE2-NEXT: LBB8_4: ## %else2 +; SSE2-NEXT: pextrw $2, %xmm2, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB8_6 +; SSE2-NEXT: ## %bb.5: ## %cond.load4 +; SSE2-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero +; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,0],xmm0[3,0] +; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm3[0,2] +; SSE2-NEXT: LBB8_6: ## %else5 +; SSE2-NEXT: pextrw $3, %xmm2, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB8_8 +; SSE2-NEXT: ## %bb.7: ## %cond.load7 +; SSE2-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero +; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,0],xmm0[2,0] +; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,0] +; SSE2-NEXT: LBB8_8: ## %else8 +; SSE2-NEXT: pextrw $4, %xmm2, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB8_10 +; SSE2-NEXT: ## %bb.9: ## %cond.load10 +; SSE2-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero +; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm3[0],xmm1[1,2,3] +; SSE2-NEXT: LBB8_10: ## %else11 +; SSE2-NEXT: pextrw $5, %xmm2, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB8_12 +; SSE2-NEXT: ## %bb.11: ## %cond.load13 +; SSE2-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero +; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,0],xmm1[0,0] +; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm1[2,3] +; SSE2-NEXT: movaps %xmm3, %xmm1 +; SSE2-NEXT: LBB8_12: ## %else14 +; SSE2-NEXT: pextrw $6, %xmm2, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB8_14 +; SSE2-NEXT: ## %bb.13: ## %cond.load16 +; SSE2-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero +; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,0],xmm1[3,0] +; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm3[0,2] +; SSE2-NEXT: LBB8_14: ## %else17 +; SSE2-NEXT: pextrw $7, %xmm2, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB8_16 +; SSE2-NEXT: ## %bb.15: ## %cond.load19 +; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero +; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,0],xmm1[2,0] +; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,0] +; SSE2-NEXT: LBB8_16: ## %else20 +; SSE2-NEXT: retq +; +; SSE42-LABEL: load_zero_v8f32_v8i1: +; SSE42: ## %bb.0: +; SSE42-NEXT: movdqa %xmm0, %xmm2 +; SSE42-NEXT: pextrb $0, %xmm0, %eax +; SSE42-NEXT: pxor %xmm0, %xmm0 +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: xorps %xmm1, %xmm1 +; SSE42-NEXT: je LBB8_2 +; SSE42-NEXT: ## %bb.1: ## %cond.load +; SSE42-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; 
SSE42-NEXT: xorps %xmm1, %xmm1 +; SSE42-NEXT: LBB8_2: ## %else +; SSE42-NEXT: pextrb $2, %xmm2, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB8_4 +; SSE42-NEXT: ## %bb.3: ## %cond.load1 +; SSE42-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3] +; SSE42-NEXT: LBB8_4: ## %else2 +; SSE42-NEXT: pextrb $4, %xmm2, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB8_6 +; SSE42-NEXT: ## %bb.5: ## %cond.load4 +; SSE42-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3] +; SSE42-NEXT: LBB8_6: ## %else5 +; SSE42-NEXT: pextrb $6, %xmm2, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB8_8 +; SSE42-NEXT: ## %bb.7: ## %cond.load7 +; SSE42-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0] +; SSE42-NEXT: LBB8_8: ## %else8 +; SSE42-NEXT: pextrb $8, %xmm2, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB8_10 +; SSE42-NEXT: ## %bb.9: ## %cond.load10 +; SSE42-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero +; SSE42-NEXT: blendps {{.*#+}} xmm1 = xmm3[0],xmm1[1,2,3] +; SSE42-NEXT: LBB8_10: ## %else11 +; SSE42-NEXT: pextrb $10, %xmm2, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB8_12 +; SSE42-NEXT: ## %bb.11: ## %cond.load13 +; SSE42-NEXT: insertps {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[2,3] +; SSE42-NEXT: LBB8_12: ## %else14 +; SSE42-NEXT: pextrb $12, %xmm2, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB8_14 +; SSE42-NEXT: ## %bb.13: ## %cond.load16 +; SSE42-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1],mem[0],xmm1[3] +; SSE42-NEXT: LBB8_14: ## %else17 +; SSE42-NEXT: pextrb $14, %xmm2, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB8_16 +; SSE42-NEXT: ## %bb.15: ## %cond.load19 +; SSE42-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],mem[0] +; SSE42-NEXT: LBB8_16: ## %else20 +; SSE42-NEXT: retq +; ; AVX1-LABEL: load_zero_v8f32_v8i1: ; AVX1: ## %bb.0: ; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero @@ -330,17 +1054,144 @@ define <8 x float> @load_zero_v8f32_v8i1(<8 x i1> %mask, <8 x float>* %addr) { ; AVX512F-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512F-NEXT: retq ; -; SKX-LABEL: load_zero_v8f32_v8i1: -; SKX: ## %bb.0: -; SKX-NEXT: vpsllw $15, %xmm0, %xmm0 -; SKX-NEXT: vpmovw2m %xmm0, %k1 -; SKX-NEXT: vmovaps (%rdi), %ymm0 {%k1} {z} -; SKX-NEXT: retq +; AVX512VLBW-LABEL: load_zero_v8f32_v8i1: +; AVX512VLBW: ## %bb.0: +; AVX512VLBW-NEXT: vpsllw $15, %xmm0, %xmm0 +; AVX512VLBW-NEXT: vpmovw2m %xmm0, %k1 +; AVX512VLBW-NEXT: vmovaps (%rdi), %ymm0 {%k1} {z} +; AVX512VLBW-NEXT: retq %res = call <8 x float> @llvm.masked.load.v8f32.p0v8f32(<8 x float>* %addr, i32 32, <8 x i1> %mask, <8 x float> zeroinitializer) ret <8 x float> %res } define <8 x i32> @load_zero_v8i32_v8i1(<8 x i1> %mask, <8 x i32>* %addr) { +; SSE2-LABEL: load_zero_v8i32_v8i1: +; SSE2: ## %bb.0: +; SSE2-NEXT: movdqa %xmm0, %xmm2 +; SSE2-NEXT: movd %xmm0, %eax +; SSE2-NEXT: pxor %xmm0, %xmm0 +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: xorps %xmm1, %xmm1 +; SSE2-NEXT: je LBB9_2 +; SSE2-NEXT: ## %bb.1: ## %cond.load +; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-NEXT: xorps %xmm1, %xmm1 +; SSE2-NEXT: LBB9_2: ## %else +; SSE2-NEXT: shrl $16, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB9_4 +; SSE2-NEXT: ## %bb.3: ## %cond.load1 +; SSE2-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero +; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,0],xmm0[0,0] +; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm0[2,3] +; SSE2-NEXT: movaps %xmm3, %xmm0 +; SSE2-NEXT: LBB9_4: ## %else2 +; SSE2-NEXT: pextrw $2, %xmm2, %eax +; SSE2-NEXT: testb $1, 
%al +; SSE2-NEXT: je LBB9_6 +; SSE2-NEXT: ## %bb.5: ## %cond.load4 +; SSE2-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero +; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,0],xmm0[3,0] +; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm3[0,2] +; SSE2-NEXT: LBB9_6: ## %else5 +; SSE2-NEXT: pextrw $3, %xmm2, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB9_8 +; SSE2-NEXT: ## %bb.7: ## %cond.load7 +; SSE2-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero +; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,0],xmm0[2,0] +; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,0] +; SSE2-NEXT: LBB9_8: ## %else8 +; SSE2-NEXT: pextrw $4, %xmm2, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB9_10 +; SSE2-NEXT: ## %bb.9: ## %cond.load10 +; SSE2-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero +; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm3[0],xmm1[1,2,3] +; SSE2-NEXT: LBB9_10: ## %else11 +; SSE2-NEXT: pextrw $5, %xmm2, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB9_12 +; SSE2-NEXT: ## %bb.11: ## %cond.load13 +; SSE2-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero +; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,0],xmm1[0,0] +; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm1[2,3] +; SSE2-NEXT: movaps %xmm3, %xmm1 +; SSE2-NEXT: LBB9_12: ## %else14 +; SSE2-NEXT: pextrw $6, %xmm2, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB9_14 +; SSE2-NEXT: ## %bb.13: ## %cond.load16 +; SSE2-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero +; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,0],xmm1[3,0] +; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm3[0,2] +; SSE2-NEXT: LBB9_14: ## %else17 +; SSE2-NEXT: pextrw $7, %xmm2, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB9_16 +; SSE2-NEXT: ## %bb.15: ## %cond.load19 +; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero +; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,0],xmm1[2,0] +; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,0] +; SSE2-NEXT: LBB9_16: ## %else20 +; SSE2-NEXT: retq +; +; SSE42-LABEL: load_zero_v8i32_v8i1: +; SSE42: ## %bb.0: +; SSE42-NEXT: movdqa %xmm0, %xmm2 +; SSE42-NEXT: pextrb $0, %xmm0, %eax +; SSE42-NEXT: pxor %xmm0, %xmm0 +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: pxor %xmm1, %xmm1 +; SSE42-NEXT: je LBB9_2 +; SSE42-NEXT: ## %bb.1: ## %cond.load +; SSE42-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE42-NEXT: pxor %xmm1, %xmm1 +; SSE42-NEXT: LBB9_2: ## %else +; SSE42-NEXT: pextrb $2, %xmm2, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB9_4 +; SSE42-NEXT: ## %bb.3: ## %cond.load1 +; SSE42-NEXT: pinsrd $1, 4(%rdi), %xmm0 +; SSE42-NEXT: LBB9_4: ## %else2 +; SSE42-NEXT: pextrb $4, %xmm2, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB9_6 +; SSE42-NEXT: ## %bb.5: ## %cond.load4 +; SSE42-NEXT: pinsrd $2, 8(%rdi), %xmm0 +; SSE42-NEXT: LBB9_6: ## %else5 +; SSE42-NEXT: pextrb $6, %xmm2, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB9_8 +; SSE42-NEXT: ## %bb.7: ## %cond.load7 +; SSE42-NEXT: pinsrd $3, 12(%rdi), %xmm0 +; SSE42-NEXT: LBB9_8: ## %else8 +; SSE42-NEXT: pextrb $8, %xmm2, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB9_10 +; SSE42-NEXT: ## %bb.9: ## %cond.load10 +; SSE42-NEXT: pinsrd $0, 16(%rdi), %xmm1 +; SSE42-NEXT: LBB9_10: ## %else11 +; SSE42-NEXT: pextrb $10, %xmm2, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB9_12 +; SSE42-NEXT: ## %bb.11: ## %cond.load13 +; SSE42-NEXT: pinsrd $1, 20(%rdi), %xmm1 +; SSE42-NEXT: LBB9_12: ## %else14 +; SSE42-NEXT: pextrb $12, %xmm2, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB9_14 +; SSE42-NEXT: ## %bb.13: ## %cond.load16 +; SSE42-NEXT: 
pinsrd $2, 24(%rdi), %xmm1 +; SSE42-NEXT: LBB9_14: ## %else17 +; SSE42-NEXT: pextrb $14, %xmm2, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB9_16 +; SSE42-NEXT: ## %bb.15: ## %cond.load19 +; SSE42-NEXT: pinsrd $3, 28(%rdi), %xmm1 +; SSE42-NEXT: LBB9_16: ## %else20 +; SSE42-NEXT: retq +; ; AVX1-LABEL: load_zero_v8i32_v8i1: ; AVX1: ## %bb.0: ; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero @@ -370,17 +1221,67 @@ define <8 x i32> @load_zero_v8i32_v8i1(<8 x i1> %mask, <8 x i32>* %addr) { ; AVX512F-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512F-NEXT: retq ; -; SKX-LABEL: load_zero_v8i32_v8i1: -; SKX: ## %bb.0: -; SKX-NEXT: vpsllw $15, %xmm0, %xmm0 -; SKX-NEXT: vpmovw2m %xmm0, %k1 -; SKX-NEXT: vmovdqu32 (%rdi), %ymm0 {%k1} {z} -; SKX-NEXT: retq +; AVX512VLBW-LABEL: load_zero_v8i32_v8i1: +; AVX512VLBW: ## %bb.0: +; AVX512VLBW-NEXT: vpsllw $15, %xmm0, %xmm0 +; AVX512VLBW-NEXT: vpmovw2m %xmm0, %k1 +; AVX512VLBW-NEXT: vmovdqu32 (%rdi), %ymm0 {%k1} {z} +; AVX512VLBW-NEXT: retq %res = call <8 x i32> @llvm.masked.load.v8i32.p0v8i32(<8 x i32>* %addr, i32 4, <8 x i1> %mask, <8 x i32> zeroinitializer) ret <8 x i32> %res } define <2 x float> @load_v2f32_v2i32(<2 x i32> %trigger, <2 x float>* %addr, <2 x float> %dst) { +; SSE2-LABEL: load_v2f32_v2i32: +; SSE2: ## %bb.0: +; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 +; SSE2-NEXT: pxor %xmm2, %xmm2 +; SSE2-NEXT: pcmpeqd %xmm0, %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,0,3,2] +; SSE2-NEXT: pand %xmm2, %xmm0 +; SSE2-NEXT: movd %xmm0, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB10_2 +; SSE2-NEXT: ## %bb.1: ## %cond.load +; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero +; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm2[0],xmm1[1,2,3] +; SSE2-NEXT: LBB10_2: ## %else +; SSE2-NEXT: pextrw $4, %xmm0, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB10_4 +; SSE2-NEXT: ## %bb.3: ## %cond.load1 +; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0] +; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3] +; SSE2-NEXT: movaps %xmm0, %xmm1 +; SSE2-NEXT: LBB10_4: ## %else2 +; SSE2-NEXT: movaps %xmm1, %xmm0 +; SSE2-NEXT: retq +; +; SSE42-LABEL: load_v2f32_v2i32: +; SSE42: ## %bb.0: +; SSE42-NEXT: pxor %xmm2, %xmm2 +; SSE42-NEXT: movdqa %xmm0, %xmm3 +; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7] +; SSE42-NEXT: pcmpeqq %xmm2, %xmm3 +; SSE42-NEXT: pextrb $0, %xmm3, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB10_2 +; SSE42-NEXT: ## %bb.1: ## %cond.load +; SSE42-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero +; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm3[0,1],xmm1[2,3,4,5,6,7] +; SSE42-NEXT: LBB10_2: ## %else +; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7] +; SSE42-NEXT: pcmpeqq %xmm2, %xmm0 +; SSE42-NEXT: pextrb $8, %xmm0, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB10_4 +; SSE42-NEXT: ## %bb.3: ## %cond.load1 +; SSE42-NEXT: insertps {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[2,3] +; SSE42-NEXT: LBB10_4: ## %else2 +; SSE42-NEXT: movaps %xmm1, %xmm0 +; SSE42-NEXT: retq +; ; AVX1-LABEL: load_v2f32_v2i32: ; AVX1: ## %bb.0: ; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 @@ -414,19 +1315,70 @@ define <2 x float> @load_v2f32_v2i32(<2 x i32> %trigger, <2 x float>* %addr, <2 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; -; SKX-LABEL: load_v2f32_v2i32: -; SKX: ## %bb.0: -; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 -; SKX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3] 
-; SKX-NEXT: vptestnmq %xmm0, %xmm0, %k1 -; SKX-NEXT: vblendmps (%rdi), %xmm1, %xmm0 {%k1} -; SKX-NEXT: retq +; AVX512VLBW-LABEL: load_v2f32_v2i32: +; AVX512VLBW: ## %bb.0: +; AVX512VLBW-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX512VLBW-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3] +; AVX512VLBW-NEXT: vptestnmq %xmm0, %xmm0, %k1 +; AVX512VLBW-NEXT: vblendmps (%rdi), %xmm1, %xmm0 {%k1} +; AVX512VLBW-NEXT: retq %mask = icmp eq <2 x i32> %trigger, zeroinitializer %res = call <2 x float> @llvm.masked.load.v2f32.p0v2f32(<2 x float>* %addr, i32 4, <2 x i1>%mask, <2 x float>%dst) ret <2 x float> %res } define <2 x i32> @load_v2i32_v2i32(<2 x i32> %trigger, <2 x i32>* %addr, <2 x i32> %dst) { +; SSE2-LABEL: load_v2i32_v2i32: +; SSE2: ## %bb.0: +; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 +; SSE2-NEXT: pxor %xmm2, %xmm2 +; SSE2-NEXT: pcmpeqd %xmm0, %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,0,3,2] +; SSE2-NEXT: pand %xmm2, %xmm0 +; SSE2-NEXT: movd %xmm0, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB11_2 +; SSE2-NEXT: ## %bb.1: ## %cond.load +; SSE2-NEXT: movl (%rdi), %eax +; SSE2-NEXT: movq %rax, %xmm2 +; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] +; SSE2-NEXT: LBB11_2: ## %else +; SSE2-NEXT: pextrw $4, %xmm0, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB11_4 +; SSE2-NEXT: ## %bb.3: ## %cond.load1 +; SSE2-NEXT: movl 4(%rdi), %eax +; SSE2-NEXT: movq %rax, %xmm0 +; SSE2-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm0[0] +; SSE2-NEXT: LBB11_4: ## %else2 +; SSE2-NEXT: movapd %xmm1, %xmm0 +; SSE2-NEXT: retq +; +; SSE42-LABEL: load_v2i32_v2i32: +; SSE42: ## %bb.0: +; SSE42-NEXT: pxor %xmm2, %xmm2 +; SSE42-NEXT: movdqa %xmm0, %xmm3 +; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7] +; SSE42-NEXT: pcmpeqq %xmm2, %xmm3 +; SSE42-NEXT: pextrb $0, %xmm3, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB11_2 +; SSE42-NEXT: ## %bb.1: ## %cond.load +; SSE42-NEXT: movl (%rdi), %eax +; SSE42-NEXT: pinsrq $0, %rax, %xmm1 +; SSE42-NEXT: LBB11_2: ## %else +; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7] +; SSE42-NEXT: pcmpeqq %xmm2, %xmm0 +; SSE42-NEXT: pextrb $8, %xmm0, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB11_4 +; SSE42-NEXT: ## %bb.3: ## %cond.load1 +; SSE42-NEXT: movl 4(%rdi), %eax +; SSE42-NEXT: pinsrq $1, %rax, %xmm1 +; SSE42-NEXT: LBB11_4: ## %else2 +; SSE42-NEXT: movdqa %xmm1, %xmm0 +; SSE42-NEXT: retq +; ; AVX1-LABEL: load_v2i32_v2i32: ; AVX1: ## %bb.0: ; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 @@ -464,21 +1416,69 @@ define <2 x i32> @load_v2i32_v2i32(<2 x i32> %trigger, <2 x i32>* %addr, <2 x i3 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; -; SKX-LABEL: load_v2i32_v2i32: -; SKX: ## %bb.0: -; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 -; SKX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3] -; SKX-NEXT: vptestnmq %xmm0, %xmm0, %k1 -; SKX-NEXT: vpshufd {{.*#+}} xmm0 = xmm1[0,2,2,3] -; SKX-NEXT: vmovdqu32 (%rdi), %xmm0 {%k1} -; SKX-NEXT: vpmovsxdq %xmm0, %xmm0 -; SKX-NEXT: retq +; AVX512VLBW-LABEL: load_v2i32_v2i32: +; AVX512VLBW: ## %bb.0: +; AVX512VLBW-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX512VLBW-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3] +; AVX512VLBW-NEXT: vptestnmq %xmm0, %xmm0, %k1 +; AVX512VLBW-NEXT: vpshufd {{.*#+}} xmm0 = xmm1[0,2,2,3] +; AVX512VLBW-NEXT: vmovdqu32 (%rdi), %xmm0 {%k1} +; AVX512VLBW-NEXT: vpmovsxdq %xmm0, %xmm0 +; AVX512VLBW-NEXT: retq %mask = icmp eq <2 x i32> %trigger, zeroinitializer %res = call <2 x i32> @llvm.masked.load.v2i32.p0v2i32(<2 x 
i32>* %addr, i32 4, <2 x i1>%mask, <2 x i32>%dst) ret <2 x i32> %res } define <2 x float> @load_undef_v2f32_v2i32(<2 x i32> %trigger, <2 x float>* %addr) { +; SSE2-LABEL: load_undef_v2f32_v2i32: +; SSE2: ## %bb.0: +; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 +; SSE2-NEXT: pxor %xmm2, %xmm2 +; SSE2-NEXT: pcmpeqd %xmm0, %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,0,3,2] +; SSE2-NEXT: pand %xmm2, %xmm1 +; SSE2-NEXT: movd %xmm1, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: ## implicit-def: $xmm0 +; SSE2-NEXT: je LBB12_2 +; SSE2-NEXT: ## %bb.1: ## %cond.load +; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-NEXT: LBB12_2: ## %else +; SSE2-NEXT: pextrw $4, %xmm1, %eax +; SSE2-NEXT: testb $1, %al +; SSE2-NEXT: je LBB12_4 +; SSE2-NEXT: ## %bb.3: ## %cond.load1 +; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[0,0] +; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[2,3] +; SSE2-NEXT: movaps %xmm1, %xmm0 +; SSE2-NEXT: LBB12_4: ## %else2 +; SSE2-NEXT: retq +; +; SSE42-LABEL: load_undef_v2f32_v2i32: +; SSE42: ## %bb.0: +; SSE42-NEXT: movdqa %xmm0, %xmm1 +; SSE42-NEXT: pxor %xmm2, %xmm2 +; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7] +; SSE42-NEXT: pcmpeqq %xmm2, %xmm0 +; SSE42-NEXT: pextrb $0, %xmm0, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: ## implicit-def: $xmm0 +; SSE42-NEXT: je LBB12_2 +; SSE42-NEXT: ## %bb.1: ## %cond.load +; SSE42-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE42-NEXT: LBB12_2: ## %else +; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7] +; SSE42-NEXT: pcmpeqq %xmm2, %xmm1 +; SSE42-NEXT: pextrb $8, %xmm1, %eax +; SSE42-NEXT: testb $1, %al +; SSE42-NEXT: je LBB12_4 +; SSE42-NEXT: ## %bb.3: ## %cond.load1 +; SSE42-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3] +; SSE42-NEXT: LBB12_4: ## %else2 +; SSE42-NEXT: retq +; ; AVX1-LABEL: load_undef_v2f32_v2i32: ; AVX1: ## %bb.0: ; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 @@ -509,23 +1509,28 @@ define <2 x float> @load_undef_v2f32_v2i32(<2 x i32> %trigger, <2 x float>* %add ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; -; SKX-LABEL: load_undef_v2f32_v2i32: -; SKX: ## %bb.0: -; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 -; SKX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3] -; SKX-NEXT: vptestnmq %xmm0, %xmm0, %k1 -; SKX-NEXT: vmovups (%rdi), %xmm0 {%k1} {z} -; SKX-NEXT: retq +; AVX512VLBW-LABEL: load_undef_v2f32_v2i32: +; AVX512VLBW: ## %bb.0: +; AVX512VLBW-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVX512VLBW-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3] +; AVX512VLBW-NEXT: vptestnmq %xmm0, %xmm0, %k1 +; AVX512VLBW-NEXT: vmovups (%rdi), %xmm0 {%k1} {z} +; AVX512VLBW-NEXT: retq %mask = icmp eq <2 x i32> %trigger, zeroinitializer %res = call <2 x float> @llvm.masked.load.v2f32.p0v2f32(<2 x float>* %addr, i32 4, <2 x i1>%mask, <2 x float>undef) ret <2 x float> %res } define <4 x float> @load_all_v4f32_v4i32(<4 x i32> %trigger, <4 x float>* %addr) { -; AVX-LABEL: load_all_v4f32_v4i32: -; AVX: ## %bb.0: -; AVX-NEXT: vmovups (%rdi), %xmm0 -; AVX-NEXT: retq +; SSE-LABEL: load_all_v4f32_v4i32: +; SSE: ## %bb.0: +; SSE-NEXT: movups (%rdi), %xmm0 +; SSE-NEXT: retq +; +; AVX1OR2-LABEL: load_all_v4f32_v4i32: +; AVX1OR2: ## %bb.0: +; AVX1OR2-NEXT: vmovups (%rdi), %xmm0 +; AVX1OR2-NEXT: retq ; ; AVX512F-LABEL: load_all_v4f32_v4i32: ; AVX512F: ## %bb.0: @@ -536,11 +1541,11 @@ define <4 x float> @load_all_v4f32_v4i32(<4 x i32> %trigger, <4 x float>* %addr) ; AVX512F-NEXT: 
vzeroupper ; AVX512F-NEXT: retq ; -; SKX-LABEL: load_all_v4f32_v4i32: -; SKX: ## %bb.0: -; SKX-NEXT: kxnorw %k0, %k0, %k1 -; SKX-NEXT: vmovups (%rdi), %xmm0 {%k1} {z} -; SKX-NEXT: retq +; AVX512VLBW-LABEL: load_all_v4f32_v4i32: +; AVX512VLBW: ## %bb.0: +; AVX512VLBW-NEXT: kxnorw %k0, %k0, %k1 +; AVX512VLBW-NEXT: vmovups (%rdi), %xmm0 {%k1} {z} +; AVX512VLBW-NEXT: retq %mask = icmp eq <4 x i32> %trigger, zeroinitializer %res = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %addr, i32 4, <4 x i1>, <4 x float>undef) ret <4 x float> %res @@ -551,10 +1556,30 @@ define <4 x float> @load_all_v4f32_v4i32(<4 x i32> %trigger, <4 x float>* %addr) ; 128-bit FP vectors are supported with AVX. define <4 x float> @mload_constmask_v4f32(<4 x float>* %addr, <4 x float> %dst) { -; AVX-LABEL: mload_constmask_v4f32: -; AVX: ## %bb.0: -; AVX-NEXT: vblendps {{.*#+}} xmm0 = mem[0],xmm0[1],mem[2,3] -; AVX-NEXT: retq +; SSE2-LABEL: mload_constmask_v4f32: +; SSE2: ## %bb.0: +; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero +; SSE2-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] +; SSE2-NEXT: movaps %xmm0, %xmm1 +; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0,2] +; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero +; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,0],xmm1[2,0] +; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,0] +; SSE2-NEXT: retq +; +; SSE42-LABEL: mload_constmask_v4f32: +; SSE42: ## %bb.0: +; SSE42-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; SSE42-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] +; SSE42-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3] +; SSE42-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0] +; SSE42-NEXT: retq +; +; AVX1OR2-LABEL: mload_constmask_v4f32: +; AVX1OR2: ## %bb.0: +; AVX1OR2-NEXT: vblendps {{.*#+}} xmm0 = mem[0],xmm0[1],mem[2,3] +; AVX1OR2-NEXT: retq ; ; AVX512F-LABEL: mload_constmask_v4f32: ; AVX512F: ## %bb.0: @@ -566,26 +1591,26 @@ define <4 x float> @mload_constmask_v4f32(<4 x float>* %addr, <4 x float> %dst) ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; -; SKX-LABEL: mload_constmask_v4f32: -; SKX: ## %bb.0: -; SKX-NEXT: movb $13, %al -; SKX-NEXT: kmovd %eax, %k1 -; SKX-NEXT: vmovups (%rdi), %xmm0 {%k1} -; SKX-NEXT: retq +; AVX512VLBW-LABEL: mload_constmask_v4f32: +; AVX512VLBW: ## %bb.0: +; AVX512VLBW-NEXT: movb $13, %al +; AVX512VLBW-NEXT: kmovd %eax, %k1 +; AVX512VLBW-NEXT: vmovups (%rdi), %xmm0 {%k1} +; AVX512VLBW-NEXT: retq %res = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %addr, i32 4, <4 x i1> , <4 x float> %dst) ret <4 x float> %res } define <2 x double> @mload_constmask_v2f64(<2 x double>* %addr, <2 x double> %dst) { +; SSE-LABEL: mload_constmask_v2f64: +; SSE: ## %bb.0: +; SSE-NEXT: movhpd {{.*#+}} xmm0 = xmm0[0],mem[0] +; SSE-NEXT: retq +; ; AVX-LABEL: mload_constmask_v2f64: ; AVX: ## %bb.0: ; AVX-NEXT: vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0] ; AVX-NEXT: retq -; -; AVX512-LABEL: mload_constmask_v2f64: -; AVX512: ## %bb.0: -; AVX512-NEXT: vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0] -; AVX512-NEXT: retq %res = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %addr, i32 4, <2 x i1> , <2 x double> %dst) ret <2 x double> %res } @@ -593,6 +1618,26 @@ define <2 x double> @mload_constmask_v2f64(<2 x double>* %addr, <2 x double> %ds ; 128-bit integer vectors are supported with AVX2. 
define <4 x i32> @mload_constmask_v4i32(<4 x i32>* %addr, <4 x i32> %dst) { +; SSE2-LABEL: mload_constmask_v4i32: +; SSE2: ## %bb.0: +; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[0,0] +; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-NEXT: movaps %xmm1, %xmm2 +; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm0[0,2] +; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm2[2,0] +; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[2,0] +; SSE2-NEXT: movaps %xmm1, %xmm0 +; SSE2-NEXT: retq +; +; SSE42-LABEL: mload_constmask_v4i32: +; SSE42: ## %bb.0: +; SSE42-NEXT: pinsrd $1, 4(%rdi), %xmm0 +; SSE42-NEXT: pinsrd $2, 8(%rdi), %xmm0 +; SSE42-NEXT: pinsrd $3, 12(%rdi), %xmm0 +; SSE42-NEXT: retq +; ; AVX1-LABEL: mload_constmask_v4i32: ; AVX1: ## %bb.0: ; AVX1-NEXT: vmovaps {{.*#+}} xmm1 = [0,4294967295,4294967295,4294967295] @@ -617,26 +1662,32 @@ define <4 x i32> @mload_constmask_v4i32(<4 x i32>* %addr, <4 x i32> %dst) { ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; -; SKX-LABEL: mload_constmask_v4i32: -; SKX: ## %bb.0: -; SKX-NEXT: movb $14, %al -; SKX-NEXT: kmovd %eax, %k1 -; SKX-NEXT: vmovdqu32 (%rdi), %xmm0 {%k1} -; SKX-NEXT: retq +; AVX512VLBW-LABEL: mload_constmask_v4i32: +; AVX512VLBW: ## %bb.0: +; AVX512VLBW-NEXT: movb $14, %al +; AVX512VLBW-NEXT: kmovd %eax, %k1 +; AVX512VLBW-NEXT: vmovdqu32 (%rdi), %xmm0 {%k1} +; AVX512VLBW-NEXT: retq %res = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %addr, i32 4, <4 x i1> , <4 x i32> %dst) ret <4 x i32> %res } define <2 x i64> @mload_constmask_v2i64(<2 x i64>* %addr, <2 x i64> %dst) { +; SSE2-LABEL: mload_constmask_v2i64: +; SSE2: ## %bb.0: +; SSE2-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero +; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSE2-NEXT: retq +; +; SSE42-LABEL: mload_constmask_v2i64: +; SSE42: ## %bb.0: +; SSE42-NEXT: pinsrq $1, 8(%rdi), %xmm0 +; SSE42-NEXT: retq +; ; AVX-LABEL: mload_constmask_v2i64: ; AVX: ## %bb.0: ; AVX-NEXT: vpinsrq $1, 8(%rdi), %xmm0, %xmm0 ; AVX-NEXT: retq -; -; AVX512-LABEL: mload_constmask_v2i64: -; AVX512: ## %bb.0: -; AVX512-NEXT: vpinsrq $1, 8(%rdi), %xmm0, %xmm0 -; AVX512-NEXT: retq %res = call <2 x i64> @llvm.masked.load.v2i64.p0v2i64(<2 x i64>* %addr, i32 4, <2 x i1> , <2 x i64> %dst) ret <2 x i64> %res } @@ -644,12 +1695,31 @@ define <2 x i64> @mload_constmask_v2i64(<2 x i64>* %addr, <2 x i64> %dst) { ; 256-bit FP vectors are supported with AVX. 
 define <8 x float> @mload_constmask_v8f32(<8 x float>* %addr, <8 x float> %dst) {
-; AVX-LABEL: mload_constmask_v8f32:
-; AVX: ## %bb.0:
-; AVX-NEXT: vmovaps {{.*#+}} ymm1 = [4294967295,4294967295,4294967295,0,0,0,0,0]
-; AVX-NEXT: vmaskmovps (%rdi), %ymm1, %ymm1
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
-; AVX-NEXT: retq
+; SSE2-LABEL: mload_constmask_v8f32:
+; SSE2: ## %bb.0:
+; SSE2-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,0],xmm3[0,0]
+; SSE2-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,0],xmm0[3,0]
+; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm3[0,2]
+; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE42-LABEL: mload_constmask_v8f32:
+; SSE42: ## %bb.0:
+; SSE42-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE42-NEXT: blendps {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
+; SSE42-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
+; SSE42-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
+; SSE42-NEXT: retq
+;
+; AVX1OR2-LABEL: mload_constmask_v8f32:
+; AVX1OR2: ## %bb.0:
+; AVX1OR2-NEXT: vmovaps {{.*#+}} ymm1 = [4294967295,4294967295,4294967295,0,0,0,0,0]
+; AVX1OR2-NEXT: vmaskmovps (%rdi), %ymm1, %ymm1
+; AVX1OR2-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
+; AVX1OR2-NEXT: retq
 ;
 ; AVX512F-LABEL: mload_constmask_v8f32:
 ; AVX512F: ## %bb.0:
@@ -660,23 +1730,29 @@ define <8 x float> @mload_constmask_v8f32(<8 x float>* %addr, <8 x float> %dst)
 ; AVX512F-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0
 ; AVX512F-NEXT: retq
 ;
-; SKX-LABEL: mload_constmask_v8f32:
-; SKX: ## %bb.0:
-; SKX-NEXT: movb $7, %al
-; SKX-NEXT: kmovd %eax, %k1
-; SKX-NEXT: vmovups (%rdi), %ymm0 {%k1}
-; SKX-NEXT: retq
+; AVX512VLBW-LABEL: mload_constmask_v8f32:
+; AVX512VLBW: ## %bb.0:
+; AVX512VLBW-NEXT: movb $7, %al
+; AVX512VLBW-NEXT: kmovd %eax, %k1
+; AVX512VLBW-NEXT: vmovups (%rdi), %ymm0 {%k1}
+; AVX512VLBW-NEXT: retq
 %res = call <8 x float> @llvm.masked.load.v8f32.p0v8f32(<8 x float>* %addr, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false>, <8 x float> %dst)
 ret <8 x float> %res
 }

 define <4 x double> @mload_constmask_v4f64(<4 x double>* %addr, <4 x double> %dst) {
-; AVX-LABEL: mload_constmask_v4f64:
-; AVX: ## %bb.0:
-; AVX-NEXT: vmovapd {{.*#+}} ymm1 = [18446744073709551615,18446744073709551615,18446744073709551615,0]
-; AVX-NEXT: vmaskmovpd (%rdi), %ymm1, %ymm1
-; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3]
-; AVX-NEXT: retq
+; SSE-LABEL: mload_constmask_v4f64:
+; SSE: ## %bb.0:
+; SSE-NEXT: movups (%rdi), %xmm0
+; SSE-NEXT: movlpd {{.*#+}} xmm1 = mem[0],xmm1[1]
+; SSE-NEXT: retq
+;
+; AVX1OR2-LABEL: mload_constmask_v4f64:
+; AVX1OR2: ## %bb.0:
+; AVX1OR2-NEXT: vmovapd {{.*#+}} ymm1 = [18446744073709551615,18446744073709551615,18446744073709551615,0]
+; AVX1OR2-NEXT: vmaskmovpd (%rdi), %ymm1, %ymm1
+; AVX1OR2-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3]
+; AVX1OR2-NEXT: retq
 ;
 ; AVX512F-LABEL: mload_constmask_v4f64:
 ; AVX512F: ## %bb.0:
@@ -687,12 +1763,12 @@ define <4 x double> @mload_constmask_v4f64(<4 x double>* %addr, <4 x double> %ds
 ; AVX512F-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0
 ; AVX512F-NEXT: retq
 ;
-; SKX-LABEL: mload_constmask_v4f64:
-; SKX: ## %bb.0:
-; SKX-NEXT: movb $7, %al
-; SKX-NEXT: kmovd %eax, %k1
-; SKX-NEXT: vmovupd (%rdi), %ymm0 {%k1}
-; SKX-NEXT: retq
+; AVX512VLBW-LABEL: mload_constmask_v4f64:
+; AVX512VLBW: ## %bb.0:
+; AVX512VLBW-NEXT: movb $7, %al
+; AVX512VLBW-NEXT: kmovd %eax, %k1
+; AVX512VLBW-NEXT: vmovupd (%rdi), %ymm0 {%k1}
+; AVX512VLBW-NEXT: retq
 %res = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>* %addr, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 false>, <4 x double> %dst)
 ret <4 x double> %res
 }
@@ -700,10 +1776,32 @@ define <4 x double> @mload_constmask_v4f64(<4 x double>* %addr, <4 x double> %ds
 ; 256-bit integer vectors are supported with AVX2.

 define <8 x i32> @mload_constmask_v8i32(<8 x i32>* %addr, <8 x i32> %dst) {
-; AVX-LABEL: mload_constmask_v8i32:
-; AVX: ## %bb.0:
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = mem[0,1,2],ymm0[3,4,5,6],mem[7]
-; AVX-NEXT: retq
+; SSE2-LABEL: mload_constmask_v8i32:
+; SSE2: ## %bb.0:
+; SSE2-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,0],xmm3[0,0]
+; SSE2-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,0],xmm0[3,0]
+; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm3[0,2]
+; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[2,0]
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
+; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE42-LABEL: mload_constmask_v8i32:
+; SSE42: ## %bb.0:
+; SSE42-NEXT: pinsrd $0, (%rdi), %xmm0
+; SSE42-NEXT: pinsrd $1, 4(%rdi), %xmm0
+; SSE42-NEXT: pinsrd $2, 8(%rdi), %xmm0
+; SSE42-NEXT: pinsrd $3, 28(%rdi), %xmm1
+; SSE42-NEXT: retq
+;
+; AVX1OR2-LABEL: mload_constmask_v8i32:
+; AVX1OR2: ## %bb.0:
+; AVX1OR2-NEXT: vblendps {{.*#+}} ymm0 = mem[0,1,2],ymm0[3,4,5,6],mem[7]
+; AVX1OR2-NEXT: retq
 ;
 ; AVX512F-LABEL: mload_constmask_v8i32:
 ; AVX512F: ## %bb.0:
@@ -714,21 +1812,34 @@ define <8 x i32> @mload_constmask_v8i32(<8 x i32>* %addr, <8 x i32> %dst) {
 ; AVX512F-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0
 ; AVX512F-NEXT: retq
 ;
-; SKX-LABEL: mload_constmask_v8i32:
-; SKX: ## %bb.0:
-; SKX-NEXT: movb $-121, %al
-; SKX-NEXT: kmovd %eax, %k1
-; SKX-NEXT: vmovdqu32 (%rdi), %ymm0 {%k1}
-; SKX-NEXT: retq
+; AVX512VLBW-LABEL: mload_constmask_v8i32:
+; AVX512VLBW: ## %bb.0:
+; AVX512VLBW-NEXT: movb $-121, %al
+; AVX512VLBW-NEXT: kmovd %eax, %k1
+; AVX512VLBW-NEXT: vmovdqu32 (%rdi), %ymm0 {%k1}
+; AVX512VLBW-NEXT: retq
 %res = call <8 x i32> @llvm.masked.load.v8i32.p0v8i32(<8 x i32>* %addr, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true>, <8 x i32> %dst)
 ret <8 x i32> %res
 }

 define <4 x i64> @mload_constmask_v4i64(<4 x i64>* %addr, <4 x i64> %dst) {
-; AVX-LABEL: mload_constmask_v4i64:
-; AVX: ## %bb.0:
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = mem[0,1],ymm0[2,3,4,5],mem[6,7]
-; AVX-NEXT: retq
+; SSE2-LABEL: mload_constmask_v4i64:
+; SSE2: ## %bb.0:
+; SSE2-NEXT: movlpd {{.*#+}} xmm0 = mem[0],xmm0[1]
+; SSE2-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; SSE2-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE2-NEXT: retq
+;
+; SSE42-LABEL: mload_constmask_v4i64:
+; SSE42: ## %bb.0:
+; SSE42-NEXT: pinsrq $0, (%rdi), %xmm0
+; SSE42-NEXT: pinsrq $1, 24(%rdi), %xmm1
+; SSE42-NEXT: retq
+;
+; AVX1OR2-LABEL: mload_constmask_v4i64:
+; AVX1OR2: ## %bb.0:
+; AVX1OR2-NEXT: vblendps {{.*#+}} ymm0 = mem[0,1],ymm0[2,3,4,5],mem[6,7]
+; AVX1OR2-NEXT: retq
 ;
 ; AVX512F-LABEL: mload_constmask_v4i64:
 ; AVX512F: ## %bb.0:
@@ -739,12 +1850,12 @@ define <4 x i64> @mload_constmask_v4i64(<4 x i64>* %addr, <4 x i64> %dst) {
 ; AVX512F-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0
 ; AVX512F-NEXT: retq
 ;
-; SKX-LABEL: mload_constmask_v4i64:
-; SKX: ## %bb.0:
-; SKX-NEXT: movb $9, %al
-; SKX-NEXT: kmovd %eax, %k1
-; SKX-NEXT: vmovdqu64 (%rdi), %ymm0 {%k1}
-; SKX-NEXT: retq
+; AVX512VLBW-LABEL: mload_constmask_v4i64:
+; AVX512VLBW: ## %bb.0:
+; AVX512VLBW-NEXT: movb $9, %al
+; AVX512VLBW-NEXT: kmovd %eax, %k1
+; AVX512VLBW-NEXT: vmovdqu64 (%rdi), %ymm0 {%k1}
+; AVX512VLBW-NEXT: retq
 %res = call <4 x i64> @llvm.masked.load.v4i64.p0v4i64(<4 x i64>* %addr, i32 4, <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i64> %dst)
 ret <4 x i64> %res
 }
@@ -752,11 +1863,18 @@ define <4 x i64> @mload_constmask_v4i64(<4 x i64>* %addr, <4 x i64> %dst) {
 ; 512-bit FP vectors are supported with AVX512.

 define <8 x double> @mload_constmask_v8f64(<8 x double>* %addr, <8 x double> %dst) {
-; AVX-LABEL: mload_constmask_v8f64:
-; AVX: ## %bb.0:
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = mem[0,1,2,3,4,5],ymm0[6,7]
-; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],mem[6,7]
-; AVX-NEXT: retq
+; SSE-LABEL: mload_constmask_v8f64:
+; SSE: ## %bb.0:
+; SSE-NEXT: movups (%rdi), %xmm0
+; SSE-NEXT: movlpd {{.*#+}} xmm1 = mem[0],xmm1[1]
+; SSE-NEXT: movhpd {{.*#+}} xmm3 = xmm3[0],mem[0]
+; SSE-NEXT: retq
+;
+; AVX1OR2-LABEL: mload_constmask_v8f64:
+; AVX1OR2: ## %bb.0:
+; AVX1OR2-NEXT: vblendps {{.*#+}} ymm0 = mem[0,1,2,3,4,5],ymm0[6,7]
+; AVX1OR2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],mem[6,7]
+; AVX1OR2-NEXT: retq
 ;
 ; AVX512F-LABEL: mload_constmask_v8f64:
 ; AVX512F: ## %bb.0:
@@ -765,12 +1883,12 @@ define <8 x double> @mload_constmask_v8f64(<8 x double>* %addr, <8 x double> %ds
 ; AVX512F-NEXT: vmovupd (%rdi), %zmm0 {%k1}
 ; AVX512F-NEXT: retq
 ;
-; SKX-LABEL: mload_constmask_v8f64:
-; SKX: ## %bb.0:
-; SKX-NEXT: movb $-121, %al
-; SKX-NEXT: kmovd %eax, %k1
-; SKX-NEXT: vmovupd (%rdi), %zmm0 {%k1}
-; SKX-NEXT: retq
+; AVX512VLBW-LABEL: mload_constmask_v8f64:
+; AVX512VLBW: ## %bb.0:
+; AVX512VLBW-NEXT: movb $-121, %al
+; AVX512VLBW-NEXT: kmovd %eax, %k1
+; AVX512VLBW-NEXT: vmovupd (%rdi), %zmm0 {%k1}
+; AVX512VLBW-NEXT: retq
 %res = call <8 x double> @llvm.masked.load.v8f64.p0v8f64(<8 x double>* %addr, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true>, <8 x double> %dst)
 ret <8 x double> %res
 }
@@ -778,11 +1896,17 @@ define <8 x double> @mload_constmask_v8f64(<8 x double>* %addr, <8 x double> %ds
 ; If the pass-through operand is undef, no blend is needed.

 define <4 x double> @mload_constmask_v4f64_undef_passthrough(<4 x double>* %addr) {
-; AVX-LABEL: mload_constmask_v4f64_undef_passthrough:
-; AVX: ## %bb.0:
-; AVX-NEXT: vmovapd {{.*#+}} ymm0 = [18446744073709551615,18446744073709551615,18446744073709551615,0]
-; AVX-NEXT: vmaskmovpd (%rdi), %ymm0, %ymm0
-; AVX-NEXT: retq
+; SSE-LABEL: mload_constmask_v4f64_undef_passthrough:
+; SSE: ## %bb.0:
+; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT: movups (%rdi), %xmm0
+; SSE-NEXT: retq
+;
+; AVX1OR2-LABEL: mload_constmask_v4f64_undef_passthrough:
+; AVX1OR2: ## %bb.0:
+; AVX1OR2-NEXT: vmovapd {{.*#+}} ymm0 = [18446744073709551615,18446744073709551615,18446744073709551615,0]
+; AVX1OR2-NEXT: vmaskmovpd (%rdi), %ymm0, %ymm0
+; AVX1OR2-NEXT: retq
 ;
 ; AVX512F-LABEL: mload_constmask_v4f64_undef_passthrough:
 ; AVX512F: ## %bb.0:
@@ -792,17 +1916,24 @@ define <4 x double> @mload_constmask_v4f64_undef_passthrough(<4 x double>* %addr
 ; AVX512F-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0
 ; AVX512F-NEXT: retq
 ;
-; SKX-LABEL: mload_constmask_v4f64_undef_passthrough:
-; SKX: ## %bb.0:
-; SKX-NEXT: movb $7, %al
-; SKX-NEXT: kmovd %eax, %k1
-; SKX-NEXT: vmovupd (%rdi), %ymm0 {%k1} {z}
-; SKX-NEXT: retq
+; AVX512VLBW-LABEL: mload_constmask_v4f64_undef_passthrough:
+; AVX512VLBW: ## %bb.0:
+; AVX512VLBW-NEXT: movb $7, %al
+; AVX512VLBW-NEXT: kmovd %eax, %k1
+; AVX512VLBW-NEXT: vmovupd (%rdi), %ymm0 {%k1} {z}
+; AVX512VLBW-NEXT: retq
 %res = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>* %addr, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 false>, <4 x double> undef)
 ret <4 x double> %res
 }

 define <4 x i64> @mload_constmask_v4i64_undef_passthrough(<4 x i64>* %addr) {
+; SSE-LABEL: mload_constmask_v4i64_undef_passthrough:
+; SSE: ## %bb.0:
+; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
+; SSE-NEXT: retq
+;
 ; AVX1-LABEL: mload_constmask_v4i64_undef_passthrough:
 ; AVX1: ## %bb.0:
 ; AVX1-NEXT: vmovapd {{.*#+}} ymm0 = [0,18446744073709551615,18446744073709551615,0]
@@ -823,12 +1954,12 @@ define <4 x i64> @mload_constmask_v4i64_undef_passthrough(<4 x i64>* %addr) {
 ; AVX512F-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0
 ; AVX512F-NEXT: retq
 ;
-; SKX-LABEL: mload_constmask_v4i64_undef_passthrough:
-; SKX: ## %bb.0:
-; SKX-NEXT: movb $6, %al
-; SKX-NEXT: kmovd %eax, %k1
-; SKX-NEXT: vmovdqu64 (%rdi), %ymm0 {%k1} {z}
-; SKX-NEXT: retq
+; AVX512VLBW-LABEL: mload_constmask_v4i64_undef_passthrough:
+; AVX512VLBW: ## %bb.0:
+; AVX512VLBW-NEXT: movb $6, %al
+; AVX512VLBW-NEXT: kmovd %eax, %k1
+; AVX512VLBW-NEXT: vmovdqu64 (%rdi), %ymm0 {%k1} {z}
+; AVX512VLBW-NEXT: retq
 %res = call <4 x i64> @llvm.masked.load.v4i64.p0v4i64(<4 x i64>* %addr, i32 4, <4 x i1> <i1 false, i1 true, i1 true, i1 false>, <4 x i64> undef)
 ret <4 x i64> %res
 }
@@ -836,15 +1967,21 @@ define <4 x i64> @mload_constmask_v4i64_undef_passthrough(<4 x i64>* %addr) {
 ; When only one element of the mask is set, reduce to a scalar load.

 define <4 x i32> @load_one_mask_bit_set1(<4 x i32>* %addr, <4 x i32> %val) {
+; SSE2-LABEL: load_one_mask_bit_set1:
+; SSE2: ## %bb.0:
+; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE2-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
+; SSE2-NEXT: retq
+;
+; SSE42-LABEL: load_one_mask_bit_set1:
+; SSE42: ## %bb.0:
+; SSE42-NEXT: pinsrd $0, (%rdi), %xmm0
+; SSE42-NEXT: retq
+;
 ; AVX-LABEL: load_one_mask_bit_set1:
 ; AVX: ## %bb.0:
 ; AVX-NEXT: vpinsrd $0, (%rdi), %xmm0, %xmm0
 ; AVX-NEXT: retq
-;
-; AVX512-LABEL: load_one_mask_bit_set1:
-; AVX512: ## %bb.0:
-; AVX512-NEXT: vpinsrd $0, (%rdi), %xmm0, %xmm0
-; AVX512-NEXT: retq
 %res = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %addr, i32 4, <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i32> %val)
 ret <4 x i32> %res
 }
@@ -852,15 +1989,22 @@ define <4 x i32> @load_one_mask_bit_set1(<4 x i32>* %addr, <4 x i32> %val) {
 ; Choose a different element to show that the correct address offset is produced.

 define <4 x float> @load_one_mask_bit_set2(<4 x float>* %addr, <4 x float> %val) {
+; SSE2-LABEL: load_one_mask_bit_set2:
+; SSE2: ## %bb.0:
+; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
+; SSE2-NEXT: retq
+;
+; SSE42-LABEL: load_one_mask_bit_set2:
+; SSE42: ## %bb.0:
+; SSE42-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
+; SSE42-NEXT: retq
+;
 ; AVX-LABEL: load_one_mask_bit_set2:
 ; AVX: ## %bb.0:
 ; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
 ; AVX-NEXT: retq
-;
-; AVX512-LABEL: load_one_mask_bit_set2:
-; AVX512: ## %bb.0:
-; AVX512-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
-; AVX512-NEXT: retq
 %res = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %addr, i32 4, <4 x i1> <i1 false, i1 false, i1 true, i1 false>, <4 x float> %val)
 ret <4 x float> %res
 }
@@ -868,6 +2012,16 @@ define <4 x float> @load_one_mask_bit_set2(<4 x float>* %addr, <4 x float> %val)
 ; Choose a different scalar type and a high element of a 256-bit vector because AVX doesn't support those evenly.

 define <4 x i64> @load_one_mask_bit_set3(<4 x i64>* %addr, <4 x i64> %val) {
+; SSE2-LABEL: load_one_mask_bit_set3:
+; SSE2: ## %bb.0:
+; SSE2-NEXT: movlpd {{.*#+}} xmm1 = mem[0],xmm1[1]
+; SSE2-NEXT: retq
+;
+; SSE42-LABEL: load_one_mask_bit_set3:
+; SSE42: ## %bb.0:
+; SSE42-NEXT: pinsrq $0, 16(%rdi), %xmm1
+; SSE42-NEXT: retq
+;
 ; AVX1-LABEL: load_one_mask_bit_set3:
 ; AVX1: ## %bb.0:
 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
@@ -895,19 +2049,17 @@ define <4 x i64> @load_one_mask_bit_set3(<4 x i64>* %addr, <4 x i64> %val) {
 ; Choose a different scalar type and a high element of a 256-bit vector because AVX doesn't support those evenly.

 define <4 x double> @load_one_mask_bit_set4(<4 x double>* %addr, <4 x double> %val) {
+; SSE-LABEL: load_one_mask_bit_set4:
+; SSE: ## %bb.0:
+; SSE-NEXT: movhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
+; SSE-NEXT: retq
+;
 ; AVX-LABEL: load_one_mask_bit_set4:
 ; AVX: ## %bb.0:
 ; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
 ; AVX-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX-NEXT: retq
-;
-; AVX512-LABEL: load_one_mask_bit_set4:
-; AVX512: ## %bb.0:
-; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
-; AVX512-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX512-NEXT: retq
 %res = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>* %addr, i32 4, <4 x i1> <i1 false, i1 false, i1 false, i1 true>, <4 x double> %val)
 ret <4 x double> %res
 }
@@ -915,12 +2067,17 @@ define <4 x double> @load_one_mask_bit_set4(<4 x double>* %addr, <4 x double> %v
 ; Try a 512-bit vector to make sure AVX doesn't die and AVX512 works as expected.

 define <8 x double> @load_one_mask_bit_set5(<8 x double>* %addr, <8 x double> %val) {
-; AVX-LABEL: load_one_mask_bit_set5:
-; AVX: ## %bb.0:
-; AVX-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX-NEXT: vmovhpd {{.*#+}} xmm2 = xmm2[0],mem[0]
-; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
-; AVX-NEXT: retq
+; SSE-LABEL: load_one_mask_bit_set5:
+; SSE: ## %bb.0:
+; SSE-NEXT: movhpd {{.*#+}} xmm3 = xmm3[0],mem[0]
+; SSE-NEXT: retq
+;
+; AVX1OR2-LABEL: load_one_mask_bit_set5:
+; AVX1OR2: ## %bb.0:
+; AVX1OR2-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1OR2-NEXT: vmovhpd {{.*#+}} xmm2 = xmm2[0],mem[0]
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1OR2-NEXT: retq
 ;
 ; AVX512-LABEL: load_one_mask_bit_set5:
 ; AVX512: ## %bb.0:
@@ -933,6 +2090,16 @@ define <8 x double> @load_one_mask_bit_set5(<8 x double>* %addr, <8 x double> %v
 }

 define i32 @pr38986(i1 %c, i32* %p) {
+; SSE-LABEL: pr38986:
+; SSE: ## %bb.0:
+; SSE-NEXT: testb $1, %dil
+; SSE-NEXT: ## implicit-def: $eax
+; SSE-NEXT: je LBB30_2
+; SSE-NEXT: ## %bb.1: ## %cond.load
+; SSE-NEXT: movl (%rsi), %eax
+; SSE-NEXT: LBB30_2: ## %else
+; SSE-NEXT: retq
+;
 ; AVX-LABEL: pr38986:
 ; AVX: ## %bb.0:
 ; AVX-NEXT: testb $1, %dil
@@ -942,16 +2109,6 @@ define i32 @pr38986(i1 %c, i32* %p) {
 ; AVX-NEXT: movl (%rsi), %eax
 ; AVX-NEXT: LBB30_2: ## %else
 ; AVX-NEXT: retq
-;
-; AVX512-LABEL: pr38986:
-; AVX512: ## %bb.0:
-; AVX512-NEXT: testb $1, %dil
-; AVX512-NEXT: ## implicit-def: $eax
-; AVX512-NEXT: je LBB30_2
-; AVX512-NEXT: ## %bb.1: ## %cond.load
-; AVX512-NEXT: movl (%rsi), %eax
-; AVX512-NEXT: LBB30_2: ## %else
-; AVX512-NEXT: retq
 %vc = insertelement <1 x i1> undef, i1 %c, i32 0
 %vp = bitcast i32* %p to <1 x i32>*
 %L = call <1 x i32> @llvm.masked.load.v1i32.p0v1i32 (<1 x i32>* %vp, i32 4, <1 x i1> %vc, <1 x i32> undef)
@@ -972,3 +2129,4 @@ declare <2 x float> @llvm.masked.load.v2f32.p0v2f32(<2 x float>*, i32, <2 x i1>,
 declare <8 x double> @llvm.masked.load.v8f64.p0v8f64(<8 x double>*, i32, <8 x i1>, <8 x double>)
 declare <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>*, i32, <4 x i1>, <4 x double>)
 declare <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>*, i32, <2 x i1>, <2 x double>)
+declare <1 x double> @llvm.masked.load.v1f64.p0v1f64(<1 x double>*, i32, <1 x i1>, <1 x double>)
diff --git a/test/CodeGen/X86/masked_store.ll b/test/CodeGen/X86/masked_store.ll
index bdc5e09b45e..931ec7dc0e8 100644
--- a/test/CodeGen/X86/masked_store.ll
+++ b/test/CodeGen/X86/masked_store.ll
@@ -1,14 +1,21 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-apple-darwin -mattr=avx < %s | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc -mtriple=x86_64-apple-darwin -mattr=avx2 < %s | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
-; RUN: llc -mtriple=x86_64-apple-darwin -mattr=avx512f < %s | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX512F
-; RUN: llc -mtriple=x86_64-apple-darwin -mattr=avx512f,avx512bw,avx512vl < %s | FileCheck %s --check-prefix=AVX512 --check-prefix=SKX
-
-; To test for the case where masked store is not legal, we should add a run with a target
-; that does not have AVX, but that case should probably be a separate test file using less tests
-; because it takes over 1.2 seconds to codegen these tests on Haswell 4GHz if there's no maskmov.
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=sse2 | FileCheck %s --check-prefixes=SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=sse4.2 | FileCheck %s --check-prefixes=SSE,SSE4
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=avx | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX1
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=avx2 | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX2
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=avx512f | FileCheck %s --check-prefixes=AVX,AVX512,AVX512F
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=avx512f,avx512bw,avx512vl | FileCheck %s --check-prefixes=AVX,AVX512,AVX512VLBW

 define void @store_v1i32_v1i32(<1 x i32> %trigger, <1 x i32>* %addr, <1 x i32> %val) {
+; SSE-LABEL: store_v1i32_v1i32:
+; SSE: ## %bb.0:
+; SSE-NEXT: testl %edi, %edi
+; SSE-NEXT: jne LBB0_2
+; SSE-NEXT: ## %bb.1: ## %cond.store
+; SSE-NEXT: movl %edx, (%rsi)
+; SSE-NEXT: LBB0_2: ## %else
+; SSE-NEXT: retq
+;
 ; AVX-LABEL: store_v1i32_v1i32:
 ; AVX: ## %bb.0:
 ; AVX-NEXT: testl %edi, %edi
@@ -17,22 +24,79 @@ define void @store_v1i32_v1i32(<1 x i32> %trigger, <1 x i32>* %addr, <1 x i32> %
 ; AVX-NEXT: movl %edx, (%rsi)
 ; AVX-NEXT: LBB0_2: ## %else
 ; AVX-NEXT: retq
-;
-; AVX512-LABEL: store_v1i32_v1i32:
-; AVX512: ## %bb.0:
-; AVX512-NEXT: testl %edi, %edi
-; AVX512-NEXT: jne LBB0_2
-; AVX512-NEXT: ## %bb.1: ## %cond.store
-; AVX512-NEXT: movl %edx, (%rsi)
-; AVX512-NEXT: LBB0_2: ## %else
-; AVX512-NEXT: retq
 %mask = icmp eq <1 x i32> %trigger, zeroinitializer
 call void @llvm.masked.store.v1i32.p0v1i32(<1 x i32>%val, <1 x i32>* %addr, i32 4, <1 x i1>%mask)
 ret void
 }
-declare void @llvm.masked.store.v1i32.p0v1i32(<1 x i32>, <1 x i32>*, i32, <1 x i1>)

 define void @store_v4i32_v4i32(<4 x i32> %trigger, <4 x i32>* %addr, <4 x i32> %val) {
+; SSE2-LABEL: store_v4i32_v4i32:
+; SSE2: ## %bb.0:
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: pcmpeqd %xmm0, %xmm2
+; SSE2-NEXT: movd %xmm2, %eax
+; SSE2-NEXT: testb $1, %al
+; SSE2-NEXT: je LBB1_2
+; SSE2-NEXT: ## %bb.1: ## %cond.store
+; SSE2-NEXT: movd %xmm1, (%rdi)
+; SSE2-NEXT: LBB1_2: ## %else
+; SSE2-NEXT: pextrw $2, %xmm2, %eax
+; SSE2-NEXT: testb $1, %al
+; SSE2-NEXT: je LBB1_4
+; SSE2-NEXT: ## %bb.3: ## %cond.store1
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,2,3]
+; SSE2-NEXT: movd %xmm2, 4(%rdi)
+; SSE2-NEXT: LBB1_4: ## %else2
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: pcmpeqd %xmm2, %xmm0
+; SSE2-NEXT: pextrw $4, %xmm0, %eax
+; SSE2-NEXT: testb $1, %al
+; SSE2-NEXT: je LBB1_6
+; SSE2-NEXT: ## %bb.5: ## %cond.store3
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; SSE2-NEXT: movd %xmm2, 8(%rdi)
+; SSE2-NEXT: LBB1_6: ## %else4
+; SSE2-NEXT: pextrw $6, %xmm0, %eax
+; SSE2-NEXT: testb $1, %al
+; SSE2-NEXT: je LBB1_8
+; SSE2-NEXT: ## %bb.7: ## %cond.store5
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[3,1,2,3]
+; SSE2-NEXT: movd %xmm0, 12(%rdi)
+; SSE2-NEXT: LBB1_8: ## %else6
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: store_v4i32_v4i32:
+; SSE4: ## %bb.0:
+; SSE4-NEXT: pxor %xmm2, %xmm2
+; SSE4-NEXT: pcmpeqd %xmm0, %xmm2
+; SSE4-NEXT: pextrb $0, %xmm2, %eax
+; SSE4-NEXT: testb $1, %al
+; SSE4-NEXT: je LBB1_2
+; SSE4-NEXT: ## %bb.1: ## %cond.store
+; SSE4-NEXT: movss %xmm1, (%rdi)
+; SSE4-NEXT: LBB1_2: ## %else
+; SSE4-NEXT: pextrb $4, %xmm2, %eax
+; SSE4-NEXT: testb $1, %al
+; SSE4-NEXT: je LBB1_4
+; SSE4-NEXT: ## %bb.3: ## %cond.store1
+; SSE4-NEXT: extractps $1, %xmm1, 4(%rdi)
+; SSE4-NEXT: LBB1_4: ## %else2
+; SSE4-NEXT: pxor %xmm2, %xmm2
+; SSE4-NEXT: pcmpeqd %xmm2, %xmm0
+; SSE4-NEXT: pextrb $8, %xmm0, %eax
+; SSE4-NEXT: testb $1, %al
+; SSE4-NEXT: je LBB1_6
+; SSE4-NEXT: ## %bb.5: ## %cond.store3
+; SSE4-NEXT: extractps $2, %xmm1, 8(%rdi)
+; SSE4-NEXT: LBB1_6: ## %else4
+; SSE4-NEXT: pextrb $12, %xmm0, %eax
+; SSE4-NEXT: testb $1, %al
+; SSE4-NEXT: je LBB1_8
+; SSE4-NEXT: ## %bb.7: ## %cond.store5
+; SSE4-NEXT: extractps $3, %xmm1, 12(%rdi)
+; SSE4-NEXT: LBB1_8: ## %else6
+; SSE4-NEXT: retq
+;
 ; AVX1-LABEL: store_v4i32_v4i32:
 ; AVX1: ## %bb.0:
 ; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
@@ -58,17 +122,147 @@ define void @store_v4i32_v4i32(<4 x i32> %trigger, <4 x i32>* %addr, <4 x i32> %
 ; AVX512F-NEXT: vzeroupper
 ; AVX512F-NEXT: retq
 ;
-; SKX-LABEL: store_v4i32_v4i32:
-; SKX: ## %bb.0:
-; SKX-NEXT: vptestnmd %xmm0, %xmm0, %k1
-; SKX-NEXT: vmovdqu32 %xmm1, (%rdi) {%k1}
-; SKX-NEXT: retq
+; AVX512VLBW-LABEL: store_v4i32_v4i32:
+; AVX512VLBW: ## %bb.0:
+; AVX512VLBW-NEXT: vptestnmd %xmm0, %xmm0, %k1
+; AVX512VLBW-NEXT: vmovdqu32 %xmm1, (%rdi) {%k1}
+; AVX512VLBW-NEXT: retq
 %mask = icmp eq <4 x i32> %trigger, zeroinitializer
 call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>%val, <4 x i32>* %addr, i32 4, <4 x i1>%mask)
 ret void
 }

 define void @store_v8i32_v8i32(<8 x i32> %trigger, <8 x i32>* %addr, <8 x i32> %val) {
+; SSE2-LABEL: store_v8i32_v8i32:
+; SSE2: ## %bb.0:
+; SSE2-NEXT: pxor %xmm4, %xmm4
+; SSE2-NEXT: pcmpeqd %xmm0, %xmm4
+; SSE2-NEXT: movdqa %xmm4, %xmm5
+; SSE2-NEXT: packssdw %xmm0, %xmm5
+; SSE2-NEXT: movd %xmm5, %eax
+; SSE2-NEXT: testb $1, %al
+; SSE2-NEXT: je LBB2_2
+; SSE2-NEXT: ## %bb.1: ## %cond.store
+; SSE2-NEXT: movd %xmm2, (%rdi)
+; SSE2-NEXT: LBB2_2: ## %else
+; SSE2-NEXT: packssdw %xmm0, %xmm4
+; SSE2-NEXT: movd %xmm4, %eax
+; SSE2-NEXT: shrl $16, %eax
+; SSE2-NEXT: testb $1, %al
+; SSE2-NEXT: je LBB2_4
+; SSE2-NEXT: ## %bb.3: ## %cond.store1
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,2,3]
+; SSE2-NEXT: movd %xmm4, 4(%rdi)
+; SSE2-NEXT: LBB2_4: ## %else2
+; SSE2-NEXT: pxor %xmm4, %xmm4
+; SSE2-NEXT: pcmpeqd %xmm4, %xmm0
+; SSE2-NEXT: pextrw $4, %xmm0, %eax
+; SSE2-NEXT: testb $1, %al
+; SSE2-NEXT: je LBB2_6
+; SSE2-NEXT: ## %bb.5: ## %cond.store3
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm2[2,3,0,1]
+; SSE2-NEXT: movd %xmm4, 8(%rdi)
+; SSE2-NEXT: LBB2_6: ## %else4
+; SSE2-NEXT: pextrw $6, %xmm0, %eax
+; SSE2-NEXT: testb $1, %al
+; SSE2-NEXT: je LBB2_8
+; SSE2-NEXT: ## %bb.7: ## %cond.store5
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[3,1,2,3]
+; SSE2-NEXT: movd %xmm0, 12(%rdi)
+; SSE2-NEXT: LBB2_8: ## %else6
+; SSE2-NEXT: pxor %xmm0, %xmm0
+; SSE2-NEXT: pcmpeqd %xmm1, %xmm0
+; SSE2-NEXT: pextrw $0, %xmm0, %eax
+; SSE2-NEXT: testb $1, %al
+; SSE2-NEXT: je LBB2_10
+; SSE2-NEXT: ## %bb.9: ## %cond.store7
+; SSE2-NEXT: movd %xmm3, 16(%rdi)
+; SSE2-NEXT: LBB2_10: ## %else8
+; SSE2-NEXT: pextrw $2, %xmm0, %eax
+; SSE2-NEXT: testb $1, %al
+; SSE2-NEXT: je LBB2_12
+; SSE2-NEXT: ## %bb.11: ## %cond.store9
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,2,3]
+; SSE2-NEXT: movd %xmm0, 20(%rdi)
+; SSE2-NEXT: LBB2_12: ## %else10
+; SSE2-NEXT: pxor %xmm0, %xmm0
+; SSE2-NEXT: pcmpeqd %xmm0, %xmm1
+; SSE2-NEXT: pextrw $4, %xmm1, %eax
+; SSE2-NEXT: testb $1, %al
+; SSE2-NEXT: je LBB2_14
+; SSE2-NEXT: ## %bb.13: ## %cond.store11
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,0,1]
+; SSE2-NEXT: movd %xmm0, 24(%rdi)
+; SSE2-NEXT: LBB2_14: ## %else12
+; SSE2-NEXT: pextrw $6, %xmm1, %eax
+; SSE2-NEXT: testb $1, %al
+; SSE2-NEXT: je LBB2_16
+; SSE2-NEXT: ## %bb.15: ## %cond.store13
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[3,1,2,3]
+; SSE2-NEXT: movd %xmm0, 28(%rdi)
+; SSE2-NEXT: LBB2_16: ## %else14
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: store_v8i32_v8i32:
+; SSE4: ## %bb.0:
+; SSE4-NEXT: pxor %xmm4, %xmm4
+; SSE4-NEXT: pcmpeqd %xmm0, %xmm4
+; SSE4-NEXT: pextrb $0, %xmm4, %eax
+; SSE4-NEXT: testb $1, %al
+; SSE4-NEXT: je LBB2_2
+; SSE4-NEXT: ## %bb.1: ## %cond.store
+; SSE4-NEXT: movss %xmm2, (%rdi)
+; SSE4-NEXT: LBB2_2: ## %else
+; SSE4-NEXT: pextrb $4, %xmm4, %eax
+; SSE4-NEXT: testb $1, %al
+; SSE4-NEXT: je LBB2_4
+; SSE4-NEXT: ## %bb.3: ## %cond.store1
+; SSE4-NEXT: extractps $1, %xmm2, 4(%rdi)
+; SSE4-NEXT: LBB2_4: ## %else2
+; SSE4-NEXT: pxor %xmm4, %xmm4
+; SSE4-NEXT: pcmpeqd %xmm4, %xmm0
+; SSE4-NEXT: pextrb $8, %xmm0, %eax
+; SSE4-NEXT: testb $1, %al
+; SSE4-NEXT: je LBB2_6
+; SSE4-NEXT: ## %bb.5: ## %cond.store3
+; SSE4-NEXT: extractps $2, %xmm2, 8(%rdi)
+; SSE4-NEXT: LBB2_6: ## %else4
+; SSE4-NEXT: pextrb $12, %xmm0, %eax
+; SSE4-NEXT: testb $1, %al
+; SSE4-NEXT: je LBB2_8
+; SSE4-NEXT: ## %bb.7: ## %cond.store5
+; SSE4-NEXT: extractps $3, %xmm2, 12(%rdi)
+; SSE4-NEXT: LBB2_8: ## %else6
+; SSE4-NEXT: pxor %xmm0, %xmm0
+; SSE4-NEXT: pcmpeqd %xmm1, %xmm0
+; SSE4-NEXT: pextrb $0, %xmm0, %eax
+; SSE4-NEXT: testb $1, %al
+; SSE4-NEXT: je LBB2_10
+; SSE4-NEXT: ## %bb.9: ## %cond.store7
+; SSE4-NEXT: movss %xmm3, 16(%rdi)
+; SSE4-NEXT: LBB2_10: ## %else8
+; SSE4-NEXT: pextrb $4, %xmm0, %eax
+; SSE4-NEXT: testb $1, %al
+; SSE4-NEXT: je LBB2_12
+; SSE4-NEXT: ## %bb.11: ## %cond.store9
+; SSE4-NEXT: extractps $1, %xmm3, 20(%rdi)
+; SSE4-NEXT: LBB2_12: ## %else10
+; SSE4-NEXT: pxor %xmm0, %xmm0
+; SSE4-NEXT: pcmpeqd %xmm0, %xmm1
+; SSE4-NEXT: pextrb $8, %xmm1, %eax
+; SSE4-NEXT: testb $1, %al
+; SSE4-NEXT: je LBB2_14
+; SSE4-NEXT: ## %bb.13: ## %cond.store11
+; SSE4-NEXT: extractps $2, %xmm3, 24(%rdi)
+; SSE4-NEXT: LBB2_14: ## %else12
+; SSE4-NEXT: pextrb $12, %xmm1, %eax
+; SSE4-NEXT: testb $1, %al
+; SSE4-NEXT: je LBB2_16
+; SSE4-NEXT: ## %bb.15: ## %cond.store13
+; SSE4-NEXT: extractps $3, %xmm3, 28(%rdi)
+; SSE4-NEXT: LBB2_16: ## %else14
+; SSE4-NEXT: retq
+;
 ; AVX1-LABEL: store_v8i32_v8i32:
 ; AVX1: ## %bb.0:
 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
@@ -99,18 +293,62 @@ define void @store_v8i32_v8i32(<8 x i32> %trigger, <8 x i32>* %addr, <8 x i32> %
 ; AVX512F-NEXT: vzeroupper
 ; AVX512F-NEXT: retq
 ;
-; SKX-LABEL: store_v8i32_v8i32:
-; SKX: ## %bb.0:
-; SKX-NEXT: vptestnmd %ymm0, %ymm0, %k1
-; SKX-NEXT: vmovdqu32 %ymm1, (%rdi) {%k1}
-; SKX-NEXT: vzeroupper
-; SKX-NEXT: retq
+; AVX512VLBW-LABEL: store_v8i32_v8i32:
+; AVX512VLBW: ## %bb.0:
+; AVX512VLBW-NEXT: vptestnmd %ymm0, %ymm0, %k1
+; AVX512VLBW-NEXT: vmovdqu32 %ymm1, (%rdi) {%k1}
+; AVX512VLBW-NEXT: vzeroupper
+; AVX512VLBW-NEXT: retq
 %mask = icmp eq <8 x i32> %trigger, zeroinitializer
 call void @llvm.masked.store.v8i32.p0v8i32(<8 x i32>%val, <8 x i32>* %addr, i32 4, <8 x i1>%mask)
 ret void
 }

 define void @store_v2f32_v2i32(<2 x i32> %trigger, <2 x float>* %addr, <2 x float> %val) {
+; SSE2-LABEL: store_v2f32_v2i32:
+; SSE2: ## %bb.0:
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: pcmpeqd %xmm0, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,0,3,2]
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: testb $1, %al
+; SSE2-NEXT: je LBB3_2
+; SSE2-NEXT: ## %bb.1: ## %cond.store
+; SSE2-NEXT: movss %xmm1, (%rdi)
+; SSE2-NEXT: LBB3_2: ## %else
+; SSE2-NEXT: pextrw $4, %xmm0, %eax
+; SSE2-NEXT: testb $1, %al
+; SSE2-NEXT: je LBB3_4
+; SSE2-NEXT: ## %bb.3: ## %cond.store1
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
+; SSE2-NEXT: movss %xmm1, 4(%rdi)
+; SSE2-NEXT: LBB3_4: ## %else2
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: store_v2f32_v2i32:
+; SSE4: ## %bb.0:
+; SSE4-NEXT: pxor %xmm2, %xmm2
+; SSE4-NEXT: movdqa %xmm0, %xmm3
+; SSE4-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
+; SSE4-NEXT: pcmpeqq %xmm2, %xmm3
+; SSE4-NEXT: pextrb $0, %xmm3, %eax
+; SSE4-NEXT: testb $1, %al
+; SSE4-NEXT: je LBB3_2
+; SSE4-NEXT: ## %bb.1: ## %cond.store
+; SSE4-NEXT: movss %xmm1, (%rdi)
+; SSE4-NEXT: LBB3_2: ## %else
+; SSE4-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; SSE4-NEXT: pcmpeqq %xmm2, %xmm0
+; SSE4-NEXT: pextrb $8, %xmm0, %eax
+; SSE4-NEXT: testb $1, %al
+; SSE4-NEXT: je LBB3_4
+; SSE4-NEXT: ## %bb.3: ## %cond.store1
+; SSE4-NEXT: extractps $1, %xmm1, 4(%rdi)
+; SSE4-NEXT: LBB3_4: ## %else2
+; SSE4-NEXT: retq
+;
 ; AVX1-LABEL: store_v2f32_v2i32:
 ; AVX1: ## %bb.0:
 ; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
@@ -141,19 +379,63 @@ define void @store_v2f32_v2i32(<2 x i32> %trigger, <2 x float>* %addr, <2 x floa
 ; AVX512F-NEXT: vzeroupper
 ; AVX512F-NEXT: retq
 ;
-; SKX-LABEL: store_v2f32_v2i32:
-; SKX: ## %bb.0:
-; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; SKX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
-; SKX-NEXT: vptestnmq %xmm0, %xmm0, %k1
-; SKX-NEXT: vmovups %xmm1, (%rdi) {%k1}
-; SKX-NEXT: retq
+; AVX512VLBW-LABEL: store_v2f32_v2i32:
+; AVX512VLBW: ## %bb.0:
+; AVX512VLBW-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512VLBW-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
+; AVX512VLBW-NEXT: vptestnmq %xmm0, %xmm0, %k1
+; AVX512VLBW-NEXT: vmovups %xmm1, (%rdi) {%k1}
+; AVX512VLBW-NEXT: retq
 %mask = icmp eq <2 x i32> %trigger, zeroinitializer
 call void @llvm.masked.store.v2f32.p0v2f32(<2 x float>%val, <2 x float>* %addr, i32 4, <2 x i1>%mask)
 ret void
 }

 define void @store_v2i32_v2i32(<2 x i32> %trigger, <2 x i32>* %addr, <2 x i32> %val) {
+; SSE2-LABEL: store_v2i32_v2i32:
+; SSE2: ## %bb.0:
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: pcmpeqd %xmm0, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,0,3,2]
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: testb $1, %al
+; SSE2-NEXT: je LBB4_2
+; SSE2-NEXT: ## %bb.1: ## %cond.store
+; SSE2-NEXT: movd %xmm1, (%rdi)
+; SSE2-NEXT: LBB4_2: ## %else
+; SSE2-NEXT: pextrw $4, %xmm0, %eax
+; SSE2-NEXT: testb $1, %al
+; SSE2-NEXT: je LBB4_4
+; SSE2-NEXT: ## %bb.3: ## %cond.store1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE2-NEXT: movd %xmm0, 4(%rdi)
+; SSE2-NEXT: LBB4_4: ## %else2
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: store_v2i32_v2i32:
+; SSE4: ## %bb.0:
+; SSE4-NEXT: pxor %xmm2, %xmm2
+; SSE4-NEXT: movdqa %xmm0, %xmm3
+; SSE4-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
+; SSE4-NEXT: pcmpeqq %xmm2, %xmm3
+; SSE4-NEXT: pextrb $0, %xmm3, %eax
+; SSE4-NEXT: testb $1, %al
+; SSE4-NEXT: je LBB4_2
+; SSE4-NEXT: ## %bb.1: ## %cond.store
+; SSE4-NEXT: movss %xmm1, (%rdi)
+; SSE4-NEXT: LBB4_2: ## %else
+; SSE4-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; SSE4-NEXT: pcmpeqq %xmm2, %xmm0
+; SSE4-NEXT: pextrb $8, %xmm0, %eax
+; SSE4-NEXT: testb $1, %al
+; SSE4-NEXT: je LBB4_4
+; SSE4-NEXT: ## %bb.3: ## %cond.store1
+; SSE4-NEXT: extractps $2, %xmm1, 4(%rdi)
+; SSE4-NEXT: LBB4_4: ## %else2
+; SSE4-NEXT: retq
+;
 ; AVX1-LABEL: store_v2i32_v2i32:
 ; AVX1: ## %bb.0:
 ; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
@@ -186,19 +468,24 @@ define void @store_v2i32_v2i32(<2 x i32> %trigger, <2 x i32>* %addr, <2 x i32> %
 ; AVX512F-NEXT: vzeroupper
 ; AVX512F-NEXT: retq
 ;
-; SKX-LABEL: store_v2i32_v2i32:
-; SKX: ## %bb.0:
-; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; SKX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
-; SKX-NEXT: vptestnmq %xmm0, %xmm0, %k1
-; SKX-NEXT: vpmovqd %xmm1, (%rdi) {%k1}
-; SKX-NEXT: retq
+; AVX512VLBW-LABEL: store_v2i32_v2i32:
+; AVX512VLBW: ## %bb.0:
+; AVX512VLBW-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512VLBW-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
+; AVX512VLBW-NEXT: vptestnmq %xmm0, %xmm0, %k1
+; AVX512VLBW-NEXT: vpmovqd %xmm1, (%rdi) {%k1}
+; AVX512VLBW-NEXT: retq
 %mask = icmp eq <2 x i32> %trigger, zeroinitializer
 call void @llvm.masked.store.v2i32.p0v2i32(<2 x i32>%val, <2 x i32>* %addr, i32 4, <2 x i1>%mask)
 ret void
 }

 define void @const_store_v4i32_v4i32(<4 x i32> %trigger, <4 x i32>* %addr, <4 x i32> %val) {
+; SSE-LABEL: const_store_v4i32_v4i32:
+; SSE: ## %bb.0:
+; SSE-NEXT: movups %xmm1, (%rdi)
+; SSE-NEXT: retq
+;
 ; AVX1-LABEL: const_store_v4i32_v4i32:
 ; AVX1: ## %bb.0:
 ; AVX1-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -220,11 +507,11 @@ define void @const_store_v4i32_v4i32(<4 x i32> %trigger, <4 x i32>* %addr, <4 x
 ; AVX512F-NEXT: vzeroupper
 ; AVX512F-NEXT: retq
 ;
-; SKX-LABEL: const_store_v4i32_v4i32:
-; SKX: ## %bb.0:
-; SKX-NEXT: kxnorw %k0, %k0, %k1
-; SKX-NEXT: vmovdqu32 %xmm1, (%rdi) {%k1}
-; SKX-NEXT: retq
+; AVX512VLBW-LABEL: const_store_v4i32_v4i32:
+; AVX512VLBW: ## %bb.0:
+; AVX512VLBW-NEXT: kxnorw %k0, %k0, %k1
+; AVX512VLBW-NEXT: vmovdqu32 %xmm1, (%rdi) {%k1}
+; AVX512VLBW-NEXT: retq
 %mask = icmp eq <4 x i32> %trigger, zeroinitializer
 call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>%val, <4 x i32>* %addr, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
 ret void
 }
@@ -233,15 +520,15 @@ define void @const_store_v4i32_v4i32(<4 x i32> %trigger, <4 x i32>* %addr, <4 x
 ; When only one element of the mask is set, reduce to a scalar store.

 define void @one_mask_bit_set1(<4 x i32>* %addr, <4 x i32> %val) {
+; SSE-LABEL: one_mask_bit_set1:
+; SSE: ## %bb.0:
+; SSE-NEXT: movss %xmm0, (%rdi)
+; SSE-NEXT: retq
+;
 ; AVX-LABEL: one_mask_bit_set1:
 ; AVX: ## %bb.0:
 ; AVX-NEXT: vmovss %xmm0, (%rdi)
 ; AVX-NEXT: retq
-;
-; AVX512-LABEL: one_mask_bit_set1:
-; AVX512: ## %bb.0:
-; AVX512-NEXT: vmovss %xmm0, (%rdi)
-; AVX512-NEXT: retq
 call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %val, <4 x i32>* %addr, i32 4, <4 x i1> <i1 true, i1 false, i1 false, i1 false>)
 ret void
 }
@@ -249,15 +536,21 @@ define void @one_mask_bit_set1(<4 x i32>* %addr, <4 x i32> %val) {
 ; Choose a different element to show that the correct address offset is produced.

 define void @one_mask_bit_set2(<4 x float>* %addr, <4 x float> %val) {
+; SSE2-LABEL: one_mask_bit_set2:
+; SSE2: ## %bb.0:
+; SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
+; SSE2-NEXT: movss %xmm0, 8(%rdi)
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: one_mask_bit_set2:
+; SSE4: ## %bb.0:
+; SSE4-NEXT: extractps $2, %xmm0, 8(%rdi)
+; SSE4-NEXT: retq
+;
 ; AVX-LABEL: one_mask_bit_set2:
 ; AVX: ## %bb.0:
 ; AVX-NEXT: vextractps $2, %xmm0, 8(%rdi)
 ; AVX-NEXT: retq
-;
-; AVX512-LABEL: one_mask_bit_set2:
-; AVX512: ## %bb.0:
-; AVX512-NEXT: vextractps $2, %xmm0, 8(%rdi)
-; AVX512-NEXT: retq
 call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %val, <4 x float>* %addr, i32 4, <4 x i1> <i1 false, i1 false, i1 true, i1 false>)
 ret void
 }
@@ -265,19 +558,17 @@ define void @one_mask_bit_set2(<4 x float>* %addr, <4 x float> %val) {
 ; Choose a different scalar type and a high element of a 256-bit vector because AVX doesn't support those evenly.

 define void @one_mask_bit_set3(<4 x i64>* %addr, <4 x i64> %val) {
+; SSE-LABEL: one_mask_bit_set3:
+; SSE: ## %bb.0:
+; SSE-NEXT: movlps %xmm1, 16(%rdi)
+; SSE-NEXT: retq
+;
 ; AVX-LABEL: one_mask_bit_set3:
 ; AVX: ## %bb.0:
 ; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
 ; AVX-NEXT: vmovlps %xmm0, 16(%rdi)
 ; AVX-NEXT: vzeroupper
 ; AVX-NEXT: retq
-;
-; AVX512-LABEL: one_mask_bit_set3:
-; AVX512: ## %bb.0:
-; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX512-NEXT: vmovlps %xmm0, 16(%rdi)
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
 call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> %val, <4 x i64>* %addr, i32 4, <4 x i1> <i1 false, i1 false, i1 true, i1 false>)
 ret void
 }
@@ -285,19 +576,17 @@ define void @one_mask_bit_set3(<4 x i64>* %addr, <4 x i64> %val) {
 ; Choose a different scalar type and a high element of a 256-bit vector because AVX doesn't support those evenly.

 define void @one_mask_bit_set4(<4 x double>* %addr, <4 x double> %val) {
+; SSE-LABEL: one_mask_bit_set4:
+; SSE: ## %bb.0:
+; SSE-NEXT: movhpd %xmm1, 24(%rdi)
+; SSE-NEXT: retq
+;
 ; AVX-LABEL: one_mask_bit_set4:
 ; AVX: ## %bb.0:
 ; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
 ; AVX-NEXT: vmovhpd %xmm0, 24(%rdi)
 ; AVX-NEXT: vzeroupper
 ; AVX-NEXT: retq
-;
-; AVX512-LABEL: one_mask_bit_set4:
-; AVX512: ## %bb.0:
-; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX512-NEXT: vmovhpd %xmm0, 24(%rdi)
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
 call void @llvm.masked.store.v4f64.p0v4f64(<4 x double> %val, <4 x double>* %addr, i32 4, <4 x i1> <i1 false, i1 false, i1 false, i1 true>)
 ret void
 }
@@ -305,12 +594,17 @@ define void @one_mask_bit_set4(<4 x double>* %addr, <4 x double> %val) {
 ; Try a 512-bit vector to make sure AVX doesn't die and AVX512 works as expected.

 define void @one_mask_bit_set5(<8 x double>* %addr, <8 x double> %val) {
-; AVX-LABEL: one_mask_bit_set5:
-; AVX: ## %bb.0:
-; AVX-NEXT: vextractf128 $1, %ymm1, %xmm0
-; AVX-NEXT: vmovlps %xmm0, 48(%rdi)
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
+; SSE-LABEL: one_mask_bit_set5:
+; SSE: ## %bb.0:
+; SSE-NEXT: movlps %xmm3, 48(%rdi)
+; SSE-NEXT: retq
+;
+; AVX1OR2-LABEL: one_mask_bit_set5:
+; AVX1OR2: ## %bb.0:
+; AVX1OR2-NEXT: vextractf128 $1, %ymm1, %xmm0
+; AVX1OR2-NEXT: vmovlps %xmm0, 48(%rdi)
+; AVX1OR2-NEXT: vzeroupper
+; AVX1OR2-NEXT: retq
 ;
 ; AVX512-LABEL: one_mask_bit_set5:
 ; AVX512: ## %bb.0:
@@ -326,10 +620,79 @@ define void @one_mask_bit_set5(<8 x double>* %addr, <8 x double> %val) {
 ; FIXME: The AVX512 code should be improved to use 'vpmovd2m'. Add tests for 512-bit vectors when implementing that.

 define void @trunc_mask(<4 x float> %x, <4 x float>* %ptr, <4 x float> %y, <4 x i32> %mask) {
-; AVX-LABEL: trunc_mask:
-; AVX: ## %bb.0:
-; AVX-NEXT: vmaskmovps %xmm0, %xmm2, (%rdi)
-; AVX-NEXT: retq
+; SSE2-LABEL: trunc_mask:
+; SSE2: ## %bb.0:
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm1
+; SSE2-NEXT: movd %xmm1, %eax
+; SSE2-NEXT: testb $1, %al
+; SSE2-NEXT: je LBB11_2
+; SSE2-NEXT: ## %bb.1: ## %cond.store
+; SSE2-NEXT: movss %xmm0, (%rdi)
+; SSE2-NEXT: LBB11_2: ## %else
+; SSE2-NEXT: pextrw $2, %xmm1, %eax
+; SSE2-NEXT: testb $1, %al
+; SSE2-NEXT: je LBB11_4
+; SSE2-NEXT: ## %bb.3: ## %cond.store1
+; SSE2-NEXT: movaps %xmm0, %xmm1
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[2,3]
+; SSE2-NEXT: movss %xmm1, 4(%rdi)
+; SSE2-NEXT: LBB11_4: ## %else2
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm1
+; SSE2-NEXT: pextrw $4, %xmm1, %eax
+; SSE2-NEXT: testb $1, %al
+; SSE2-NEXT: je LBB11_6
+; SSE2-NEXT: ## %bb.5: ## %cond.store3
+; SSE2-NEXT: movaps %xmm0, %xmm2
+; SSE2-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
+; SSE2-NEXT: movss %xmm2, 8(%rdi)
+; SSE2-NEXT: LBB11_6: ## %else4
+; SSE2-NEXT: pextrw $6, %xmm1, %eax
+; SSE2-NEXT: testb $1, %al
+; SSE2-NEXT: je LBB11_8
+; SSE2-NEXT: ## %bb.7: ## %cond.store5
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE2-NEXT: movss %xmm0, 12(%rdi)
+; SSE2-NEXT: LBB11_8: ## %else6
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: trunc_mask:
+; SSE4: ## %bb.0:
+; SSE4-NEXT: pxor %xmm1, %xmm1
+; SSE4-NEXT: pcmpgtd %xmm2, %xmm1
+; SSE4-NEXT: pextrb $0, %xmm1, %eax
+; SSE4-NEXT: testb $1, %al
+; SSE4-NEXT: je LBB11_2
+; SSE4-NEXT: ## %bb.1: ## %cond.store
+; SSE4-NEXT: movss %xmm0, (%rdi)
+; SSE4-NEXT: LBB11_2: ## %else
+; SSE4-NEXT: pextrb $4, %xmm1, %eax
+; SSE4-NEXT: testb $1, %al
+; SSE4-NEXT: je LBB11_4
+; SSE4-NEXT: ## %bb.3: ## %cond.store1
+; SSE4-NEXT: extractps $1, %xmm0, 4(%rdi)
+; SSE4-NEXT: LBB11_4: ## %else2
+; SSE4-NEXT: pxor %xmm1, %xmm1
+; SSE4-NEXT: pcmpgtd %xmm2, %xmm1
+; SSE4-NEXT: pextrb $8, %xmm1, %eax
+; SSE4-NEXT: testb $1, %al
+; SSE4-NEXT: je LBB11_6
+; SSE4-NEXT: ## %bb.5: ## %cond.store3
+; SSE4-NEXT: extractps $2, %xmm0, 8(%rdi)
+; SSE4-NEXT: LBB11_6: ## %else4
+; SSE4-NEXT: pextrb $12, %xmm1, %eax
+; SSE4-NEXT: testb $1, %al
+; SSE4-NEXT: je LBB11_8
+; SSE4-NEXT: ## %bb.7: ## %cond.store5
+; SSE4-NEXT: extractps $3, %xmm0, 12(%rdi)
+; SSE4-NEXT: LBB11_8: ## %else6
+; SSE4-NEXT: retq
+;
+; AVX1OR2-LABEL: trunc_mask:
+; AVX1OR2: ## %bb.0:
+; AVX1OR2-NEXT: vmaskmovps %xmm0, %xmm2, (%rdi)
+; AVX1OR2-NEXT: retq
 ;
 ; AVX512F-LABEL: trunc_mask:
 ; AVX512F: ## %bb.0:
@@ -343,12 +706,12 @@ define void @trunc_mask(<4 x float> %x, <4 x float>* %ptr, <4 x float> %y, <4 x
 ; AVX512F-NEXT: vzeroupper
 ; AVX512F-NEXT: retq
 ;
-; SKX-LABEL: trunc_mask:
-; SKX: ## %bb.0:
-; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; SKX-NEXT: vpcmpgtd %xmm2, %xmm1, %k1
-; SKX-NEXT: vmovups %xmm0, (%rdi) {%k1}
-; SKX-NEXT: retq
+; AVX512VLBW-LABEL: trunc_mask:
+; AVX512VLBW: ## %bb.0:
+; AVX512VLBW-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512VLBW-NEXT: vpcmpgtd %xmm2, %xmm1, %k1
+; AVX512VLBW-NEXT: vmovups %xmm0, (%rdi) {%k1}
+; AVX512VLBW-NEXT: retq
 %bool_mask = icmp slt <4 x i32> %mask, zeroinitializer
 call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %x, <4 x float>* %ptr, i32 1, <4 x i1> %bool_mask)
 ret void
 }
@@ -357,6 +720,62 @@ define void @trunc_mask(<4 x float> %x, <4 x float>* %ptr, <4 x float> %y, <4 x
 ; SimplifyDemandedBits eliminates an ashr here.

 define void @masked_store_bool_mask_demand_trunc_sext(<4 x double> %x, <4 x double>* %p, <4 x i32> %masksrc) {
+; SSE2-LABEL: masked_store_bool_mask_demand_trunc_sext:
+; SSE2: ## %bb.0:
+; SSE2-NEXT: movd %xmm2, %eax
+; SSE2-NEXT: testb $1, %al
+; SSE2-NEXT: je LBB12_2
+; SSE2-NEXT: ## %bb.1: ## %cond.store
+; SSE2-NEXT: movlpd %xmm0, (%rdi)
+; SSE2-NEXT: LBB12_2: ## %else
+; SSE2-NEXT: pextrw $2, %xmm2, %eax
+; SSE2-NEXT: testb $1, %al
+; SSE2-NEXT: je LBB12_4
+; SSE2-NEXT: ## %bb.3: ## %cond.store1
+; SSE2-NEXT: movhpd %xmm0, 8(%rdi)
+; SSE2-NEXT: LBB12_4: ## %else2
+; SSE2-NEXT: pextrw $4, %xmm2, %eax
+; SSE2-NEXT: testb $1, %al
+; SSE2-NEXT: je LBB12_6
+; SSE2-NEXT: ## %bb.5: ## %cond.store3
+; SSE2-NEXT: movlpd %xmm1, 16(%rdi)
+; SSE2-NEXT: LBB12_6: ## %else4
+; SSE2-NEXT: pextrw $6, %xmm2, %eax
+; SSE2-NEXT: testb $1, %al
+; SSE2-NEXT: je LBB12_8
+; SSE2-NEXT: ## %bb.7: ## %cond.store5
+; SSE2-NEXT: movhpd %xmm1, 24(%rdi)
+; SSE2-NEXT: LBB12_8: ## %else6
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: masked_store_bool_mask_demand_trunc_sext:
+; SSE4: ## %bb.0:
+; SSE4-NEXT: pextrb $0, %xmm2, %eax
+; SSE4-NEXT: testb $1, %al
+; SSE4-NEXT: je LBB12_2
+; SSE4-NEXT: ## %bb.1: ## %cond.store
+; SSE4-NEXT: movlpd %xmm0, (%rdi)
+; SSE4-NEXT: LBB12_2: ## %else
+; SSE4-NEXT: pextrb $4, %xmm2, %eax
+; SSE4-NEXT: testb $1, %al
+; SSE4-NEXT: je LBB12_4
+; SSE4-NEXT: ## %bb.3: ## %cond.store1
+; SSE4-NEXT: movhpd %xmm0, 8(%rdi)
+; SSE4-NEXT: LBB12_4: ## %else2
+; SSE4-NEXT: pextrb $8, %xmm2, %eax
+; SSE4-NEXT: testb $1, %al
+; SSE4-NEXT: je LBB12_6
+; SSE4-NEXT: ## %bb.5: ## %cond.store3
+; SSE4-NEXT: movlpd %xmm1, 16(%rdi)
+; SSE4-NEXT: LBB12_6: ## %else4
+; SSE4-NEXT: pextrb $12, %xmm2, %eax
+; SSE4-NEXT: testb $1, %al
+; SSE4-NEXT: je LBB12_8
+; SSE4-NEXT: ## %bb.7: ## %cond.store5
+; SSE4-NEXT: movhpd %xmm1, 24(%rdi)
+; SSE4-NEXT: LBB12_8: ## %else6
+; SSE4-NEXT: retq
+;
 ; AVX1-LABEL: masked_store_bool_mask_demand_trunc_sext:
 ; AVX1: ## %bb.0:
 ; AVX1-NEXT: vpslld $31, %xmm1, %xmm1
@@ -387,13 +806,13 @@ define void @masked_store_bool_mask_demand_trunc_sext(<4 x double> %x, <4 x doub
 ; AVX512F-NEXT: vzeroupper
 ; AVX512F-NEXT: retq
 ;
-; SKX-LABEL: masked_store_bool_mask_demand_trunc_sext:
-; SKX: ## %bb.0:
-; SKX-NEXT: vpslld $31, %xmm1, %xmm1
-; SKX-NEXT: vptestmd %xmm1, %xmm1, %k1
-; SKX-NEXT: vmovupd %ymm0, (%rdi) {%k1}
-; SKX-NEXT: vzeroupper
-; SKX-NEXT: retq
+; AVX512VLBW-LABEL: masked_store_bool_mask_demand_trunc_sext:
+; AVX512VLBW: ## %bb.0:
+; AVX512VLBW-NEXT: vpslld $31, %xmm1, %xmm1
+; AVX512VLBW-NEXT: vptestmd %xmm1, %xmm1, %k1
+; AVX512VLBW-NEXT: vmovupd %ymm0, (%rdi) {%k1}
+; AVX512VLBW-NEXT: vzeroupper
+; AVX512VLBW-NEXT: retq
 %sext = sext <4 x i32> %masksrc to <4 x i64>
 %boolmask = trunc <4 x i64> %sext to <4 x i1>
 call void @llvm.masked.store.v4f64.p0v4f64(<4 x double> %x, <4 x double>* %p, i32 4, <4 x i1> %boolmask)
 ret void
 }
@@ -404,6 +823,56 @@ define void @masked_store_bool_mask_demand_trunc_sext(<4 x double> %x, <4 x doub
 ; This used to assert in type legalization. PR38436
 ; FIXME: The codegen for AVX512 should use KSHIFT to zero the upper bits of the mask.
 define void @widen_masked_store(<3 x i32> %v, <3 x i32>* %p, <3 x i1> %mask) {
+; SSE2-LABEL: widen_masked_store:
+; SSE2: ## %bb.0:
+; SSE2-NEXT: testb $1, %sil
+; SSE2-NEXT: jne LBB13_1
+; SSE2-NEXT: ## %bb.2: ## %else
+; SSE2-NEXT: testb $1, %dl
+; SSE2-NEXT: jne LBB13_3
+; SSE2-NEXT: LBB13_4: ## %else2
+; SSE2-NEXT: testb $1, %cl
+; SSE2-NEXT: jne LBB13_5
+; SSE2-NEXT: LBB13_6: ## %else4
+; SSE2-NEXT: retq
+; SSE2-NEXT: LBB13_1: ## %cond.store
+; SSE2-NEXT: movd %xmm0, (%rdi)
+; SSE2-NEXT: testb $1, %dl
+; SSE2-NEXT: je LBB13_4
+; SSE2-NEXT: LBB13_3: ## %cond.store1
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; SSE2-NEXT: movd %xmm1, 4(%rdi)
+; SSE2-NEXT: testb $1, %cl
+; SSE2-NEXT: je LBB13_6
+; SSE2-NEXT: LBB13_5: ## %cond.store3
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE2-NEXT: movd %xmm0, 8(%rdi)
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: widen_masked_store:
+; SSE4: ## %bb.0:
+; SSE4-NEXT: testb $1, %sil
+; SSE4-NEXT: jne LBB13_1
+; SSE4-NEXT: ## %bb.2: ## %else
+; SSE4-NEXT: testb $1, %dl
+; SSE4-NEXT: jne LBB13_3
+; SSE4-NEXT: LBB13_4: ## %else2
+; SSE4-NEXT: testb $1, %cl
+; SSE4-NEXT: jne LBB13_5
+; SSE4-NEXT: LBB13_6: ## %else4
+; SSE4-NEXT: retq
+; SSE4-NEXT: LBB13_1: ## %cond.store
+; SSE4-NEXT: movss %xmm0, (%rdi)
+; SSE4-NEXT: testb $1, %dl
+; SSE4-NEXT: je LBB13_4
+; SSE4-NEXT: LBB13_3: ## %cond.store1
+; SSE4-NEXT: extractps $1, %xmm0, 4(%rdi)
+; SSE4-NEXT: testb $1, %cl
+; SSE4-NEXT: je LBB13_6
+; SSE4-NEXT: LBB13_5: ## %cond.store3
+; SSE4-NEXT: extractps $2, %xmm0, 8(%rdi)
+; SSE4-NEXT: retq
+;
 ; AVX1-LABEL: widen_masked_store:
 ; AVX1: ## %bb.0:
 ; AVX1-NEXT: vmovd %edx, %xmm1
@@ -441,17 +910,17 @@ define void @widen_masked_store(<3 x i32> %v, <3 x i32>* %p, <3 x i1> %mask) {
 ; AVX512F-NEXT: vzeroupper
 ; AVX512F-NEXT: retq
 ;
-; SKX-LABEL: widen_masked_store:
-; SKX: ## %bb.0:
-; SKX-NEXT: vpslld $31, %xmm1, %xmm1
-; SKX-NEXT: vptestmd %xmm1, %xmm1, %k1
-; SKX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
-; SKX-NEXT: vmovdqa32 %xmm1, %xmm1 {%k1} {z}
-; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; SKX-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[3]
-; SKX-NEXT: vptestmd %xmm1, %xmm1, %k1
-; SKX-NEXT: vmovdqa32 %xmm0, (%rdi) {%k1}
-; SKX-NEXT: retq
+; AVX512VLBW-LABEL: widen_masked_store:
+; AVX512VLBW: ## %bb.0:
+; AVX512VLBW-NEXT: vpslld $31, %xmm1, %xmm1
+; AVX512VLBW-NEXT: vptestmd %xmm1, %xmm1, %k1
+; AVX512VLBW-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX512VLBW-NEXT: vmovdqa32 %xmm1, %xmm1 {%k1} {z}
+; AVX512VLBW-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512VLBW-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[3]
+; AVX512VLBW-NEXT: vptestmd %xmm1, %xmm1, %k1
+; AVX512VLBW-NEXT: vmovdqa32 %xmm0, (%rdi) {%k1}
+; AVX512VLBW-NEXT: retq
 call void @llvm.masked.store.v3i32(<3 x i32> %v, <3 x i32>* %p, i32 16, <3 x i1> %mask)
 ret void
 }
@@ -462,6 +931,7 @@ declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32, <4 x i
 declare void @llvm.masked.store.v4i64.p0v4i64(<4 x i64>, <4 x i64>*, i32, <4 x i1>)
 declare void @llvm.masked.store.v2f32.p0v2f32(<2 x float>, <2 x float>*, i32, <2 x i1>)
 declare void @llvm.masked.store.v2i32.p0v2i32(<2 x i32>, <2 x i32>*, i32, <2 x i1>)
+declare void @llvm.masked.store.v1i32.p0v1i32(<1 x i32>, <1 x i32>*, i32, <1 x i1>)
 declare void @llvm.masked.store.v4f32.p0v4f32(<4 x float>, <4 x float>*, i32, <4 x i1>)
 declare void @llvm.masked.store.v8f64.p0v8f64(<8 x double>, <8 x double>*, i32, <8 x i1>)
 declare void @llvm.masked.store.v4f64.p0v4f64(<4 x double>, <4 x double>*, i32, <4 x i1>)
-- 
2.11.0
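
For anyone reproducing the constant-mask checks: on the AVX512VLBW configuration each i1 element of the mask constant becomes one bit of the k-register immediate, least-significant element first, so <i1 true, i1 false, i1 true, i1 true> is 0b1101 = 13 — the movb $13 expected by the mload_constmask_v4f32 checks. A minimal standalone sketch of that lowering (illustrative file, not part of the patch; the function name is made up, and in practice the CHECK lines would be regenerated with utils/update_llc_test_checks.py):

; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=avx512f,avx512bw,avx512vl | FileCheck %s

; The <4 x i1> constant selects elements 0, 2 and 3, i.e. k-mask bits 0b1101 = 13.
define <4 x float> @kmask_immediate_demo(<4 x float>* %addr, <4 x float> %dst) {
; CHECK: movb $13, %al
; CHECK: kmovd %eax, %k1
; CHECK: vmovups (%rdi), %xmm0 {%k1}
  %res = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %addr, i32 4, <4 x i1> <i1 true, i1 false, i1 true, i1 true>, <4 x float> %dst)
  ret <4 x float> %res
}

declare <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>*, i32, <4 x i1>, <4 x float>)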
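
The new SSE RUN lines exercise the configuration the deleted comment had deferred: with no AVX maskmov instructions available, a masked store with a variable mask is scalarized into a vector compare, a per-element mask-bit test, and a conditional scalar store — the cond.store/else blocks that dominate the SSE2/SSE4 checks. A minimal sketch of just that pattern (again illustrative, with a made-up function name; the CHECK lines below only spot-check the shape of the output):

; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=sse2 | FileCheck %s

; Without maskmov, each lane gets its own test-and-branch around a scalar store.
define void @scalarized_store_demo(<4 x i32> %trigger, <4 x i32>* %addr, <4 x i32> %val) {
; CHECK: pcmpeqd
; CHECK: testb $1, %al
; CHECK: ## %cond.store
  %mask = icmp eq <4 x i32> %trigger, zeroinitializer
  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %val, <4 x i32>* %addr, i32 4, <4 x i1> %mask)
  ret void
}

declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32, <4 x i1>)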