From ad818a725af6295f471e5542b1f0454f4d086839 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim
Date: Mon, 23 Oct 2017 14:17:59 +0000
Subject: [PATCH] [X86][AVX] Regenerate AVX intrinsics tests on 32 + 64-bit targets

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@316325 91177308-0d34-0410-b5e6-96231b3b80d8
---
 test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll | 558 ++++++++-----------
 test/CodeGen/X86/avx-intrinsics-x86.ll         | 452 ++++++++++++--------
 test/CodeGen/X86/avx-intrinsics-x86_64.ll      |  12 +-
 3 files changed, 460 insertions(+), 562 deletions(-)

diff --git a/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll b/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll
index 0219edabac1..0451f6fce46 100644
--- a/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll
+++ b/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll
@@ -1,49 +1,34 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X86
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=CHECK --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=CHECK --check-prefix=X64
 
 ; We don't check any vinsertf128 variant with immediate 0 because that's just a blend.
 
 define <4 x double> @test_x86_avx_vinsertf128_pd_256_1(<4 x double> %a0, <2 x double> %a1) {
-; X86-LABEL: test_x86_avx_vinsertf128_pd_256_1:
-; X86: # BB#0:
-; X86-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_x86_avx_vinsertf128_pd_256_1:
-; X64: # BB#0:
-; X64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; X64-NEXT: retq
+; CHECK-LABEL: test_x86_avx_vinsertf128_pd_256_1:
+; CHECK: # BB#0:
+; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; CHECK-NEXT: ret{{[l|q]}}
 %res = call <4 x double> @llvm.x86.avx.vinsertf128.pd.256(<4 x double> %a0, <2 x double> %a1, i8 1)
 ret <4 x double> %res
 }
 declare <4 x double> @llvm.x86.avx.vinsertf128.pd.256(<4 x double>, <2 x double>, i8) nounwind readnone
 
 define <8 x float> @test_x86_avx_vinsertf128_ps_256_1(<8 x float> %a0, <4 x float> %a1) {
-; X86-LABEL: test_x86_avx_vinsertf128_ps_256_1:
-; X86: # BB#0:
-; X86-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_x86_avx_vinsertf128_ps_256_1:
-; X64: # BB#0:
-; X64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; X64-NEXT: retq
+; CHECK-LABEL: test_x86_avx_vinsertf128_ps_256_1:
+; CHECK: # BB#0:
+; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; CHECK-NEXT: ret{{[l|q]}}
 %res = call <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float> %a0, <4 x float> %a1, i8 1)
 ret <8 x float> %res
 }
 declare <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float>, <4 x float>, i8) nounwind readnone
 
 define <8 x i32> @test_x86_avx_vinsertf128_si_256_1(<8 x i32> %a0, <4 x i32> %a1) {
-; X86-LABEL: test_x86_avx_vinsertf128_si_256_1:
-; X86: # BB#0:
-; X86-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_x86_avx_vinsertf128_si_256_1:
-; X64: # BB#0:
-; X64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; X64-NEXT: retq
+; CHECK-LABEL: test_x86_avx_vinsertf128_si_256_1:
+; CHECK: # BB#0:
+; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; CHECK-NEXT: ret{{[l|q]}}
 %res = call <8 x i32> @llvm.x86.avx.vinsertf128.si.256(<8 x i32> %a0, <4 x i32> %a1, i8 1)
 ret <8 x i32> %res
 }
@@ -52,17 +37,11 @@ define <8 x i32> @test_x86_avx_vinsertf128_si_256_1(<8 x i32> %a0, <4 x i32> %a1
 ; of a vinsertf128 $0 which should be optimized into a blend, so just check that it's
 ; not a vinsertf128 $1.
 define <8 x i32> @test_x86_avx_vinsertf128_si_256_2(<8 x i32> %a0, <4 x i32> %a1) {
-; X86-LABEL: test_x86_avx_vinsertf128_si_256_2:
-; X86: # BB#0:
-; X86-NEXT: # kill: %XMM1 %XMM1 %YMM1
-; X86-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
-; X86-NEXT: retl
-;
-; X64-LABEL: test_x86_avx_vinsertf128_si_256_2:
-; X64: # BB#0:
-; X64-NEXT: # kill: %XMM1 %XMM1 %YMM1
-; X64-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
-; X64-NEXT: retq
+; CHECK-LABEL: test_x86_avx_vinsertf128_si_256_2:
+; CHECK: # BB#0:
+; CHECK-NEXT: # kill: %XMM1 %XMM1 %YMM1
+; CHECK-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
+; CHECK-NEXT: ret{{[l|q]}}
 %res = call <8 x i32> @llvm.x86.avx.vinsertf128.si.256(<8 x i32> %a0, <4 x i32> %a1, i8 2)
 ret <8 x i32> %res
 }
@@ -71,51 +50,33 @@ declare <8 x i32> @llvm.x86.avx.vinsertf128.si.256(<8 x i32>, <4 x i32>, i8) nou
 ; We don't check any vextractf128 variant with immediate 0 because that's just a move.
 
 define <2 x double> @test_x86_avx_vextractf128_pd_256_1(<4 x double> %a0) {
-; X86-LABEL: test_x86_avx_vextractf128_pd_256_1:
-; X86: # BB#0:
-; X86-NEXT: vextractf128 $1, %ymm0, %xmm0
-; X86-NEXT: vzeroupper
-; X86-NEXT: retl
-;
-; X64-LABEL: test_x86_avx_vextractf128_pd_256_1:
-; X64: # BB#0:
-; X64-NEXT: vextractf128 $1, %ymm0, %xmm0
-; X64-NEXT: vzeroupper
-; X64-NEXT: retq
+; CHECK-LABEL: test_x86_avx_vextractf128_pd_256_1:
+; CHECK: # BB#0:
+; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: ret{{[l|q]}}
 %res = call <2 x double> @llvm.x86.avx.vextractf128.pd.256(<4 x double> %a0, i8 1)
 ret <2 x double> %res
 }
 declare <2 x double> @llvm.x86.avx.vextractf128.pd.256(<4 x double>, i8) nounwind readnone
 
 define <4 x float> @test_x86_avx_vextractf128_ps_256_1(<8 x float> %a0) {
-; X86-LABEL: test_x86_avx_vextractf128_ps_256_1:
-; X86: # BB#0:
-; X86-NEXT: vextractf128 $1, %ymm0, %xmm0
-; X86-NEXT: vzeroupper
-; X86-NEXT: retl
-;
-; X64-LABEL: test_x86_avx_vextractf128_ps_256_1:
-; X64: # BB#0:
-; X64-NEXT: vextractf128 $1, %ymm0, %xmm0
-; X64-NEXT: vzeroupper
-; X64-NEXT: retq
+; CHECK-LABEL: test_x86_avx_vextractf128_ps_256_1:
+; CHECK: # BB#0:
+; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: ret{{[l|q]}}
 %res = call <4 x float> @llvm.x86.avx.vextractf128.ps.256(<8 x float> %a0, i8 1)
 ret <4 x float> %res
 }
 declare <4 x float> @llvm.x86.avx.vextractf128.ps.256(<8 x float>, i8) nounwind readnone
 
 define <4 x i32> @test_x86_avx_vextractf128_si_256_1(<8 x i32> %a0) {
-; X86-LABEL: test_x86_avx_vextractf128_si_256_1:
-; X86: # BB#0:
-; X86-NEXT: vextractf128 $1, %ymm0, %xmm0
-; X86-NEXT: vzeroupper
-; X86-NEXT: retl
-;
-; X64-LABEL: test_x86_avx_vextractf128_si_256_1:
-; X64: # BB#0:
-; X64-NEXT: vextractf128 $1, %ymm0, %xmm0
-; X64-NEXT: vzeroupper
-; X64-NEXT: retq
+; CHECK-LABEL: test_x86_avx_vextractf128_si_256_1:
+; CHECK: # BB#0:
+; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: ret{{[l|q]}}
 %res = call <4 x i32> @llvm.x86.avx.vextractf128.si.256(<8 x i32> %a0, i8 1)
 ret <4 x i32> %res
 }
@@ -125,17 +86,11 @@ declare <4 x i32> @llvm.x86.avx.vextractf128.si.256(<8 x i32>, i8) nounwind read
 ; of a vextractf128 $0 which should be optimized away, so just check that it's
 ; not a vextractf128 of any kind.
 define <2 x double> @test_x86_avx_extractf128_pd_256_2(<4 x double> %a0) {
-; X86-LABEL: test_x86_avx_extractf128_pd_256_2:
-; X86: # BB#0:
-; X86-NEXT: # kill: %XMM0 %XMM0 %YMM0
-; X86-NEXT: vzeroupper
-; X86-NEXT: retl
-;
-; X64-LABEL: test_x86_avx_extractf128_pd_256_2:
-; X64: # BB#0:
-; X64-NEXT: # kill: %XMM0 %XMM0 %YMM0
-; X64-NEXT: vzeroupper
-; X64-NEXT: retq
+; CHECK-LABEL: test_x86_avx_extractf128_pd_256_2:
+; CHECK: # BB#0:
+; CHECK-NEXT: # kill: %XMM0 %XMM0 %YMM0
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: ret{{[l|q]}}
 %res = call <2 x double> @llvm.x86.avx.vextractf128.pd.256(<4 x double> %a0, i8 2)
 ret <2 x double> %res
 }
@@ -146,12 +101,12 @@ define <4 x double> @test_x86_avx_vbroadcastf128_pd_256(i8* %a0) {
 ; X86: # BB#0:
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
-; X86-NEXT: retl
+; X86-NEXT: ret{{[l|q]}}
 ;
 ; X64-LABEL: test_x86_avx_vbroadcastf128_pd_256:
 ; X64: # BB#0:
 ; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
-; X64-NEXT: retq
+; X64-NEXT: ret{{[l|q]}}
 %res = call <4 x double> @llvm.x86.avx.vbroadcastf128.pd.256(i8* %a0) ; <<4 x double>> [#uses=1]
 ret <4 x double> %res
 }
@@ -163,12 +118,12 @@ define <8 x float> @test_x86_avx_vbroadcastf128_ps_256(i8* %a0) {
 ; X86: # BB#0:
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
-; X86-NEXT: retl
+; X86-NEXT: ret{{[l|q]}}
 ;
 ; X64-LABEL: test_x86_avx_vbroadcastf128_ps_256:
 ; X64: # BB#0:
 ; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
-; X64-NEXT: retq
+; X64-NEXT: ret{{[l|q]}}
 %res = call <8 x float> @llvm.x86.avx.vbroadcastf128.ps.256(i8* %a0) ; <<8 x float>> [#uses=1]
 ret <8 x float> %res
 }
@@ -176,15 +131,10 @@ declare <8 x float> @llvm.x86.avx.vbroadcastf128.ps.256(i8*) nounwind readonly
 
 define <4 x double> @test_x86_avx_blend_pd_256(<4 x double> %a0, <4 x double> %a1) {
-; X86-LABEL: test_x86_avx_blend_pd_256:
-; X86: # BB#0:
-; X86-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3]
-; X86-NEXT: retl
-;
-; X64-LABEL: test_x86_avx_blend_pd_256:
-; X64: # BB#0:
-; X64-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3]
-; X64-NEXT: retq
+; CHECK-LABEL: test_x86_avx_blend_pd_256:
+; CHECK: # BB#0:
+; CHECK-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3]
+; CHECK-NEXT: ret{{[l|q]}}
 %res = call <4 x double> @llvm.x86.avx.blend.pd.256(<4 x double> %a0, <4 x double> %a1, i32 7) ; <<4 x double>> [#uses=1]
 ret <4 x double> %res
 }
@@ -192,15 +142,10 @@ declare <4 x double> @llvm.x86.avx.blend.pd.256(<4 x double>, <4 x double>, i32)
 
 define <8 x float> @test_x86_avx_blend_ps_256(<8 x float> %a0, <8 x float> %a1) {
-; X86-LABEL: test_x86_avx_blend_ps_256:
-; X86: # BB#0:
-; X86-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
-; X86-NEXT: retl
-;
-; X64-LABEL: test_x86_avx_blend_ps_256:
-; X64: # BB#0:
-; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
-; X64-NEXT: retq
+; CHECK-LABEL: test_x86_avx_blend_ps_256:
+; CHECK: # BB#0:
+; CHECK-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
+; CHECK-NEXT: ret{{[l|q]}}
 %res = call <8 x float> @llvm.x86.avx.blend.ps.256(<8 x float> %a0, <8 x float> %a1, i32 7) ; <<8 x float>> [#uses=1]
 ret <8 x float> %res
 }
@@ -208,15 +153,10 @@ declare <8 x float> @llvm.x86.avx.blend.ps.256(<8 x float>, <8 x float>, i32) no
 
 define <8 x float> @test_x86_avx_dp_ps_256(<8 x float> %a0, <8 x float> %a1) {
-; X86-LABEL: test_x86_avx_dp_ps_256:
-; X86: # BB#0:
-; X86-NEXT: vdpps $7, %ymm1, %ymm0, %ymm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_x86_avx_dp_ps_256:
-; X64: # BB#0:
-; X64-NEXT: vdpps $7, %ymm1, %ymm0, %ymm0
-; X64-NEXT: retq
+; CHECK-LABEL: test_x86_avx_dp_ps_256:
+; CHECK: # BB#0:
+; CHECK-NEXT: vdpps $7, %ymm1, %ymm0, %ymm0
+; CHECK-NEXT: ret{{[l|q]}}
 %res = call <8 x float> @llvm.x86.avx.dp.ps.256(<8 x float> %a0, <8 x float> %a1, i32 7) ; <<8 x float>> [#uses=1]
 ret <8 x float> %res
 }
@@ -224,15 +164,10 @@ declare <8 x float> @llvm.x86.avx.dp.ps.256(<8 x float>, <8 x float>, i32) nounw
 
 define <2 x i64> @test_x86_sse2_psll_dq(<2 x i64> %a0) {
-; X86-LABEL: test_x86_sse2_psll_dq:
-; X86: # BB#0:
-; X86-NEXT: vpslldq {{.*#+}} xmm0 = zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
-; X86-NEXT: retl
-;
-; X64-LABEL: test_x86_sse2_psll_dq:
-; X64: # BB#0:
-; X64-NEXT: vpslldq {{.*#+}} xmm0 = zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
-; X64-NEXT: retq
+; CHECK-LABEL: test_x86_sse2_psll_dq:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpslldq {{.*#+}} xmm0 = zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
+; CHECK-NEXT: ret{{[l|q]}}
 %res = call <2 x i64> @llvm.x86.sse2.psll.dq(<2 x i64> %a0, i32 8) ; <<2 x i64>> [#uses=1]
 ret <2 x i64> %res
 }
@@ -240,15 +175,10 @@ declare <2 x i64> @llvm.x86.sse2.psll.dq(<2 x i64>, i32) nounwind readnone
 
 define <2 x i64> @test_x86_sse2_psrl_dq(<2 x i64> %a0) {
-; X86-LABEL: test_x86_sse2_psrl_dq:
-; X86: # BB#0:
-; X86-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
-; X86-NEXT: retl
-;
-; X64-LABEL: test_x86_sse2_psrl_dq:
-; X64: # BB#0:
-; X64-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
-; X64-NEXT: retq
+; CHECK-LABEL: test_x86_sse2_psrl_dq:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
+; CHECK-NEXT: ret{{[l|q]}}
 %res = call <2 x i64> @llvm.x86.sse2.psrl.dq(<2 x i64> %a0, i32 8) ; <<2 x i64>> [#uses=1]
 ret <2 x i64> %res
 }
@@ -256,15 +186,10 @@ declare <2 x i64> @llvm.x86.sse2.psrl.dq(<2 x i64>, i32) nounwind readnone
 
 define <2 x double> @test_x86_sse41_blendpd(<2 x double> %a0, <2 x double> %a1) {
-; X86-LABEL: test_x86_sse41_blendpd:
-; X86: # BB#0:
-; X86-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
-; X86-NEXT: retl
-;
-; X64-LABEL: test_x86_sse41_blendpd:
-; X64: # BB#0:
-; X64-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
-; X64-NEXT: retq
+; CHECK-LABEL: test_x86_sse41_blendpd:
+; CHECK: # BB#0:
+; CHECK-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
+; CHECK-NEXT: ret{{[l|q]}}
 %res = call <2 x double> @llvm.x86.sse41.blendpd(<2 x double> %a0, <2 x double> %a1, i8 2) ; <<2 x double>> [#uses=1]
 ret <2 x double> %res
 }
@@ -272,15 +197,10 @@ declare <2 x double> @llvm.x86.sse41.blendpd(<2 x double>, <2 x double>, i8) nou
 
 define <4 x float> @test_x86_sse41_blendps(<4 x float> %a0, <4 x float> %a1) {
-; X86-LABEL: test_x86_sse41_blendps:
-; X86: # BB#0:
-; X86-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
-; X86-NEXT: retl
-;
-; X64-LABEL: test_x86_sse41_blendps:
-; X64: # BB#0:
-; X64-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
-; X64-NEXT: retq
+; CHECK-LABEL: test_x86_sse41_blendps:
+; CHECK: # BB#0:
+; CHECK-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
+; CHECK-NEXT: ret{{[l|q]}}
 %res = call <4 x float> @llvm.x86.sse41.blendps(<4 x float> %a0, <4 x float> %a1, i8 7) ; <<4 x float>> [#uses=1]
 ret <4 x float> %res
 }
@@ -288,15 +208,10 @@ declare <4 x float> @llvm.x86.sse41.blendps(<4 x float>, <4 x float>, i8) nounwi
 
 define <8 x i16> @test_x86_sse41_pblendw(<8 x i16> %a0, <8 x i16> %a1) {
-; X86-LABEL: test_x86_sse41_pblendw:
-; X86: # BB#0:
-; X86-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3,4,5,6,7]
-; X86-NEXT: retl
-;
-; X64-LABEL: test_x86_sse41_pblendw:
-; X64: # BB#0:
-; X64-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3,4,5,6,7]
-; X64-NEXT: retq
+; CHECK-LABEL: test_x86_sse41_pblendw:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3,4,5,6,7]
+; CHECK-NEXT: ret{{[l|q]}}
 %res = call <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16> %a0, <8 x i16> %a1, i8 7) ; <<8 x i16>> [#uses=1]
 ret <8 x i16> %res
 }
@@ -304,15 +219,10 @@ declare <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16>, <8 x i16>, i8) nounwind rea
 
 define <4 x i32> @test_x86_sse41_pmovsxbd(<16 x i8> %a0) {
-; X86-LABEL: test_x86_sse41_pmovsxbd:
-; X86: # BB#0:
-; X86-NEXT: vpmovsxbd %xmm0, %xmm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_x86_sse41_pmovsxbd:
-; X64: # BB#0:
-; X64-NEXT: vpmovsxbd %xmm0, %xmm0
-; X64-NEXT: retq
+; CHECK-LABEL: test_x86_sse41_pmovsxbd:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpmovsxbd %xmm0, %xmm0
+; CHECK-NEXT: ret{{[l|q]}}
 %res = call <4 x i32> @llvm.x86.sse41.pmovsxbd(<16 x i8> %a0) ; <<4 x i32>> [#uses=1]
 ret <4 x i32> %res
 }
@@ -320,15 +230,10 @@ declare <4 x i32> @llvm.x86.sse41.pmovsxbd(<16 x i8>) nounwind readnone
 
 define <2 x i64> @test_x86_sse41_pmovsxbq(<16 x i8> %a0) {
-; X86-LABEL: test_x86_sse41_pmovsxbq:
-; X86: # BB#0:
-; X86-NEXT: vpmovsxbq %xmm0, %xmm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_x86_sse41_pmovsxbq:
-; X64: # BB#0:
-; X64-NEXT: vpmovsxbq %xmm0, %xmm0
-; X64-NEXT: retq
+; CHECK-LABEL: test_x86_sse41_pmovsxbq:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpmovsxbq %xmm0, %xmm0
+; CHECK-NEXT: ret{{[l|q]}}
 %res = call <2 x i64> @llvm.x86.sse41.pmovsxbq(<16 x i8> %a0) ; <<2 x i64>> [#uses=1]
 ret <2 x i64> %res
 }
@@ -336,15 +241,10 @@ declare <2 x i64> @llvm.x86.sse41.pmovsxbq(<16 x i8>) nounwind readnone
 
 define <8 x i16> @test_x86_sse41_pmovsxbw(<16 x i8> %a0) {
-; X86-LABEL: test_x86_sse41_pmovsxbw:
-; X86: # BB#0:
-; X86-NEXT: vpmovsxbw %xmm0, %xmm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_x86_sse41_pmovsxbw:
-; X64: # BB#0:
-; X64-NEXT: vpmovsxbw %xmm0, %xmm0
-; X64-NEXT: retq
+; CHECK-LABEL: test_x86_sse41_pmovsxbw:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpmovsxbw %xmm0, %xmm0
+; CHECK-NEXT: ret{{[l|q]}}
 %res = call <8 x i16> @llvm.x86.sse41.pmovsxbw(<16 x i8> %a0) ; <<8 x i16>> [#uses=1]
 ret <8 x i16> %res
 }
@@ -352,15 +252,10 @@ declare <8 x i16> @llvm.x86.sse41.pmovsxbw(<16 x i8>) nounwind readnone
 
 define <2 x i64> @test_x86_sse41_pmovsxdq(<4 x i32> %a0) {
-; X86-LABEL: test_x86_sse41_pmovsxdq:
-; X86: # BB#0:
-; X86-NEXT: vpmovsxdq %xmm0, %xmm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_x86_sse41_pmovsxdq:
-; X64: # BB#0:
-; X64-NEXT: vpmovsxdq %xmm0, %xmm0
-; X64-NEXT: retq
+; CHECK-LABEL: test_x86_sse41_pmovsxdq:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpmovsxdq %xmm0, %xmm0
+; CHECK-NEXT: ret{{[l|q]}}
 %res = call <2 x i64> @llvm.x86.sse41.pmovsxdq(<4 x i32> %a0) ; <<2 x i64>> [#uses=1]
 ret <2 x i64> %res
 }
@@ -368,15 +263,10 @@ declare <2 x i64> @llvm.x86.sse41.pmovsxdq(<4 x i32>) nounwind readnone
 
 define <4 x i32> @test_x86_sse41_pmovsxwd(<8 x i16> %a0) {
-; X86-LABEL: test_x86_sse41_pmovsxwd:
-; X86: # BB#0:
-; X86-NEXT: vpmovsxwd %xmm0, %xmm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_x86_sse41_pmovsxwd:
-; X64: # BB#0:
-; X64-NEXT: vpmovsxwd %xmm0, %xmm0
-; X64-NEXT: retq
+; CHECK-LABEL: test_x86_sse41_pmovsxwd:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpmovsxwd %xmm0, %xmm0
+; CHECK-NEXT: ret{{[l|q]}}
 %res = call <4 x i32> @llvm.x86.sse41.pmovsxwd(<8 x i16> %a0) ; <<4 x i32>> [#uses=1]
 ret <4 x i32> %res
 }
@@ -384,15 +274,10 @@ declare <4 x i32> @llvm.x86.sse41.pmovsxwd(<8 x i16>) nounwind readnone
 
 define <2 x i64> @test_x86_sse41_pmovsxwq(<8 x i16> %a0) {
-; X86-LABEL: test_x86_sse41_pmovsxwq:
-; X86: # BB#0:
-; X86-NEXT: vpmovsxwq %xmm0, %xmm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_x86_sse41_pmovsxwq:
-; X64: # BB#0:
-; X64-NEXT: vpmovsxwq %xmm0, %xmm0
-; X64-NEXT: retq
+; CHECK-LABEL: test_x86_sse41_pmovsxwq:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpmovsxwq %xmm0, %xmm0
+; CHECK-NEXT: ret{{[l|q]}}
 %res = call <2 x i64> @llvm.x86.sse41.pmovsxwq(<8 x i16> %a0) ; <<2 x i64>> [#uses=1]
 ret <2 x i64> %res
 }
@@ -400,15 +285,10 @@ declare <2 x i64> @llvm.x86.sse41.pmovsxwq(<8 x i16>) nounwind readnone
 
 define <4 x i32> @test_x86_sse41_pmovzxbd(<16 x i8> %a0) {
-; X86-LABEL: test_x86_sse41_pmovzxbd:
-; X86: # BB#0:
-; X86-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; X86-NEXT: retl
-;
-; X64-LABEL: test_x86_sse41_pmovzxbd:
-; X64: # BB#0:
-; X64-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; X64-NEXT: retq
+; CHECK-LABEL: test_x86_sse41_pmovzxbd:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; CHECK-NEXT: ret{{[l|q]}}
 %res = call <4 x i32> @llvm.x86.sse41.pmovzxbd(<16 x i8> %a0) ; <<4 x i32>> [#uses=1]
 ret <4 x i32> %res
 }
@@ -416,15 +296,10 @@ declare <4 x i32> @llvm.x86.sse41.pmovzxbd(<16 x i8>) nounwind readnone
 
 define <2 x i64> @test_x86_sse41_pmovzxbq(<16 x i8> %a0) {
-; X86-LABEL: test_x86_sse41_pmovzxbq:
-; X86: # BB#0:
-; X86-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
-; X86-NEXT: retl
-;
-; X64-LABEL: test_x86_sse41_pmovzxbq:
-; X64: # BB#0:
-; X64-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
-; X64-NEXT: retq
+; CHECK-LABEL: test_x86_sse41_pmovzxbq:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
+; CHECK-NEXT: ret{{[l|q]}}
 %res = call <2 x i64> @llvm.x86.sse41.pmovzxbq(<16 x i8> %a0) ; <<2 x i64>> [#uses=1]
 ret <2 x i64> %res
 }
@@ -432,15 +307,10 @@ declare <2 x i64> @llvm.x86.sse41.pmovzxbq(<16 x i8>) nounwind readnone
 
 define <8 x i16> @test_x86_sse41_pmovzxbw(<16 x i8> %a0) {
-; X86-LABEL: test_x86_sse41_pmovzxbw:
-; X86: # BB#0:
-; X86-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; X86-NEXT: retl
-;
-; X64-LABEL: test_x86_sse41_pmovzxbw:
-; X64: # BB#0:
-; X64-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; X64-NEXT: retq
+; CHECK-LABEL: test_x86_sse41_pmovzxbw:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; CHECK-NEXT: ret{{[l|q]}}
 %res = call <8 x i16> @llvm.x86.sse41.pmovzxbw(<16 x i8> %a0) ; <<8 x i16>> [#uses=1]
 ret <8 x i16> %res
 }
@@ -448,15 +318,10 @@ declare <8 x i16> @llvm.x86.sse41.pmovzxbw(<16 x i8>) nounwind readnone
 
 define <2 x i64> @test_x86_sse41_pmovzxdq(<4 x i32> %a0) {
-; X86-LABEL: test_x86_sse41_pmovzxdq:
-; X86: # BB#0:
-; X86-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
-; X86-NEXT: retl
-;
-; X64-LABEL: test_x86_sse41_pmovzxdq:
-; X64: # BB#0:
-; X64-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
-; X64-NEXT: retq
+; CHECK-LABEL: test_x86_sse41_pmovzxdq:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; CHECK-NEXT: ret{{[l|q]}}
 %res = call <2 x i64> @llvm.x86.sse41.pmovzxdq(<4 x i32> %a0) ; <<2 x i64>> [#uses=1]
 ret <2 x i64> %res
 }
@@ -464,15 +329,10 @@ declare <2 x i64> @llvm.x86.sse41.pmovzxdq(<4 x i32>) nounwind readnone
 
 define <4 x i32> @test_x86_sse41_pmovzxwd(<8 x i16> %a0) {
-; X86-LABEL: test_x86_sse41_pmovzxwd:
-; X86: # BB#0:
-; X86-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; X86-NEXT: retl
-;
-; X64-LABEL: test_x86_sse41_pmovzxwd:
-; X64: # BB#0:
-; X64-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; X64-NEXT: retq
+; CHECK-LABEL: test_x86_sse41_pmovzxwd:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; CHECK-NEXT: ret{{[l|q]}}
 %res = call <4 x i32> @llvm.x86.sse41.pmovzxwd(<8 x i16> %a0) ; <<4 x i32>> [#uses=1]
 ret <4 x i32> %res
 }
@@ -480,15 +340,10 @@ declare <4 x i32> @llvm.x86.sse41.pmovzxwd(<8 x i16>) nounwind readnone
 
 define <2 x i64> @test_x86_sse41_pmovzxwq(<8 x i16> %a0) {
-; X86-LABEL: test_x86_sse41_pmovzxwq:
-; X86: # BB#0:
-; X86-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
-; X86-NEXT: retl
-;
-; X64-LABEL: test_x86_sse41_pmovzxwq:
-; X64: # BB#0:
-; X64-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
-; X64-NEXT: retq
+; CHECK-LABEL: test_x86_sse41_pmovzxwq:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; CHECK-NEXT: ret{{[l|q]}}
 %res = call <2 x i64> @llvm.x86.sse41.pmovzxwq(<8 x i16> %a0) ; <<2 x i64>> [#uses=1]
 ret <2 x i64> %res
 }
@@ -496,15 +351,10 @@ declare <2 x i64> @llvm.x86.sse41.pmovzxwq(<8 x i16>) nounwind readnone
 
 define <2 x double> @test_x86_sse2_cvtdq2pd(<4 x i32> %a0) {
-; X86-LABEL: test_x86_sse2_cvtdq2pd:
-; X86: # BB#0:
-; X86-NEXT: vcvtdq2pd %xmm0, %xmm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_x86_sse2_cvtdq2pd:
-; X64: # BB#0:
-; X64-NEXT: vcvtdq2pd %xmm0, %xmm0
-; X64-NEXT: retq
+; CHECK-LABEL: test_x86_sse2_cvtdq2pd:
+; CHECK: # BB#0:
+; CHECK-NEXT: vcvtdq2pd %xmm0, %xmm0
+; CHECK-NEXT: ret{{[l|q]}}
 %res = call <2 x double> @llvm.x86.sse2.cvtdq2pd(<4 x i32> %a0) ; <<2 x double>> [#uses=1]
 ret <2 x double> %res
 }
@@ -512,15 +362,10 @@ declare <2 x double> @llvm.x86.sse2.cvtdq2pd(<4 x i32>) nounwind readnone
 
 define <4 x double> @test_x86_avx_cvtdq2_pd_256(<4 x i32> %a0) {
-; X86-LABEL: test_x86_avx_cvtdq2_pd_256:
-; X86: # BB#0:
-; X86-NEXT: vcvtdq2pd %xmm0, %ymm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_x86_avx_cvtdq2_pd_256:
-; X64: # BB#0:
-; X64-NEXT: vcvtdq2pd %xmm0, %ymm0
-; X64-NEXT: retq
+; CHECK-LABEL: test_x86_avx_cvtdq2_pd_256:
+; CHECK: # BB#0:
+; CHECK-NEXT: vcvtdq2pd %xmm0, %ymm0
+; CHECK-NEXT: ret{{[l|q]}}
 %res = call <4 x double> @llvm.x86.avx.cvtdq2.pd.256(<4 x i32> %a0) ; <<4 x double>> [#uses=1]
 ret <4 x double> %res
 }
@@ -528,15 +373,10 @@ declare <4 x double> @llvm.x86.avx.cvtdq2.pd.256(<4 x i32>) nounwind readnone
 
 define <2 x double> @test_x86_sse2_cvtps2pd(<4 x float> %a0) {
-; X86-LABEL: test_x86_sse2_cvtps2pd:
-; X86: # BB#0:
-; X86-NEXT: vcvtps2pd %xmm0, %xmm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_x86_sse2_cvtps2pd:
-; X64: # BB#0:
-; X64-NEXT: vcvtps2pd %xmm0, %xmm0
-; X64-NEXT: retq
+; CHECK-LABEL: test_x86_sse2_cvtps2pd:
+; CHECK: # BB#0:
+; CHECK-NEXT: vcvtps2pd %xmm0, %xmm0
+; CHECK-NEXT: ret{{[l|q]}}
 %res = call <2 x double> @llvm.x86.sse2.cvtps2pd(<4 x float> %a0) ; <<2 x double>> [#uses=1]
 ret <2 x double> %res
 }
@@ -544,15 +384,10 @@ declare <2 x double> @llvm.x86.sse2.cvtps2pd(<4 x float>) nounwind readnone
 
 define <4 x double> @test_x86_avx_cvt_ps2_pd_256(<4 x float> %a0) {
-; X86-LABEL: test_x86_avx_cvt_ps2_pd_256:
-; X86: # BB#0:
-; X86-NEXT: vcvtps2pd %xmm0, %ymm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_x86_avx_cvt_ps2_pd_256:
-; X64: # BB#0:
-; X64-NEXT: vcvtps2pd %xmm0, %ymm0
-; X64-NEXT: retq
+; CHECK-LABEL: test_x86_avx_cvt_ps2_pd_256:
+; CHECK: # BB#0:
+; CHECK-NEXT: vcvtps2pd %xmm0, %ymm0
+; CHECK-NEXT: ret{{[l|q]}}
 %res = call <4 x double> @llvm.x86.avx.cvt.ps2.pd.256(<4 x float> %a0) ; <<4 x double>> [#uses=1]
 ret <4 x double> %res
 }
@@ -567,14 +402,14 @@ define void @test_x86_sse2_storeu_dq(i8* %a0, <16 x i8> %a1) {
 ; X86-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
 ; X86-NEXT: vpsubb %xmm1, %xmm0, %xmm0
 ; X86-NEXT: vmovdqu %xmm0, (%eax)
-; X86-NEXT: retl
+; X86-NEXT: ret{{[l|q]}}
 ;
 ; X64-LABEL: test_x86_sse2_storeu_dq:
 ; X64: # BB#0:
 ; X64-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
 ; X64-NEXT: vpsubb %xmm1, %xmm0, %xmm0
 ; X64-NEXT: vmovdqu %xmm0, (%rdi)
-; X64-NEXT: retq
+; X64-NEXT: ret{{[l|q]}}
 %a2 = add <16 x i8> %a1,
 call void @llvm.x86.sse2.storeu.dq(i8* %a0, <16 x i8> %a2)
 ret void
@@ -591,7 +426,7 @@ define void @test_x86_sse2_storeu_pd(i8* %a0, <2 x double> %a1) {
 ; X86-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
 ; X86-NEXT: vaddpd %xmm1, %xmm0, %xmm0
 ; X86-NEXT: vmovupd %xmm0, (%eax)
-; X86-NEXT: retl
+; X86-NEXT: ret{{[l|q]}}
 ;
 ; X64-LABEL: test_x86_sse2_storeu_pd:
 ; X64: # BB#0:
@@ -599,7 +434,7 @@ define void @test_x86_sse2_storeu_pd(i8* %a0, <2 x double> %a1) {
 ; X64-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
 ; X64-NEXT: vaddpd %xmm1, %xmm0, %xmm0
 ; X64-NEXT: vmovupd %xmm0, (%rdi)
-; X64-NEXT: retq
+; X64-NEXT: ret{{[l|q]}}
 %a2 = fadd <2 x double> %a1,
 call void @llvm.x86.sse2.storeu.pd(i8* %a0, <2 x double> %a2)
 ret void
@@ -612,12 +447,12 @@ define void @test_x86_sse_storeu_ps(i8* %a0, <4 x float> %a1) {
 ; X86: # BB#0:
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT: vmovups %xmm0, (%eax)
-; X86-NEXT: retl
+; X86-NEXT: ret{{[l|q]}}
 ;
 ; X64-LABEL: test_x86_sse_storeu_ps:
 ; X64: # BB#0:
 ; X64-NEXT: vmovups %xmm0, (%rdi)
-; X64-NEXT: retq
+; X64-NEXT: ret{{[l|q]}}
 call void @llvm.x86.sse.storeu.ps(i8* %a0, <4 x float> %a1)
 ret void
 }
@@ -637,7 +472,7 @@ define void @test_x86_avx_storeu_dq_256(i8* %a0, <32 x i8> %a1) {
 ; X86-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; X86-NEXT: vmovups %ymm0, (%eax)
 ; X86-NEXT: vzeroupper
-; X86-NEXT: retl
+; X86-NEXT: ret{{[l|q]}}
 ;
 ; X64-LABEL: test_x86_avx_storeu_dq_256:
 ; X64: # BB#0:
@@ -648,7 +483,7 @@ define void @test_x86_avx_storeu_dq_256(i8* %a0, <32 x i8> %a1) {
 ; X64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; X64-NEXT: vmovups %ymm0, (%rdi)
 ; X64-NEXT: vzeroupper
-; X64-NEXT: retq
+; X64-NEXT: ret{{[l|q]}}
 %a2 = add <32 x i8> %a1,
 call void @llvm.x86.avx.storeu.dq.256(i8* %a0, <32 x i8> %a2)
 ret void
@@ -665,7 +500,7 @@ define void @test_x86_avx_storeu_pd_256(i8* %a0, <4 x double> %a1) {
 ; X86-NEXT: vaddpd %ymm1, %ymm0, %ymm0
 ; X86-NEXT: vmovupd %ymm0, (%eax)
 ; X86-NEXT: vzeroupper
-; X86-NEXT: retl
+; X86-NEXT: ret{{[l|q]}}
 ;
 ; X64-LABEL: test_x86_avx_storeu_pd_256:
 ; X64: # BB#0:
@@ -673,7 +508,7 @@ define void @test_x86_avx_storeu_pd_256(i8* %a0, <4 x double> %a1) {
 ; X64-NEXT: vaddpd %ymm1, %ymm0, %ymm0
 ; X64-NEXT: vmovupd %ymm0, (%rdi)
 ; X64-NEXT: vzeroupper
-; X64-NEXT: retq
+; X64-NEXT: ret{{[l|q]}}
 %a2 = fadd <4 x double> %a1,
 call void @llvm.x86.avx.storeu.pd.256(i8* %a0, <4 x double> %a2)
 ret void
@@ -687,13 +522,13 @@ define void @test_x86_avx_storeu_ps_256(i8* %a0, <8 x float> %a1) {
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT: vmovups %ymm0, (%eax)
 ; X86-NEXT: vzeroupper
-; X86-NEXT: retl
+; X86-NEXT: ret{{[l|q]}}
 ;
 ; X64-LABEL: test_x86_avx_storeu_ps_256:
 ; X64: # BB#0:
 ; X64-NEXT: vmovups %ymm0, (%rdi)
 ; X64-NEXT: vzeroupper
-; X64-NEXT: retq
+; X64-NEXT: ret{{[l|q]}}
 call void @llvm.x86.avx.storeu.ps.256(i8* %a0, <8 x float> %a1)
 ret void
 }
@@ -701,15 +536,10 @@ declare void @llvm.x86.avx.storeu.ps.256(i8*, <8 x float>) nounwind
 
 define <2 x double> @test_x86_avx_vpermil_pd(<2 x double> %a0) {
-; X86-LABEL: test_x86_avx_vpermil_pd:
-; X86: # BB#0:
-; X86-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; X86-NEXT: retl
-;
-; X64-LABEL: test_x86_avx_vpermil_pd:
-; X64: # BB#0:
-; X64-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; X64-NEXT: retq
+; CHECK-LABEL: test_x86_avx_vpermil_pd:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; CHECK-NEXT: ret{{[l|q]}}
 %res = call <2 x double> @llvm.x86.avx.vpermil.pd(<2 x double> %a0, i8 1) ; <<2 x double>> [#uses=1]
 ret <2 x double> %res
 }
@@ -717,15 +547,10 @@ declare <2 x double> @llvm.x86.avx.vpermil.pd(<2 x double>, i8) nounwind readnon
 
 define <4 x double> @test_x86_avx_vpermil_pd_256(<4 x double> %a0) {
-; X86-LABEL: test_x86_avx_vpermil_pd_256:
-; X86: # BB#0:
-; X86-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,1,3,2]
-; X86-NEXT: retl
-;
-; X64-LABEL: test_x86_avx_vpermil_pd_256:
-; X64: # BB#0:
-; X64-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,1,3,2]
-; X64-NEXT: retq
+; CHECK-LABEL: test_x86_avx_vpermil_pd_256:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,1,3,2]
+; CHECK-NEXT: ret{{[l|q]}}
 %res = call <4 x double> @llvm.x86.avx.vpermil.pd.256(<4 x double> %a0, i8 7) ; <<4 x double>> [#uses=1]
 ret <4 x double> %res
 }
@@ -733,15 +558,10 @@ declare <4 x double> @llvm.x86.avx.vpermil.pd.256(<4 x double>, i8) nounwind rea
 
 define <4 x float> @test_x86_avx_vpermil_ps(<4 x float> %a0) {
-; X86-LABEL: test_x86_avx_vpermil_ps:
-; X86: # BB#0:
-; X86-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,0,0]
-; X86-NEXT: retl
-;
-; X64-LABEL: test_x86_avx_vpermil_ps:
-; X64: # BB#0:
-; X64-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,0,0]
-; X64-NEXT: retq
+; CHECK-LABEL: test_x86_avx_vpermil_ps:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,0,0]
+; CHECK-NEXT: ret{{[l|q]}}
 %res = call <4 x float> @llvm.x86.avx.vpermil.ps(<4 x float> %a0, i8 7) ; <<4 x float>> [#uses=1]
 ret <4 x float> %res
 }
@@ -749,15 +569,10 @@ declare <4 x float> @llvm.x86.avx.vpermil.ps(<4 x float>, i8) nounwind readnone
 
 define <8 x float> @test_x86_avx_vpermil_ps_256(<8 x float> %a0) {
-; X86-LABEL: test_x86_avx_vpermil_ps_256:
-; X86: # BB#0:
-; X86-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,1,0,0,7,5,4,4]
-; X86-NEXT: retl
-;
-; X64-LABEL: test_x86_avx_vpermil_ps_256:
-; X64: # BB#0:
-; X64-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,1,0,0,7,5,4,4]
-; X64-NEXT: retq
+; CHECK-LABEL: test_x86_avx_vpermil_ps_256:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,1,0,0,7,5,4,4]
+; CHECK-NEXT: ret{{[l|q]}}
 %res = call <8 x float> @llvm.x86.avx.vpermil.ps.256(<8 x float> %a0, i8 7) ; <<8 x float>> [#uses=1]
 ret <8 x float> %res
 }
@@ -765,15 +580,10 @@ declare <8 x float> @llvm.x86.avx.vpermil.ps.256(<8 x float>, i8) nounwind readn
 
 define <4 x double> @test_x86_avx_vperm2f128_pd_256(<4 x double> %a0, <4 x double> %a1) {
-; X86-LABEL: test_x86_avx_vperm2f128_pd_256:
-; X86: # BB#0:
-; X86-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[0,1]
-; X86-NEXT: retl
-;
-; X64-LABEL: test_x86_avx_vperm2f128_pd_256:
-; X64: # BB#0:
-; X64-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[0,1]
-; X64-NEXT: retq
+; CHECK-LABEL: test_x86_avx_vperm2f128_pd_256:
+; CHECK: # BB#0:
+; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[0,1]
+; CHECK-NEXT: ret{{[l|q]}}
 %res = call <4 x double> @llvm.x86.avx.vperm2f128.pd.256(<4 x double> %a0, <4 x double> %a1, i8 3) ; <<4 x double>> [#uses=1]
 ret <4 x double> %res
 }
@@ -781,15 +591,10 @@ declare <4 x double> @llvm.x86.avx.vperm2f128.pd.256(<4 x double>, <4 x double>,
 
 define <8 x float> @test_x86_avx_vperm2f128_ps_256(<8 x float> %a0, <8 x float> %a1) {
-; X86-LABEL: test_x86_avx_vperm2f128_ps_256:
-; X86: # BB#0:
-; X86-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[0,1]
-; X86-NEXT: retl
-;
-; X64-LABEL: test_x86_avx_vperm2f128_ps_256:
-; X64: # BB#0:
-; X64-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[0,1]
-; X64-NEXT: retq
+; CHECK-LABEL: test_x86_avx_vperm2f128_ps_256:
+; CHECK: # BB#0:
+; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[0,1]
+; CHECK-NEXT: ret{{[l|q]}}
 %res = call <8 x float> @llvm.x86.avx.vperm2f128.ps.256(<8 x float> %a0, <8 x float> %a1, i8 3) ; <<8 x float>> [#uses=1]
 ret <8 x float> %res
 }
@@ -797,15 +602,10 @@ declare <8 x float> @llvm.x86.avx.vperm2f128.ps.256(<8 x float>, <8 x float>, i8
 
 define <8 x i32> @test_x86_avx_vperm2f128_si_256(<8 x i32> %a0, <8 x i32> %a1) {
-; X86-LABEL: test_x86_avx_vperm2f128_si_256:
-; X86: # BB#0:
-; X86-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[0,1]
-; X86-NEXT: retl
-;
-; X64-LABEL: test_x86_avx_vperm2f128_si_256:
-; X64: # BB#0:
-; X64-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[0,1]
-; X64-NEXT: retq
+; CHECK-LABEL: test_x86_avx_vperm2f128_si_256:
+; CHECK: # BB#0:
+; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[0,1]
+; CHECK-NEXT: ret{{[l|q]}}
 %res = call <8 x i32> @llvm.x86.avx.vperm2f128.si.256(<8 x i32> %a0, <8 x i32> %a1, i8 3) ; <<8 x i32>> [#uses=1]
 ret <8 x i32> %res
 }
diff --git a/test/CodeGen/X86/avx-intrinsics-x86.ll b/test/CodeGen/X86/avx-intrinsics-x86.ll
index 4e65790bf3d..44eb14160ee 100644
--- a/test/CodeGen/X86/avx-intrinsics-x86.ll
+++ b/test/CodeGen/X86/avx-intrinsics-x86.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=avx,pclmul -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=AVX
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mcpu=skx -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512VL
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=avx,pclmul -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=X86 --check-prefix=X86-AVX
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mcpu=skx -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512VL --check-prefix=X86 --check-prefix=X86-AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx,pclmul -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=X64 --check-prefix=X64-AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=skx -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512VL --check-prefix=X64 --check-prefix=X64-AVX512VL
 
 define <4 x double> @test_x86_avx_addsub_pd_256(<4 x double> %a0, <4 x double> %a1) {
 ; CHECK-LABEL: test_x86_avx_addsub_pd_256:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: vaddsubpd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0xd0,0xc1]
-; CHECK-NEXT: retl # encoding: [0xc3]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 %res = call <4 x double> @llvm.x86.avx.addsub.pd.256(<4 x double> %a0, <4 x double> %a1) ; <<4 x double>> [#uses=1]
 ret <4 x double> %res
 }
@@ -17,7 +19,7 @@ define <8 x float> @test_x86_avx_addsub_ps_256(<8 x float> %a0, <8 x float> %a1)
 ; CHECK-LABEL: test_x86_avx_addsub_ps_256:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: vaddsubps %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xff,0xd0,0xc1]
-; CHECK-NEXT: retl # encoding: [0xc3]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 %res = call <8 x float> @llvm.x86.avx.addsub.ps.256(<8 x float> %a0, <8 x float> %a1) ; <<8 x float>> [#uses=1]
 ret <8 x float> %res
 }
@@ -28,7 +30,7 @@ define <4 x double> @test_x86_avx_blendv_pd_256(<4 x double> %a0, <4 x double> %
 ; CHECK-LABEL: test_x86_avx_blendv_pd_256:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: vblendvpd %ymm2, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0x7d,0x4b,0xc1,0x20]
-; CHECK-NEXT: retl # encoding: [0xc3]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 %res = call <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) ; <<4 x double>> [#uses=1]
 ret <4 x double> %res
 }
@@ -39,7 +41,7 @@ define <8 x float> @test_x86_avx_blendv_ps_256(<8 x float> %a0, <8 x float> %a1,
 ; CHECK-LABEL: test_x86_avx_blendv_ps_256:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: vblendvps %ymm2, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0x7d,0x4a,0xc1,0x20]
-; CHECK-NEXT: retl # encoding: [0xc3]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 %res = call <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) ; <<8 x float>> [#uses=1]
 ret <8 x float> %res
 }
@@ -50,7 +52,7 @@ define <4 x double> @test_x86_avx_cmp_pd_256(<4 x double> %a0, <4 x double> %a1)
 ; CHECK-LABEL: test_x86_avx_cmp_pd_256:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: vcmpordpd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0xc2,0xc1,0x07]
-; CHECK-NEXT: retl # encoding: [0xc3]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 %res = call <4 x double> @llvm.x86.avx.cmp.pd.256(<4 x double> %a0, <4 x double> %a1, i8 7) ; <<4 x double>> [#uses=1]
 ret <4 x double> %res
 }
@@ -61,7 +63,7 @@ define <8 x float> @test_x86_avx_cmp_ps_256(<8 x float> %a0, <8 x float> %a1) {
 ; CHECK-LABEL: test_x86_avx_cmp_ps_256:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: vcmpordps %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfc,0xc2,0xc1,0x07]
-; CHECK-NEXT: retl # encoding: [0xc3]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 %res = call <8 x float> @llvm.x86.avx.cmp.ps.256(<8 x float> %a0, <8 x float> %a1, i8 7) ; <<8 x float>> [#uses=1]
 ret <8 x float> %res
 }
@@ -101,7 +103,7 @@ define <8 x float> @test_x86_avx_cmp_ps_256_pseudo_op(<8 x float> %a0, <8 x floa
 ; CHECK-NEXT: vcmpge_oqps %ymm1, %ymm0, %ymm1 # encoding: [0xc5,0xfc,0xc2,0xc9,0x1d]
 ; CHECK-NEXT: vcmpgt_oqps %ymm1, %ymm0, %ymm1 # encoding: [0xc5,0xfc,0xc2,0xc9,0x1e]
 ; CHECK-NEXT: vcmptrue_usps %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfc,0xc2,0xc1,0x1f]
-; CHECK-NEXT: retl # encoding: [0xc3]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 %a2 = call <8 x float> @llvm.x86.avx.cmp.ps.256(<8 x float> %a0, <8 x float> %a1, i8 0) ; <<8 x float>> [#uses=1]
 %a3 = call <8 x float> @llvm.x86.avx.cmp.ps.256(<8 x float> %a0, <8 x float> %a2, i8 1) ; <<8 x float>> [#uses=1]
 %a4 = call <8 x float> @llvm.x86.avx.cmp.ps.256(<8 x float> %a0, <8 x float> %a3, i8 2) ; <<8 x float>> [#uses=1]
@@ -144,13 +146,13 @@ define <4 x float> @test_x86_avx_cvt_pd2_ps_256(<4 x double> %a0) {
 ; AVX: # BB#0:
 ; AVX-NEXT: vcvtpd2ps %ymm0, %xmm0 # encoding: [0xc5,0xfd,0x5a,0xc0]
 ; AVX-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
-; AVX-NEXT: retl # encoding: [0xc3]
+; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx_cvt_pd2_ps_256:
 ; AVX512VL: # BB#0:
 ; AVX512VL-NEXT: vcvtpd2ps %ymm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x5a,0xc0]
 ; AVX512VL-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
-; AVX512VL-NEXT: retl # encoding: [0xc3]
+; AVX512VL-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 %res = call <4 x float> @llvm.x86.avx.cvt.pd2.ps.256(<4 x double> %a0) ; <<4 x float>> [#uses=1]
 ret <4 x float> %res
 }
@@ -162,13 +164,13 @@ define <4 x i32> @test_x86_avx_cvt_pd2dq_256(<4 x double> %a0) {
 ; AVX: # BB#0:
 ; AVX-NEXT: vcvtpd2dq %ymm0, %xmm0 # encoding: [0xc5,0xff,0xe6,0xc0]
 ; AVX-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
-; AVX-NEXT: retl # encoding: [0xc3]
+; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx_cvt_pd2dq_256:
 ; AVX512VL: # BB#0:
 ; AVX512VL-NEXT: vcvtpd2dq %ymm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xff,0xe6,0xc0]
 ; AVX512VL-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
-; AVX512VL-NEXT: retl # encoding: [0xc3]
+; AVX512VL-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 %res = call <4 x i32> @llvm.x86.avx.cvt.pd2dq.256(<4 x double> %a0) ; <<4 x i32>> [#uses=1]
 ret <4 x i32> %res
 }
@@ -179,7 +181,7 @@ define <8 x i32> @test_x86_avx_cvt_ps2dq_256(<8 x float> %a0) {
 ; CHECK-LABEL: test_x86_avx_cvt_ps2dq_256:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: vcvtps2dq %ymm0, %ymm0 # encoding: [0xc5,0xfd,0x5b,0xc0]
-; CHECK-NEXT: retl # encoding: [0xc3]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 %res = call <8 x i32> @llvm.x86.avx.cvt.ps2dq.256(<8 x float> %a0) ; <<8 x i32>> [#uses=1]
 ret <8 x i32> %res
 }
@@ -190,12 +192,12 @@ define <8 x float> @test_x86_avx_cvtdq2_ps_256(<8 x i32> %a0) {
 ; AVX-LABEL: test_x86_avx_cvtdq2_ps_256:
 ; AVX: # BB#0:
 ; AVX-NEXT: vcvtdq2ps %ymm0, %ymm0 # encoding: [0xc5,0xfc,0x5b,0xc0]
-; AVX-NEXT: retl # encoding: [0xc3]
+; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx_cvtdq2_ps_256:
 ; AVX512VL: # BB#0:
 ; AVX512VL-NEXT: vcvtdq2ps %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x5b,0xc0]
-; AVX512VL-NEXT: retl # encoding: [0xc3]
+; AVX512VL-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 %res = call <8 x float> @llvm.x86.avx.cvtdq2.ps.256(<8 x i32> %a0) ; <<8 x float>> [#uses=1]
 ret <8 x float> %res
 }
@@ -207,13 +209,13 @@ define <4 x i32> @test_x86_avx_cvtt_pd2dq_256(<4 x double> %a0) {
 ; AVX: # BB#0:
 ; AVX-NEXT: vcvttpd2dq %ymm0, %xmm0 # encoding: [0xc5,0xfd,0xe6,0xc0]
 ; AVX-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
-; AVX-NEXT: retl # encoding: [0xc3]
+; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx_cvtt_pd2dq_256:
 ; AVX512VL: # BB#0:
 ; AVX512VL-NEXT: vcvttpd2dq %ymm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe6,0xc0]
 ; AVX512VL-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
-; AVX512VL-NEXT: retl # encoding: [0xc3]
+; AVX512VL-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 %res = call <4 x i32> @llvm.x86.avx.cvtt.pd2dq.256(<4 x double> %a0) ; <<4 x i32>> [#uses=1]
 ret <4 x i32> %res
 }
@@ -224,12 +226,12 @@ define <8 x i32> @test_x86_avx_cvtt_ps2dq_256(<8 x float> %a0) {
 ; AVX-LABEL: test_x86_avx_cvtt_ps2dq_256:
 ; AVX: # BB#0:
 ; AVX-NEXT: vcvttps2dq %ymm0, %ymm0 # encoding: [0xc5,0xfe,0x5b,0xc0]
-; AVX-NEXT: retl # encoding: [0xc3]
+; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx_cvtt_ps2dq_256:
 ; AVX512VL: # BB#0:
 ; AVX512VL-NEXT: vcvttps2dq %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfe,0x5b,0xc0]
-; AVX512VL-NEXT: retl # encoding: [0xc3]
+; AVX512VL-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 %res = call <8 x i32> @llvm.x86.avx.cvtt.ps2dq.256(<8 x float> %a0) ; <<8 x i32>> [#uses=1]
 ret <8 x i32> %res
 }
@@ -240,7 +242,7 @@ define <8 x float> @test_x86_avx_dp_ps_256(<8 x float> %a0, <8 x float> %a1) {
 ; CHECK-LABEL: test_x86_avx_dp_ps_256:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: vdpps $7, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0x7d,0x40,0xc1,0x07]
-; CHECK-NEXT: retl # encoding: [0xc3]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 %res = call <8 x float> @llvm.x86.avx.dp.ps.256(<8 x float> %a0, <8 x float> %a1, i8 7) ; <<8 x float>> [#uses=1]
 ret <8 x float> %res
 }
@@ -251,7 +253,7 @@ define <4 x double> @test_x86_avx_hadd_pd_256(<4 x double> %a0, <4 x double> %a1
 ; CHECK-LABEL: test_x86_avx_hadd_pd_256:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: vhaddpd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0x7c,0xc1]
-; CHECK-NEXT: retl # encoding: [0xc3]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 %res = call <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double> %a0, <4 x double> %a1) ; <<4 x double>> [#uses=1]
 ret <4 x double> %res
 }
@@ -262,7 +264,7 @@ define <8 x float> @test_x86_avx_hadd_ps_256(<8 x float> %a0, <8 x float> %a1) {
 ; CHECK-LABEL: test_x86_avx_hadd_ps_256:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: vhaddps %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xff,0x7c,0xc1]
-; CHECK-NEXT: retl # encoding: [0xc3]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 %res = call <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float> %a0, <8 x float> %a1) ; <<8 x float>> [#uses=1]
 ret <8 x float> %res
 }
@@ -273,7 +275,7 @@ define <4 x double> @test_x86_avx_hsub_pd_256(<4 x double> %a0, <4 x double> %a1
 ; CHECK-LABEL: test_x86_avx_hsub_pd_256:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: vhsubpd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0x7d,0xc1]
-; CHECK-NEXT: retl # encoding: [0xc3]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 %res = call <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double> %a0, <4 x double> %a1) ; <<4 x double>> [#uses=1]
 ret <4 x double> %res
 }
@@ -284,7 +286,7 @@ define <8 x float> @test_x86_avx_hsub_ps_256(<8 x float> %a0, <8 x float> %a1) {
 ; CHECK-LABEL: test_x86_avx_hsub_ps_256:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: vhsubps %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xff,0x7d,0xc1]
-; CHECK-NEXT: retl # encoding: [0xc3]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 %res = call <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float> %a0, <8 x float> %a1) ; <<8 x float>> [#uses=1]
 ret <8 x float> %res
 }
@@ -292,11 +294,16 @@ declare <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float>, <8 x float>) nounwind
 
 define <32 x i8> @test_x86_avx_ldu_dq_256(i8* %a0) {
-; CHECK-LABEL: test_x86_avx_ldu_dq_256:
-; CHECK: # BB#0:
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; CHECK-NEXT: vlddqu (%eax), %ymm0 # encoding: [0xc5,0xff,0xf0,0x00]
-; CHECK-NEXT: retl # encoding: [0xc3]
+; X86-LABEL: test_x86_avx_ldu_dq_256:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT: vlddqu (%eax), %ymm0 # encoding: [0xc5,0xff,0xf0,0x00]
+; X86-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+;
+; X64-LABEL: test_x86_avx_ldu_dq_256:
+; X64: # BB#0:
+; X64-NEXT: vlddqu (%rdi), %ymm0 # encoding: [0xc5,0xff,0xf0,0x07]
+; X64-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 %res = call <32 x i8> @llvm.x86.avx.ldu.dq.256(i8* %a0) ; <<32 x i8>> [#uses=1]
 ret <32 x i8> %res
 }
@@ -304,11 +311,16 @@ declare <32 x i8> @llvm.x86.avx.ldu.dq.256(i8*) nounwind readonly
 
 define <2 x double> @test_x86_avx_maskload_pd(i8* %a0, <2 x i64> %mask) {
-; CHECK-LABEL: test_x86_avx_maskload_pd:
-; CHECK: # BB#0:
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; CHECK-NEXT: vmaskmovpd (%eax), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x2d,0x00]
-; CHECK-NEXT: retl # encoding: [0xc3]
+; X86-LABEL: test_x86_avx_maskload_pd:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT: vmaskmovpd (%eax), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x2d,0x00]
+; X86-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+;
+; X64-LABEL: test_x86_avx_maskload_pd:
+; X64: # BB#0:
+; X64-NEXT: vmaskmovpd (%rdi), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x2d,0x07]
+; X64-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 %res = call <2 x double> @llvm.x86.avx.maskload.pd(i8* %a0, <2 x i64> %mask) ; <<2 x double>> [#uses=1]
 ret <2 x double> %res
 }
@@ -316,11 +328,16 @@ declare <2 x double> @llvm.x86.avx.maskload.pd(i8*, <2 x i64>) nounwind readonly
 
 define <4 x double> @test_x86_avx_maskload_pd_256(i8* %a0, <4 x i64> %mask) {
-; CHECK-LABEL: test_x86_avx_maskload_pd_256:
-; CHECK: # BB#0:
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; CHECK-NEXT: vmaskmovpd (%eax), %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x2d,0x00]
-; CHECK-NEXT: retl # encoding: [0xc3]
+; X86-LABEL: test_x86_avx_maskload_pd_256:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT: vmaskmovpd (%eax), %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x2d,0x00]
+; X86-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+;
+; X64-LABEL: test_x86_avx_maskload_pd_256:
+; X64: # BB#0:
+; X64-NEXT: vmaskmovpd (%rdi), %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x2d,0x07]
+; X64-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 %res = call <4 x double> @llvm.x86.avx.maskload.pd.256(i8* %a0, <4 x i64> %mask) ; <<4 x double>> [#uses=1]
 ret <4 x double> %res
 }
@@ -328,11 +345,16 @@ declare <4 x double> @llvm.x86.avx.maskload.pd.256(i8*, <4 x i64>) nounwind read
 
 define <4 x float> @test_x86_avx_maskload_ps(i8* %a0, <4 x i32> %mask) {
-; CHECK-LABEL: test_x86_avx_maskload_ps:
-; CHECK: # BB#0:
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; CHECK-NEXT: vmaskmovps (%eax), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x2c,0x00]
-; CHECK-NEXT: retl # encoding: [0xc3]
+; X86-LABEL: test_x86_avx_maskload_ps:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT: vmaskmovps (%eax), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x2c,0x00]
+; X86-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+;
+; X64-LABEL: test_x86_avx_maskload_ps:
+; X64: # BB#0:
+; X64-NEXT: vmaskmovps (%rdi), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x2c,0x07]
+; X64-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 %res = call <4 x float> @llvm.x86.avx.maskload.ps(i8* %a0, <4 x i32> %mask) ; <<4 x float>> [#uses=1]
 ret <4 x float> %res
 }
@@ -340,11 +362,16 @@ declare <4 x float> @llvm.x86.avx.maskload.ps(i8*, <4 x i32>) nounwind readonly
 
 define <8 x float> @test_x86_avx_maskload_ps_256(i8* %a0, <8 x i32> %mask) {
-; CHECK-LABEL: test_x86_avx_maskload_ps_256:
-; CHECK: # BB#0:
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; CHECK-NEXT: vmaskmovps (%eax), %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x2c,0x00]
-; CHECK-NEXT: retl # encoding: [0xc3]
+; X86-LABEL: test_x86_avx_maskload_ps_256:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT: vmaskmovps (%eax), %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x2c,0x00]
+; X86-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+;
+; X64-LABEL: test_x86_avx_maskload_ps_256:
+; X64: # BB#0:
+; X64-NEXT: vmaskmovps (%rdi), %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x2c,0x07]
+; X64-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 %res = call <8 x float> @llvm.x86.avx.maskload.ps.256(i8* %a0, <8 x i32> %mask) ; <<8 x float>> [#uses=1]
 ret <8 x float> %res
 }
@@ -352,11 +379,16 @@ declare <8 x float> @llvm.x86.avx.maskload.ps.256(i8*, <8 x i32>) nounwind reado
 
 define void @test_x86_avx_maskstore_pd(i8* %a0, <2 x i64> %mask, <2 x double> %a2) {
-; CHECK-LABEL: test_x86_avx_maskstore_pd:
-; CHECK: # BB#0:
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; CHECK-NEXT: vmaskmovpd %xmm1, %xmm0, (%eax) # encoding: [0xc4,0xe2,0x79,0x2f,0x08]
-; CHECK-NEXT: retl # encoding: [0xc3]
+; X86-LABEL: test_x86_avx_maskstore_pd:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT: vmaskmovpd %xmm1, %xmm0, (%eax) # encoding: [0xc4,0xe2,0x79,0x2f,0x08]
+; X86-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+;
+; X64-LABEL: test_x86_avx_maskstore_pd:
+; X64: # BB#0:
+; X64-NEXT: vmaskmovpd %xmm1, %xmm0, (%rdi) # encoding: [0xc4,0xe2,0x79,0x2f,0x0f]
+; X64-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 call void @llvm.x86.avx.maskstore.pd(i8* %a0, <2 x i64> %mask, <2 x double> %a2)
 ret void
 }
@@ -364,12 +396,18 @@ declare void @llvm.x86.avx.maskstore.pd(i8*, <2 x i64>, <2 x double>) nounwind
 
 define void @test_x86_avx_maskstore_pd_256(i8* %a0, <4 x i64> %mask, <4 x double> %a2) {
-; CHECK-LABEL: test_x86_avx_maskstore_pd_256:
-; CHECK: # BB#0:
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; CHECK-NEXT: vmaskmovpd %ymm1, %ymm0, (%eax) # encoding: [0xc4,0xe2,0x7d,0x2f,0x08]
-; CHECK-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
-; CHECK-NEXT: retl # encoding: [0xc3]
+; X86-LABEL: test_x86_avx_maskstore_pd_256:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT: vmaskmovpd %ymm1, %ymm0, (%eax) # encoding: [0xc4,0xe2,0x7d,0x2f,0x08]
+; X86-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
+; X86-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+;
+; X64-LABEL: test_x86_avx_maskstore_pd_256:
+; X64: # BB#0:
+; X64-NEXT: vmaskmovpd %ymm1, %ymm0, (%rdi) # encoding: [0xc4,0xe2,0x7d,0x2f,0x0f]
+; X64-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
+; X64-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 call void @llvm.x86.avx.maskstore.pd.256(i8* %a0, <4 x i64> %mask, <4 x double> %a2)
 ret void
 }
@@ -377,11 +415,16 @@ declare void @llvm.x86.avx.maskstore.pd.256(i8*, <4 x i64>, <4 x double>) nounwi
 
 define void @test_x86_avx_maskstore_ps(i8* %a0, <4 x i32> %mask, <4 x float> %a2) {
-; CHECK-LABEL: test_x86_avx_maskstore_ps:
-; CHECK: # BB#0:
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; CHECK-NEXT: vmaskmovps %xmm1, %xmm0, (%eax) # encoding: [0xc4,0xe2,0x79,0x2e,0x08]
-; CHECK-NEXT: retl # encoding: [0xc3]
+; X86-LABEL: test_x86_avx_maskstore_ps:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT: vmaskmovps %xmm1, %xmm0, (%eax) # encoding: [0xc4,0xe2,0x79,0x2e,0x08]
+; X86-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+;
+; X64-LABEL: test_x86_avx_maskstore_ps:
+; X64: # BB#0:
+; X64-NEXT: vmaskmovps %xmm1, %xmm0, (%rdi) # encoding: [0xc4,0xe2,0x79,0x2e,0x0f]
+; X64-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 call void @llvm.x86.avx.maskstore.ps(i8* %a0, <4 x i32> %mask, <4 x float> %a2)
 ret void
 }
@@ -389,12 +432,18 @@ declare void @llvm.x86.avx.maskstore.ps(i8*, <4 x i32>, <4 x float>) nounwind
 
 define void @test_x86_avx_maskstore_ps_256(i8* %a0, <8 x i32> %mask, <8 x float> %a2) {
-; CHECK-LABEL: test_x86_avx_maskstore_ps_256:
-; CHECK: # BB#0:
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; CHECK-NEXT: vmaskmovps %ymm1, %ymm0, (%eax) # encoding: [0xc4,0xe2,0x7d,0x2e,0x08]
-; CHECK-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
-; CHECK-NEXT: retl # encoding: [0xc3]
+; X86-LABEL: test_x86_avx_maskstore_ps_256:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT: vmaskmovps %ymm1, %ymm0, (%eax) # encoding: [0xc4,0xe2,0x7d,0x2e,0x08]
+; X86-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
+; X86-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+;
+; X64-LABEL: test_x86_avx_maskstore_ps_256:
+; X64: # BB#0:
+; X64-NEXT: vmaskmovps %ymm1, %ymm0, (%rdi) # encoding: [0xc4,0xe2,0x7d,0x2e,0x0f]
+; X64-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
+; X64-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 call void @llvm.x86.avx.maskstore.ps.256(i8* %a0, <8 x i32> %mask, <8 x float> %a2)
 ret void
 }
@@ -405,12 +454,12 @@ define <4 x double> @test_x86_avx_max_pd_256(<4 x double> %a0, <4 x double> %a1)
 ; AVX-LABEL: test_x86_avx_max_pd_256:
 ; AVX: # BB#0:
 ; AVX-NEXT: vmaxpd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0x5f,0xc1]
-; AVX-NEXT: retl # encoding: [0xc3]
+; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx_max_pd_256:
 ; AVX512VL: # BB#0:
 ; AVX512VL-NEXT: vmaxpd %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x5f,0xc1]
-; AVX512VL-NEXT: retl # encoding: [0xc3]
+; AVX512VL-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 %res = call <4 x double> @llvm.x86.avx.max.pd.256(<4 x double> %a0, <4 x double> %a1) ; <<4 x double>> [#uses=1]
 ret <4 x double> %res
 }
@@ -421,12 +470,12 @@ define <8 x float> @test_x86_avx_max_ps_256(<8 x float> %a0, <8 x float> %a1) {
 ; AVX-LABEL: test_x86_avx_max_ps_256:
 ; AVX: # BB#0:
 ; AVX-NEXT: vmaxps %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfc,0x5f,0xc1]
-; AVX-NEXT: retl # encoding: [0xc3]
+; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx_max_ps_256:
 ; AVX512VL: # BB#0:
 ; AVX512VL-NEXT: vmaxps %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x5f,0xc1]
-; AVX512VL-NEXT: retl # encoding: [0xc3]
+; AVX512VL-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 %res = call <8 x float> @llvm.x86.avx.max.ps.256(<8 x float> %a0, <8 x float> %a1) ; <<8 x float>> [#uses=1]
 ret <8 x float> %res
 }
@@ -437,12 +486,12 @@ define <4 x double> @test_x86_avx_min_pd_256(<4 x double> %a0, <4 x double> %a1)
 ; AVX-LABEL: test_x86_avx_min_pd_256:
 ; AVX: # BB#0:
 ; AVX-NEXT: vminpd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0x5d,0xc1]
-; AVX-NEXT: retl # encoding: [0xc3]
+; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx_min_pd_256:
 ; AVX512VL: # BB#0:
 ; AVX512VL-NEXT: vminpd %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x5d,0xc1]
-; AVX512VL-NEXT: retl # encoding: [0xc3]
+; AVX512VL-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 %res = call <4 x double> @llvm.x86.avx.min.pd.256(<4 x double> %a0, <4 x double> %a1) ; <<4 x double>> [#uses=1]
 ret <4 x double> %res
 }
@@ -453,12 +502,12 @@ define <8 x float> @test_x86_avx_min_ps_256(<8 x float> %a0, <8 x float> %a1) {
 ; AVX-LABEL: test_x86_avx_min_ps_256:
 ; AVX: # BB#0:
 ; AVX-NEXT: vminps %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfc,0x5d,0xc1]
-; AVX-NEXT: retl # encoding: [0xc3]
+; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx_min_ps_256:
 ; AVX512VL: # BB#0:
 ; AVX512VL-NEXT: vminps %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x5d,0xc1]
-; AVX512VL-NEXT: retl # encoding: [0xc3]
+; AVX512VL-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 %res = call <8 x float> @llvm.x86.avx.min.ps.256(<8 x float> %a0, <8 x float> %a1) ; <<8 x float>> [#uses=1]
 ret <8 x float> %res
 }
@@ -470,7 +519,7 @@ define i32 @test_x86_avx_movmsk_pd_256(<4 x double> %a0) {
 ; CHECK: # BB#0:
 ; CHECK-NEXT: vmovmskpd %ymm0, %eax # encoding: [0xc5,0xfd,0x50,0xc0]
 ; CHECK-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
-; CHECK-NEXT: retl # encoding: [0xc3]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 %res = call i32 @llvm.x86.avx.movmsk.pd.256(<4 x double> %a0) ; [#uses=1]
 ret i32 %res
 }
@@ -482,18 +531,13 @@ define i32 @test_x86_avx_movmsk_ps_256(<8 x float> %a0) {
 ; CHECK: # BB#0:
 ; CHECK-NEXT: vmovmskps %ymm0, %eax # encoding: [0xc5,0xfc,0x50,0xc0]
 ; CHECK-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
-; CHECK-NEXT: retl # encoding: [0xc3]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 %res = call i32 @llvm.x86.avx.movmsk.ps.256(<8 x float> %a0) ; [#uses=1]
 ret i32 %res
 }
 declare i32 @llvm.x86.avx.movmsk.ps.256(<8 x float>) nounwind readnone
 
-
-
-
-
-
 define i32 @test_x86_avx_ptestc_256(<4 x i64> %a0, <4 x i64> %a1) {
 ; CHECK-LABEL: test_x86_avx_ptestc_256:
 ; CHECK: # BB#0:
@@ -501,7 +545,7 @@ define i32 @test_x86_avx_ptestc_256(<4 x i64> %a0, <4 x i64> %a1) {
 ; CHECK-NEXT: vptest %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x17,0xc1]
 ; CHECK-NEXT: setb %al # encoding: [0x0f,0x92,0xc0]
 ; CHECK-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
-; CHECK-NEXT: retl # encoding: [0xc3]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 %res = call i32 @llvm.x86.avx.ptestc.256(<4 x i64> %a0, <4 x i64> %a1) ; [#uses=1]
 ret i32 %res
 }
@@ -515,7 +559,7 @@ define i32 @test_x86_avx_ptestnzc_256(<4 x i64> %a0, <4 x i64> %a1) {
 ; CHECK-NEXT: vptest %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x17,0xc1]
 ; CHECK-NEXT: seta %al # encoding: [0x0f,0x97,0xc0]
 ; CHECK-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
-; CHECK-NEXT: retl # encoding: [0xc3]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 %res = call i32 @llvm.x86.avx.ptestnzc.256(<4 x i64> %a0, <4 x i64> %a1) ; [#uses=1]
 ret i32 %res
 }
@@ -529,7 +573,7 @@ define i32 @test_x86_avx_ptestz_256(<4 x i64> %a0, <4 x i64> %a1) {
 ; CHECK-NEXT: vptest %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x17,0xc1]
 ; CHECK-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
 ; CHECK-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
-; CHECK-NEXT: retl # encoding: [0xc3]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 %res = call i32 @llvm.x86.avx.ptestz.256(<4 x i64> %a0, <4 x i64> %a1) ; [#uses=1]
 ret i32 %res
 }
@@ -540,12 +584,12 @@ define <8 x float> @test_x86_avx_rcp_ps_256(<8 x float> %a0) {
 ; AVX-LABEL: test_x86_avx_rcp_ps_256:
 ; AVX: # BB#0:
 ; AVX-NEXT: vrcpps %ymm0, %ymm0 # encoding: [0xc5,0xfc,0x53,0xc0]
-; AVX-NEXT: retl # encoding: [0xc3]
+; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx_rcp_ps_256:
 ; AVX512VL: # BB#0:
 ; AVX512VL-NEXT: vrcp14ps %ymm0, %ymm0 # encoding: [0x62,0xf2,0x7d,0x28,0x4c,0xc0]
-; AVX512VL-NEXT: retl # encoding: [0xc3]
+; AVX512VL-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 %res = call <8 x float> @llvm.x86.avx.rcp.ps.256(<8 x float> %a0) ; <<8 x float>> [#uses=1]
 ret <8 x float> %res
 }
@@ -556,7 +600,7 @@ define <4 x double> @test_x86_avx_round_pd_256(<4 x double> %a0) {
 ; CHECK-LABEL: test_x86_avx_round_pd_256:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: vroundpd $7, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0x7d,0x09,0xc0,0x07]
-; CHECK-NEXT: retl # encoding: [0xc3]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 %res = call <4 x double> @llvm.x86.avx.round.pd.256(<4 x double> %a0, i32 7) ; <<4 x double>> [#uses=1]
 ret <4 x double> %res
 }
@@ -567,7 +611,7 @@ define <8 x float> @test_x86_avx_round_ps_256(<8 x float> %a0) {
 ; CHECK-LABEL: test_x86_avx_round_ps_256:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: vroundps $7, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0x7d,0x08,0xc0,0x07]
-; CHECK-NEXT: retl # encoding: [0xc3]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 %res = call <8 x float> @llvm.x86.avx.round.ps.256(<8 x float> %a0, i32 7) ; <<8 x float>> [#uses=1]
 ret <8 x float> %res
 }
@@ -578,12 +622,12 @@ define <8 x float> @test_x86_avx_rsqrt_ps_256(<8 x float> %a0) {
 ; AVX-LABEL: test_x86_avx_rsqrt_ps_256:
 ; AVX: # BB#0:
 ; AVX-NEXT: vrsqrtps %ymm0, %ymm0 # encoding: [0xc5,0xfc,0x52,0xc0]
-; AVX-NEXT: retl # encoding: [0xc3]
+; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx_rsqrt_ps_256:
 ; AVX512VL: # BB#0:
 ; AVX512VL-NEXT: vrsqrt14ps %ymm0, %ymm0 # encoding: [0x62,0xf2,0x7d,0x28,0x4e,0xc0]
-; AVX512VL-NEXT: retl # encoding: [0xc3]
+; AVX512VL-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 %res = call <8 x float> @llvm.x86.avx.rsqrt.ps.256(<8 x float> %a0) ; <<8 x float>> [#uses=1]
 ret <8 x float> %res
 }
@@ -594,7 +638,7 @@ define <4 x double> @test_x86_avx_sqrt_pd_256(<4 x double> %a0) {
 ; CHECK-LABEL: test_x86_avx_sqrt_pd_256:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: vsqrtpd %ymm0, %ymm0 # encoding: [0xc5,0xfd,0x51,0xc0]
-; CHECK-NEXT: retl # encoding: [0xc3]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 %res = call <4 x double> @llvm.x86.avx.sqrt.pd.256(<4 x double> %a0) ; <<4 x double>> [#uses=1]
 ret <4 x double> %res
 }
@@ -605,7 +649,7 @@ define <8 x float> @test_x86_avx_sqrt_ps_256(<8 x float> %a0) {
 ; CHECK-LABEL: test_x86_avx_sqrt_ps_256:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: vsqrtps %ymm0, %ymm0 # encoding: [0xc5,0xfc,0x51,0xc0]
-; CHECK-NEXT: retl # encoding: [0xc3]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 %res = call <8 x float> @llvm.x86.avx.sqrt.ps.256(<8 x float> %a0) ; <<8 x float>> [#uses=1]
 ret <8 x float> %res
 }
@@ -616,12 +660,12 @@ define <2 x double> @test_x86_avx_vpermilvar_pd(<2 x double> %a0, <2 x i64> %a1)
 ; AVX-LABEL: test_x86_avx_vpermilvar_pd:
 ; AVX: # BB#0:
 ; AVX-NEXT: vpermilpd %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x0d,0xc1]
-; AVX-NEXT: retl # encoding: [0xc3]
+; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx_vpermilvar_pd:
 ; AVX512VL: # BB#0:
 ; AVX512VL-NEXT: vpermilpd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x0d,0xc1]
-; AVX512VL-NEXT: retl # encoding: [0xc3]
+; AVX512VL-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 %res = call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %a0, <2 x i64> %a1) ; <<2 x double>> [#uses=1]
 ret <2 x double> %res
 }
@@ -632,12 +676,12 @@ define <4 x double> @test_x86_avx_vpermilvar_pd_256(<4 x double> %a0, <4 x i64>
 ; AVX-LABEL: test_x86_avx_vpermilvar_pd_256:
 ; AVX: # BB#0:
 ; AVX-NEXT: vpermilpd %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x0d,0xc1]
-; AVX-NEXT: retl # encoding: [0xc3]
+; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx_vpermilvar_pd_256:
 ; AVX512VL: # BB#0:
 ; AVX512VL-NEXT: vpermilpd %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x0d,0xc1]
-; AVX512VL-NEXT: retl # encoding: [0xc3]
+; AVX512VL-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 %res = call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %a0, <4 x i64> %a1) ; <<4 x double>> [#uses=1]
 ret <4 x double> %res
 }
@@ -648,13 +692,13 @@ define <4 x double> @test_x86_avx_vpermilvar_pd_256_2(<4 x double> %a0) {
 ; AVX: # BB#0:
 ; AVX-NEXT: vpermilpd $9, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0x7d,0x05,0xc0,0x09]
 ; AVX-NEXT: # ymm0 = ymm0[1,0,2,3]
-; AVX-NEXT: retl # encoding: [0xc3]
+; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx_vpermilvar_pd_256_2:
 ; AVX512VL: # BB#0:
 ; AVX512VL-NEXT: vpermilpd $9, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x05,0xc0,0x09]
 ; AVX512VL-NEXT: # ymm0 = ymm0[1,0,2,3]
-; AVX512VL-NEXT: retl # encoding: [0xc3]
+; AVX512VL-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 %res = call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %a0, <4 x i64> ) ; <<4 x double>> [#uses=1]
 ret <4 x double> %res
 }
@@ -663,27 +707,37 @@ define <4 x float> @test_x86_avx_vpermilvar_ps(<4 x float> %a0, <4 x i32> %a1) {
 ; AVX-LABEL: test_x86_avx_vpermilvar_ps:
 ; AVX: # BB#0:
 ; AVX-NEXT: vpermilps %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x0c,0xc1]
-; AVX-NEXT: retl # encoding: [0xc3]
+; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx_vpermilvar_ps:
 ; AVX512VL: # BB#0:
 ; AVX512VL-NEXT: vpermilps %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x0c,0xc1]
-; AVX512VL-NEXT: retl # encoding: [0xc3]
+; AVX512VL-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 %res = call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> %a1) ; <<4 x float>> [#uses=1]
 ret <4 x float> %res
 }
 
 define <4 x float> @test_x86_avx_vpermilvar_ps_load(<4 x float> %a0, <4 x i32>* %a1) {
-; AVX-LABEL: test_x86_avx_vpermilvar_ps_load:
-; AVX: # BB#0:
-; AVX-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; AVX-NEXT: vpermilps (%eax), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x0c,0x00]
-; AVX-NEXT: retl # encoding: [0xc3]
+; X86-AVX-LABEL: test_x86_avx_vpermilvar_ps_load:
+; X86-AVX: # BB#0:
+; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX-NEXT: vpermilps (%eax), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x0c,0x00]
+; X86-AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 ;
-; AVX512VL-LABEL: test_x86_avx_vpermilvar_ps_load:
-; AVX512VL: # BB#0:
-; AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; AVX512VL-NEXT: vpermilps (%eax), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x0c,0x00]
-; AVX512VL-NEXT: retl # encoding: [0xc3]
+; X86-AVX512VL-LABEL: test_x86_avx_vpermilvar_ps_load:
+; X86-AVX512VL: # BB#0:
+; X86-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX512VL-NEXT: vpermilps (%eax), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x0c,0x00]
+; X86-AVX512VL-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx_vpermilvar_ps_load:
+; X64-AVX: # BB#0:
+; X64-AVX-NEXT: vpermilps (%rdi), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x0c,0x07]
+; X64-AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx_vpermilvar_ps_load:
+; X64-AVX512VL: # BB#0:
+; X64-AVX512VL-NEXT:
+; X64-AVX512VL-NEXT: ret{{[l|q]}} # encoding: [0xc3]
  %a2 = load <4 x i32>, <4 x i32>* %a1
  %res = call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> %a2) ; <<4 x float>> [#uses=1]
  ret <4 x float> %res
@@ -695,12 +749,12 @@ define <8 x float> @test_x86_avx_vpermilvar_ps_256(<8 x float> %a0, <8 x i32> %a
; AVX-LABEL: test_x86_avx_vpermilvar_ps_256:
; AVX: # BB#0:
; AVX-NEXT: vpermilps %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x0c,0xc1]
-; AVX-NEXT: retl # encoding: [0xc3]
+; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx_vpermilvar_ps_256:
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpermilps %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x0c,0xc1]
-; AVX512VL-NEXT: retl # encoding: [0xc3]
+; AVX512VL-NEXT: ret{{[l|q]}} # encoding: [0xc3]
  %res = call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> %a1) ; <<8 x float>> [#uses=1]
  ret <8 x float> %res
}
@@ -713,7 +767,7 @@ define i32 @test_x86_avx_vtestc_pd(<2 x double> %a0, <2 x double> %a1) {
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
; CHECK-NEXT: vtestpd %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x79,0x0f,0xc1]
; CHECK-NEXT: setb %al # encoding: [0x0f,0x92,0xc0]
-; CHECK-NEXT: retl # encoding: [0xc3]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
  %res = call i32 @llvm.x86.avx.vtestc.pd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
  ret i32 %res
}
@@ -727,7 +781,7 @@ define i32 @test_x86_avx_vtestc_pd_256(<4 x double> %a0, <4 x double> %a1) {
; CHECK-NEXT: vtestpd %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x0f,0xc1]
; CHECK-NEXT: setb %al # encoding: [0x0f,0x92,0xc0]
; CHECK-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
-; CHECK-NEXT: retl # encoding: [0xc3]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
  %res = call i32 @llvm.x86.avx.vtestc.pd.256(<4 x double> %a0, <4 x double> %a1) ; <i32> [#uses=1]
  ret i32 %res
}
@@ -740,7 +794,7 @@ define i32 @test_x86_avx_vtestc_ps(<4 x float> %a0, <4 x float> %a1) {
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
; CHECK-NEXT: vtestps %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x79,0x0e,0xc1]
; CHECK-NEXT: setb %al # encoding: [0x0f,0x92,0xc0]
-; CHECK-NEXT: retl # encoding: [0xc3]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
  %res = call i32 @llvm.x86.avx.vtestc.ps(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
  ret i32 %res
}
@@ -754,7 +808,7 @@ define i32 @test_x86_avx_vtestc_ps_256(<8 x float> %a0, <8 x float> %a1) {
; CHECK-NEXT: vtestps %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x0e,0xc1]
; CHECK-NEXT: setb %al # encoding: [0x0f,0x92,0xc0]
; CHECK-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
-; CHECK-NEXT: retl # encoding: [0xc3]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
  %res = call i32 @llvm.x86.avx.vtestc.ps.256(<8 x float> %a0, <8 x float> %a1) ; <i32> [#uses=1]
  ret i32 %res
}
@@ -767,7 +821,7 @@ define i32 @test_x86_avx_vtestnzc_pd(<2 x double> %a0, <2 x double> %a1) {
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
; CHECK-NEXT: vtestpd %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x79,0x0f,0xc1]
; CHECK-NEXT: seta %al # encoding: [0x0f,0x97,0xc0]
-; CHECK-NEXT: retl # encoding: [0xc3]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
  %res = call i32 @llvm.x86.avx.vtestnzc.pd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
  ret i32 %res
}
@@ -781,7 +835,7 @@ define i32 @test_x86_avx_vtestnzc_pd_256(<4 x double> %a0, <4 x double> %a1) {
; CHECK-NEXT: vtestpd %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x0f,0xc1]
; CHECK-NEXT: seta %al # encoding: [0x0f,0x97,0xc0]
; CHECK-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
-; CHECK-NEXT: retl # encoding: [0xc3]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
  %res = call i32 @llvm.x86.avx.vtestnzc.pd.256(<4 x double> %a0, <4 x double> %a1) ; <i32> [#uses=1]
  ret i32 %res
}
@@ -794,7 +848,7 @@ define i32 @test_x86_avx_vtestnzc_ps(<4 x float> %a0, <4 x float> %a1) {
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
; CHECK-NEXT: vtestps %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x79,0x0e,0xc1]
; CHECK-NEXT: seta %al # encoding: [0x0f,0x97,0xc0]
-; CHECK-NEXT: retl # encoding: [0xc3]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
  %res = call i32 @llvm.x86.avx.vtestnzc.ps(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
  ret i32 %res
}
@@ -808,7 +862,7 @@ define i32 @test_x86_avx_vtestnzc_ps_256(<8 x float> %a0, <8 x float> %a1) {
; CHECK-NEXT: vtestps %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x0e,0xc1]
; CHECK-NEXT: seta %al # encoding: [0x0f,0x97,0xc0]
; CHECK-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
-; CHECK-NEXT: retl # encoding: [0xc3]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
  %res = call i32 @llvm.x86.avx.vtestnzc.ps.256(<8 x float> %a0, <8 x float> %a1) ; <i32> [#uses=1]
  ret i32 %res
}
@@ -821,7 +875,7 @@ define i32 @test_x86_avx_vtestz_pd(<2 x double> %a0, <2 x double> %a1) {
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
; CHECK-NEXT: vtestpd %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x79,0x0f,0xc1]
; CHECK-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; CHECK-NEXT: retl # encoding: [0xc3]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
  %res = call i32 @llvm.x86.avx.vtestz.pd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
  ret i32 %res
}
@@ -835,7 +889,7 @@ define i32 @test_x86_avx_vtestz_pd_256(<4 x double> %a0, <4 x double> %a1) {
; CHECK-NEXT: vtestpd %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x0f,0xc1]
; CHECK-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
; CHECK-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
-; CHECK-NEXT: retl # encoding: [0xc3]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
  %res = call i32 @llvm.x86.avx.vtestz.pd.256(<4 x double> %a0, <4 x double> %a1) ; <i32> [#uses=1]
  ret i32 %res
}
@@ -848,7 +902,7 @@ define i32 @test_x86_avx_vtestz_ps(<4 x float> %a0, <4 x float> %a1) {
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
; CHECK-NEXT: vtestps %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x79,0x0e,0xc1]
; CHECK-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; CHECK-NEXT: retl # encoding: [0xc3]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
  %res = call i32 @llvm.x86.avx.vtestz.ps(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
  ret i32 %res
}
@@ -862,7 +916,7 @@ define i32 @test_x86_avx_vtestz_ps_256(<8 x float> %a0, <8 x float> %a1) {
; CHECK-NEXT: vtestps %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x0e,0xc1]
; CHECK-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
; CHECK-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
-; CHECK-NEXT: retl # encoding: [0xc3]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
  %res = call i32 @llvm.x86.avx.vtestz.ps.256(<8 x float> %a0, <8 x float> %a1) ; <i32> [#uses=1]
  ret i32 %res
}
@@ -873,7 +927,7 @@ define void @test_x86_avx_vzeroall() {
; CHECK-LABEL: test_x86_avx_vzeroall:
; CHECK: # BB#0:
; CHECK-NEXT: vzeroall # encoding: [0xc5,0xfc,0x77]
-; CHECK-NEXT: retl # encoding: [0xc3]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
  call void @llvm.x86.avx.vzeroall()
  ret void
}
@@ -884,30 +938,46 @@ define void @test_x86_avx_vzeroupper() {
; CHECK-LABEL: test_x86_avx_vzeroupper:
; CHECK: # BB#0:
; CHECK-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
-; CHECK-NEXT: retl # encoding: [0xc3]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
  call void @llvm.x86.avx.vzeroupper()
  ret void
}

declare void @llvm.x86.avx.vzeroupper() nounwind

define void @movnt_dq(i8* %p, <2 x i64> %a1) nounwind {
-; AVX-LABEL: movnt_dq:
-; AVX: # BB#0:
-; AVX-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x76,0xc9]
-; AVX-NEXT: vpsubq %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfb,0xc1]
-; AVX-NEXT: vmovntdq %ymm0, (%eax) # encoding: [0xc5,0xfd,0xe7,0x00]
-; AVX-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
-; AVX-NEXT: retl # encoding: [0xc3]
+; X86-AVX-LABEL: movnt_dq:
+; X86-AVX: # BB#0:
+; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x76,0xc9]
+; X86-AVX-NEXT: vpsubq %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfb,0xc1]
+; X86-AVX-NEXT: vmovntdq %ymm0, (%eax) # encoding: [0xc5,0xfd,0xe7,0x00]
+; X86-AVX-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
+; X86-AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
-; AVX512VL-LABEL: movnt_dq:
-; AVX512VL: # BB#0:
-; AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x76,0xc9]
-; AVX512VL-NEXT: vpsubq %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfb,0xc1]
-; AVX512VL-NEXT: vmovntdq %ymm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe7,0x00]
-; AVX512VL-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
-; AVX512VL-NEXT: retl # encoding: [0xc3]
+; X86-AVX512VL-LABEL: movnt_dq:
+; X86-AVX512VL: # BB#0:
+; X86-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x76,0xc9]
+; X86-AVX512VL-NEXT: vpsubq %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfb,0xc1]
+; X86-AVX512VL-NEXT: vmovntdq %ymm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe7,0x00]
+; X86-AVX512VL-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
+; X86-AVX512VL-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+;
+; X64-AVX-LABEL: movnt_dq:
+; X64-AVX: # BB#0:
+; X64-AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x76,0xc9]
+; X64-AVX-NEXT: vpsubq %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfb,0xc1]
+; X64-AVX-NEXT: vmovntdq %ymm0, (%rdi) # encoding: [0xc5,0xfd,0xe7,0x07]
+; X64-AVX-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
+; X64-AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: movnt_dq:
+; X64-AVX512VL: # BB#0:
+; X64-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x76,0xc9]
+; X64-AVX512VL-NEXT: vpsubq %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfb,0xc1]
+; X64-AVX512VL-NEXT: vmovntdq %ymm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe7,0x07]
+; X64-AVX512VL-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
+; X64-AVX512VL-NEXT: ret{{[l|q]}} # encoding: [0xc3]
  %a2 = add <2 x i64> %a1, <i64 1, i64 1>
  %a3 = shufflevector <2 x i64> %a2, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
  tail call void @llvm.x86.avx.movnt.dq.256(i8* %p, <4 x i64> %a3) nounwind
@@ -916,19 +986,31 @@ define void @movnt_dq(i8* %p, <2 x i64> %a1) nounwind {
declare void @llvm.x86.avx.movnt.dq.256(i8*, <4 x i64>) nounwind

define void @movnt_ps(i8* %p, <8 x float> %a) nounwind {
-; AVX-LABEL: movnt_ps:
-; AVX: # BB#0:
-; AVX-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; AVX-NEXT: vmovntps %ymm0, (%eax) # encoding: [0xc5,0xfc,0x2b,0x00]
-; AVX-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
-; AVX-NEXT: retl # encoding: [0xc3]
+; X86-AVX-LABEL: movnt_ps:
+; X86-AVX: # BB#0:
+; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX-NEXT: vmovntps %ymm0, (%eax) # encoding: [0xc5,0xfc,0x2b,0x00]
+; X86-AVX-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
+; X86-AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
-; AVX512VL-LABEL: movnt_ps:
-; AVX512VL: # BB#0:
-; AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; AVX512VL-NEXT: vmovntps %ymm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x2b,0x00]
-; AVX512VL-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
-; AVX512VL-NEXT: retl # encoding: [0xc3]
+; X86-AVX512VL-LABEL: movnt_ps:
+; X86-AVX512VL: # BB#0:
+; X86-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX512VL-NEXT: vmovntps %ymm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x2b,0x00]
+; X86-AVX512VL-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
+; X86-AVX512VL-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+;
+; X64-AVX-LABEL: movnt_ps:
+; X64-AVX: # BB#0:
+; X64-AVX-NEXT: vmovntps %ymm0, (%rdi) # encoding: [0xc5,0xfc,0x2b,0x07]
+; X64-AVX-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
+; X64-AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: movnt_ps:
+; X64-AVX512VL: # BB#0:
+; X64-AVX512VL-NEXT: vmovntps %ymm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x2b,0x07]
+; X64-AVX512VL-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
+; X64-AVX512VL-NEXT: ret{{[l|q]}} # encoding: [0xc3]
  tail call void @llvm.x86.avx.movnt.ps.256(i8* %p, <8 x float> %a) nounwind
  ret void
}
@@ -936,23 +1018,39 @@ declare void @llvm.x86.avx.movnt.ps.256(i8*, <8 x float>) nounwind

define void @movnt_pd(i8* %p, <4 x double> %a1) nounwind {
; add operation forces the execution domain.
-; AVX-LABEL: movnt_pd:
-; AVX: # BB#0:
-; AVX-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x57,0xc9]
-; AVX-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0x58,0xc1]
-; AVX-NEXT: vmovntpd %ymm0, (%eax) # encoding: [0xc5,0xfd,0x2b,0x00]
-; AVX-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
-; AVX-NEXT: retl # encoding: [0xc3]
+; X86-AVX-LABEL: movnt_pd:
+; X86-AVX: # BB#0:
+; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x57,0xc9]
+; X86-AVX-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0x58,0xc1]
+; X86-AVX-NEXT: vmovntpd %ymm0, (%eax) # encoding: [0xc5,0xfd,0x2b,0x00]
+; X86-AVX-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
+; X86-AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
-; AVX512VL-LABEL: movnt_pd:
-; AVX512VL: # BB#0:
-; AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; AVX512VL-NEXT: vxorpd %xmm1, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0x57,0xc9]
-; AVX512VL-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x58,0xc1]
-; AVX512VL-NEXT: vmovntpd %ymm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x2b,0x00]
-; AVX512VL-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
-; AVX512VL-NEXT: retl # encoding: [0xc3]
+; X86-AVX512VL-LABEL: movnt_pd:
+; X86-AVX512VL: # BB#0:
+; X86-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX512VL-NEXT: vxorpd %xmm1, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0x57,0xc9]
+; X86-AVX512VL-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x58,0xc1]
+; X86-AVX512VL-NEXT: vmovntpd %ymm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x2b,0x00]
+; X86-AVX512VL-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
+; X86-AVX512VL-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+;
+; X64-AVX-LABEL: movnt_pd:
+; X64-AVX: # BB#0:
+; X64-AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x57,0xc9]
+; X64-AVX-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0x58,0xc1]
+; X64-AVX-NEXT: vmovntpd %ymm0, (%rdi) # encoding: [0xc5,0xfd,0x2b,0x07]
+; X64-AVX-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
+; X64-AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: movnt_pd:
+; X64-AVX512VL: # BB#0:
+; X64-AVX512VL-NEXT: vxorpd %xmm1, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0x57,0xc9]
+; X64-AVX512VL-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x58,0xc1]
+; X64-AVX512VL-NEXT: vmovntpd %ymm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x2b,0x07]
+; X64-AVX512VL-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
+; X64-AVX512VL-NEXT: ret{{[l|q]}} # encoding: [0xc3]
  %a2 = fadd <4 x double> %a1, <double 0x0, double 0x0, double 0x0, double 0x0>
  tail call void @llvm.x86.avx.movnt.pd.256(i8* %p, <4 x double> %a2) nounwind
  ret void
@@ -965,7 +1063,7 @@ define <2 x i64> @test_x86_pclmulqdq(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_x86_pclmulqdq:
; CHECK: # BB#0:
; CHECK-NEXT: vpclmulqdq $0, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x44,0xc1,0x00]
-; CHECK-NEXT: retl # encoding: [0xc3]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
  %res = call <2 x i64> @llvm.x86.pclmulqdq(<2 x i64> %a0, <2 x i64> %a1, i8 0) ; <<2 x i64>> [#uses=1]
  ret <2 x i64> %res
}
diff --git a/test/CodeGen/X86/avx-intrinsics-x86_64.ll b/test/CodeGen/X86/avx-intrinsics-x86_64.ll
index 8d4f4428ae9..11f560a5c44 100644
--- a/test/CodeGen/X86/avx-intrinsics-x86_64.ll
+++ b/test/CodeGen/X86/avx-intrinsics-x86_64.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=corei7 -mattr=+avx | FileCheck %s --check-prefix=CHECK --check-prefix=AVX
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=corei7 -mattr=+avx512vl | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=CHECK --check-prefix=AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512VL

define <4 x double> @test_x86_avx_vzeroall(<4 x double> %a, <4 x double> %b) {
; AVX-LABEL: test_x86_avx_vzeroall:
@@ -9,14 +9,14 @@ define <4 x double> @test_x86_avx_vzeroall(<4 x double> %a, <4 x double> %b) {
; AVX-NEXT: vmovupd %ymm0, -{{[0-9]+}}(%rsp) # 32-byte Spill
; AVX-NEXT: vzeroall
; AVX-NEXT: vmovups -{{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
-; AVX-NEXT: retq
+; AVX-NEXT: ret{{[l|q]}}
;
; AVX512VL-LABEL: test_x86_avx_vzeroall:
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vaddpd %ymm1, %ymm0, %ymm16
; AVX512VL-NEXT: vzeroall
; AVX512VL-NEXT: vmovapd %ymm16, %ymm0
-; AVX512VL-NEXT: retq
+; AVX512VL-NEXT: ret{{[l|q]}}
  %c = fadd <4 x double> %a, %b
  call void @llvm.x86.avx.vzeroall()
  ret <4 x double> %c
@@ -30,14 +30,14 @@ define <4 x double> @test_x86_avx_vzeroupper(<4 x double> %a, <4 x double> %b) {
; AVX-NEXT: vmovupd %ymm0, -{{[0-9]+}}(%rsp) # 32-byte Spill
; AVX-NEXT: vzeroupper
; AVX-NEXT: vmovups -{{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
-; AVX-NEXT: retq
+; AVX-NEXT: ret{{[l|q]}}
;
; AVX512VL-LABEL: test_x86_avx_vzeroupper:
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vaddpd %ymm1, %ymm0, %ymm16
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: vmovapd %ymm16, %ymm0
-; AVX512VL-NEXT: retq
+; AVX512VL-NEXT: ret{{[l|q]}}
  %c = fadd <4 x double> %a, %b
  call void @llvm.x86.avx.vzeroupper()
  ret <4 x double> %c
--
2.11.0