From: Simon Pilgrim
Date: Mon, 30 May 2016 18:49:57 +0000 (+0000)
Subject: [X86][AVX2] Regenerated AVX2 extension tests
X-Git-Tag: android-x86-7.1-r4~32562
X-Git-Url: http://git.osdn.net/view?a=commitdiff_plain;h=16a2da8f0e6271918aec64f520be12ad90702a0e;p=android-x86%2Fexternal-llvm.git

[X86][AVX2] Regenerated AVX2 extension tests

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@271224 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/test/CodeGen/X86/avx2-pmovxrm-intrinsics.ll b/test/CodeGen/X86/avx2-pmovxrm-intrinsics.ll
index f281bbaa675..1d0626f66ee 100644
--- a/test/CodeGen/X86/avx2-pmovxrm-intrinsics.ll
+++ b/test/CodeGen/X86/avx2-pmovxrm-intrinsics.ll
@@ -1,16 +1,34 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx2 | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-apple-darwin -mattr=+avx2 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx2 | FileCheck %s --check-prefix=X64
 
 define <16 x i16> @test_llvm_x86_avx2_pmovsxbw(<16 x i8>* %a) {
-; CHECK-LABEL: test_llvm_x86_avx2_pmovsxbw
-; CHECK: vpmovsxbw (%rdi), %ymm0
+; X32-LABEL: test_llvm_x86_avx2_pmovsxbw:
+; X32:       ## BB#0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    vpmovsxbw (%eax), %ymm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_llvm_x86_avx2_pmovsxbw:
+; X64:       ## BB#0:
+; X64-NEXT:    vpmovsxbw (%rdi), %ymm0
+; X64-NEXT:    retq
   %1 = load <16 x i8>, <16 x i8>* %a, align 1
   %2 = sext <16 x i8> %1 to <16 x i16>
   ret <16 x i16> %2
 }
 
 define <8 x i32> @test_llvm_x86_avx2_pmovsxbd(<16 x i8>* %a) {
-; CHECK-LABEL: test_llvm_x86_avx2_pmovsxbd
-; CHECK: vpmovsxbd (%rdi), %ymm0
+; X32-LABEL: test_llvm_x86_avx2_pmovsxbd:
+; X32:       ## BB#0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    vpmovsxbd (%eax), %ymm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_llvm_x86_avx2_pmovsxbd:
+; X64:       ## BB#0:
+; X64-NEXT:    vpmovsxbd (%rdi), %ymm0
+; X64-NEXT:    retq
   %1 = load <16 x i8>, <16 x i8>* %a, align 1
   %2 = shufflevector <16 x i8> %1, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %3 = sext <8 x i8> %2 to <8 x i32>
@@ -18,8 +36,16 @@ define <8 x i32> @test_llvm_x86_avx2_pmovsxbd(<16 x i8>* %a) {
 }
 
 define <4 x i64> @test_llvm_x86_avx2_pmovsxbq(<16 x i8>* %a) {
-; CHECK-LABEL: test_llvm_x86_avx2_pmovsxbq
-; CHECK: vpmovsxbq (%rdi), %ymm0
+; X32-LABEL: test_llvm_x86_avx2_pmovsxbq:
+; X32:       ## BB#0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    vpmovsxbq (%eax), %ymm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_llvm_x86_avx2_pmovsxbq:
+; X64:       ## BB#0:
+; X64-NEXT:    vpmovsxbq (%rdi), %ymm0
+; X64-NEXT:    retq
   %1 = load <16 x i8>, <16 x i8>* %a, align 1
   %2 = shufflevector <16 x i8> %1, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %3 = sext <4 x i8> %2 to <4 x i64>
@@ -27,16 +53,32 @@ define <4 x i64> @test_llvm_x86_avx2_pmovsxbq(<16 x i8>* %a) {
 }
 
 define <8 x i32> @test_llvm_x86_avx2_pmovsxwd(<8 x i16>* %a) {
-; CHECK-LABEL: test_llvm_x86_avx2_pmovsxwd
-; CHECK: vpmovsxwd (%rdi), %ymm0
+; X32-LABEL: test_llvm_x86_avx2_pmovsxwd:
+; X32:       ## BB#0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    vpmovsxwd (%eax), %ymm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_llvm_x86_avx2_pmovsxwd:
+; X64:       ## BB#0:
+; X64-NEXT:    vpmovsxwd (%rdi), %ymm0
+; X64-NEXT:    retq
   %1 = load <8 x i16>, <8 x i16>* %a, align 1
   %2 = sext <8 x i16> %1 to <8 x i32>
   ret <8 x i32> %2
 }
 
 define <4 x i64> @test_llvm_x86_avx2_pmovsxwq(<8 x i16>* %a) {
-; CHECK-LABEL: test_llvm_x86_avx2_pmovsxwq
-; CHECK: vpmovsxwq (%rdi), %ymm0
+; X32-LABEL: test_llvm_x86_avx2_pmovsxwq:
+; X32:       ## BB#0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    vpmovsxwq (%eax), %ymm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_llvm_x86_avx2_pmovsxwq:
+; X64:       ## BB#0:
+; X64-NEXT:    vpmovsxwq (%rdi), %ymm0
+; X64-NEXT:    retq
   %1 = load <8 x i16>, <8 x i16>* %a, align 1
   %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %3 = sext <4 x i16> %2 to <4 x i64>
@@ -44,24 +86,48 @@ define <4 x i64> @test_llvm_x86_avx2_pmovsxwq(<8 x i16>* %a) {
 }
 
 define <4 x i64> @test_llvm_x86_avx2_pmovsxdq(<4 x i32>* %a) {
-; CHECK-LABEL: test_llvm_x86_avx2_pmovsxdq
-; CHECK: vpmovsxdq (%rdi), %ymm0
+; X32-LABEL: test_llvm_x86_avx2_pmovsxdq:
+; X32:       ## BB#0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    vpmovsxdq (%eax), %ymm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_llvm_x86_avx2_pmovsxdq:
+; X64:       ## BB#0:
+; X64-NEXT:    vpmovsxdq (%rdi), %ymm0
+; X64-NEXT:    retq
   %1 = load <4 x i32>, <4 x i32>* %a, align 1
   %2 = sext <4 x i32> %1 to <4 x i64>
   ret <4 x i64> %2
 }
 
 define <16 x i16> @test_llvm_x86_avx2_pmovzxbw(<16 x i8>* %a) {
-; CHECK-LABEL: test_llvm_x86_avx2_pmovzxbw
-; CHECK: vpmovzxbw (%rdi), %ymm0
+; X32-LABEL: test_llvm_x86_avx2_pmovzxbw:
+; X32:       ## BB#0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_llvm_x86_avx2_pmovzxbw:
+; X64:       ## BB#0:
+; X64-NEXT:    vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; X64-NEXT:    retq
   %1 = load <16 x i8>, <16 x i8>* %a, align 1
   %2 = zext <16 x i8> %1 to <16 x i16>
   ret <16 x i16> %2
 }
 
 define <8 x i32> @test_llvm_x86_avx2_pmovzxbd(<16 x i8>* %a) {
-; CHECK-LABEL: test_llvm_x86_avx2_pmovzxbd
-; CHECK: vpmovzxbd (%rdi), %ymm0
+; X32-LABEL: test_llvm_x86_avx2_pmovzxbd:
+; X32:       ## BB#0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_llvm_x86_avx2_pmovzxbd:
+; X64:       ## BB#0:
+; X64-NEXT:    vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
+; X64-NEXT:    retq
   %1 = load <16 x i8>, <16 x i8>* %a, align 1
   %2 = shufflevector <16 x i8> %1, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %3 = zext <8 x i8> %2 to <8 x i32>
@@ -69,8 +135,16 @@ define <8 x i32> @test_llvm_x86_avx2_pmovzxbd(<16 x i8>* %a) {
 }
 
 define <4 x i64> @test_llvm_x86_avx2_pmovzxbq(<16 x i8>* %a) {
-; CHECK-LABEL: test_llvm_x86_avx2_pmovzxbq
-; CHECK: vpmovzxbq (%rdi), %ymm0
+; X32-LABEL: test_llvm_x86_avx2_pmovzxbq:
+; X32:       ## BB#0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    vpmovzxbq {{.*#+}} ymm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_llvm_x86_avx2_pmovzxbq:
+; X64:       ## BB#0:
+; X64-NEXT:    vpmovzxbq {{.*#+}} ymm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero
+; X64-NEXT:    retq
   %1 = load <16 x i8>, <16 x i8>* %a, align 1
   %2 = shufflevector <16 x i8> %1, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %3 = zext <4 x i8> %2 to <4 x i64>
@@ -78,16 +152,32 @@ define <4 x i64> @test_llvm_x86_avx2_pmovzxbq(<16 x i8>* %a) {
 }
 
 define <8 x i32> @test_llvm_x86_avx2_pmovzxwd(<8 x i16>* %a) {
-; CHECK-LABEL: test_llvm_x86_avx2_pmovzxwd
-; CHECK: vpmovzxwd (%rdi), %ymm0
+; X32-LABEL: test_llvm_x86_avx2_pmovzxwd:
+; X32:       ## BB#0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    vpmovzxwd {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_llvm_x86_avx2_pmovzxwd:
+; X64:       ## BB#0:
+; X64-NEXT:    vpmovzxwd {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; X64-NEXT:    retq
   %1 = load <8 x i16>, <8 x i16>* %a, align 1
   %2 = zext <8 x i16> %1 to <8 x i32>
   ret <8 x i32> %2
 }
 
 define <4 x i64> @test_llvm_x86_avx2_pmovzxwq(<8 x i16>* %a) {
-; CHECK-LABEL: test_llvm_x86_avx2_pmovzxwq
-; CHECK: vpmovzxwq (%rdi), %ymm0
+; X32-LABEL: test_llvm_x86_avx2_pmovzxwq:
+; X32:       ## BB#0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    vpmovzxwq {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_llvm_x86_avx2_pmovzxwq:
+; X64:       ## BB#0:
+; X64-NEXT:    vpmovzxwq {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; X64-NEXT:    retq
   %1 = load <8 x i16>, <8 x i16>* %a, align 1
   %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %3 = zext <4 x i16> %2 to <4 x i64>
@@ -95,8 +185,16 @@ define <4 x i64> @test_llvm_x86_avx2_pmovzxwq(<8 x i16>* %a) {
 }
 
 define <4 x i64> @test_llvm_x86_avx2_pmovzxdq(<4 x i32>* %a) {
-; CHECK-LABEL: test_llvm_x86_avx2_pmovzxdq
-; CHECK: vpmovzxdq (%rdi), %ymm0
+; X32-LABEL: test_llvm_x86_avx2_pmovzxdq:
+; X32:       ## BB#0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    vpmovzxdq {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_llvm_x86_avx2_pmovzxdq:
+; X64:       ## BB#0:
+; X64-NEXT:    vpmovzxdq {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; X64-NEXT:    retq
   %1 = load <4 x i32>, <4 x i32>* %a, align 1
   %2 = zext <4 x i32> %1 to <4 x i64>
   ret <4 x i64> %2
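
Note on regeneration: the new assertions come from utils/update_llc_test_checks.py,
as the NOTE line in the test states. The command below is a minimal sketch of a
typical invocation, assuming it is run from the LLVM source tree root with a
freshly built llc reachable on PATH (the script re-executes each RUN line's llc
command itself); accepted options can vary between LLVM revisions.

  $ python utils/update_llc_test_checks.py test/CodeGen/X86/avx2-pmovxrm-intrinsics.ll

The script captures the actual assembly for every RUN line and rewrites the
check blocks from it, emitting one block per --check-prefix. That is why each
function gains paired X32/X64 blocks with strict -NEXT directives and full
shuffle decode comments (mem[0],zero,...) in place of the old hand-written
CHECK lines.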