From 061426e6a131a6b671dc62c73319f57adee27921 Mon Sep 17 00:00:00 2001
From: Craig Topper
Date: Sat, 22 Oct 2016 06:51:56 +0000
Subject: [PATCH] [X86] Add support for printing shuffle comments for VALIGN
 instructions.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@284915 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/InstPrinter/X86InstComments.cpp | 44 ++++++++++++++++++++++++++
 lib/Target/X86/Utils/X86ShuffleDecode.cpp      | 10 ++++++
 lib/Target/X86/Utils/X86ShuffleDecode.h        |  2 ++
 test/CodeGen/X86/avx512-intrinsics.ll          |  6 ++--
 test/CodeGen/X86/avx512vl-intrinsics.ll        |  9 ++++++
 5 files changed, 68 insertions(+), 3 deletions(-)

diff --git a/lib/Target/X86/InstPrinter/X86InstComments.cpp b/lib/Target/X86/InstPrinter/X86InstComments.cpp
index cfbb90782ec..d608ba1a0b6 100644
--- a/lib/Target/X86/InstPrinter/X86InstComments.cpp
+++ b/lib/Target/X86/InstPrinter/X86InstComments.cpp
@@ -255,6 +255,10 @@ static std::string getMaskName(const MCInst *MI, const char *DestName,
   CASE_MASKZ_UNPCK(UNPCKLPS, r)
   CASE_MASKZ_SHUF(PALIGNR, r)
   CASE_MASKZ_SHUF(PALIGNR, m)
+  CASE_MASKZ_SHUF(ALIGNQ, r)
+  CASE_MASKZ_SHUF(ALIGNQ, m)
+  CASE_MASKZ_SHUF(ALIGND, r)
+  CASE_MASKZ_SHUF(ALIGND, m)
   CASE_MASKZ_SHUF(SHUFPD, m)
   CASE_MASKZ_SHUF(SHUFPD, r)
   CASE_MASKZ_SHUF(SHUFPS, m)
@@ -340,6 +344,10 @@ static std::string getMaskName(const MCInst *MI, const char *DestName,
   CASE_MASK_UNPCK(UNPCKLPS, r)
   CASE_MASK_SHUF(PALIGNR, r)
   CASE_MASK_SHUF(PALIGNR, m)
+  CASE_MASK_SHUF(ALIGNQ, r)
+  CASE_MASK_SHUF(ALIGNQ, m)
+  CASE_MASK_SHUF(ALIGND, r)
+  CASE_MASK_SHUF(ALIGND, m)
   CASE_MASK_SHUF(SHUFPD, m)
   CASE_MASK_SHUF(SHUFPD, r)
   CASE_MASK_SHUF(SHUFPS, m)
@@ -620,6 +628,42 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
                         ShuffleMask);
     break;
 
+  CASE_AVX512_INS_COMMON(ALIGNQ, Z, rri)
+  CASE_AVX512_INS_COMMON(ALIGNQ, Z256, rri)
+  CASE_AVX512_INS_COMMON(ALIGNQ, Z128, rri)
+    Src1Name = getRegName(MI->getOperand(NumOperands - 2).getReg());
+    RegForm = true;
+    LLVM_FALLTHROUGH;
+
+  CASE_AVX512_INS_COMMON(ALIGNQ, Z, rmi)
+  CASE_AVX512_INS_COMMON(ALIGNQ, Z256, rmi)
+  CASE_AVX512_INS_COMMON(ALIGNQ, Z128, rmi)
+    Src2Name = getRegName(MI->getOperand(NumOperands-(RegForm?3:7)).getReg());
+    DestName = getRegName(MI->getOperand(0).getReg());
+    if (MI->getOperand(NumOperands - 1).isImm())
+      DecodeVALIGNMask(getRegOperandVectorVT(MI, MVT::i64, 0),
+                       MI->getOperand(NumOperands - 1).getImm(),
+                       ShuffleMask);
+    break;
+
+  CASE_AVX512_INS_COMMON(ALIGND, Z, rri)
+  CASE_AVX512_INS_COMMON(ALIGND, Z256, rri)
+  CASE_AVX512_INS_COMMON(ALIGND, Z128, rri)
+    Src1Name = getRegName(MI->getOperand(NumOperands - 2).getReg());
+    RegForm = true;
+    LLVM_FALLTHROUGH;
+
+  CASE_AVX512_INS_COMMON(ALIGND, Z, rmi)
+  CASE_AVX512_INS_COMMON(ALIGND, Z256, rmi)
+  CASE_AVX512_INS_COMMON(ALIGND, Z128, rmi)
+    Src2Name = getRegName(MI->getOperand(NumOperands-(RegForm?3:7)).getReg());
+    DestName = getRegName(MI->getOperand(0).getReg());
+    if (MI->getOperand(NumOperands - 1).isImm())
+      DecodeVALIGNMask(getRegOperandVectorVT(MI, MVT::i32, 0),
+                       MI->getOperand(NumOperands - 1).getImm(),
+                       ShuffleMask);
+    break;
+
   CASE_SHUF(PSHUFD, ri)
     Src1Name = getRegName(MI->getOperand(NumOperands - 2).getReg());
     LLVM_FALLTHROUGH;
diff --git a/lib/Target/X86/Utils/X86ShuffleDecode.cpp b/lib/Target/X86/Utils/X86ShuffleDecode.cpp
index ec3b3b42be6..3c04bf4899f 100644
--- a/lib/Target/X86/Utils/X86ShuffleDecode.cpp
+++ b/lib/Target/X86/Utils/X86ShuffleDecode.cpp
@@ -151,6 +151,16 @@ void DecodePALIGNRMask(MVT VT, unsigned Imm,
   }
 }
 
+void DecodeVALIGNMask(MVT VT, unsigned Imm,
+                      SmallVectorImpl<int> &ShuffleMask) {
+  int NumElts = VT.getVectorNumElements();
+  // Not all bits of the immediate are used so mask it.
+  assert(isPowerOf2_32(NumElts) && "NumElts should be power of 2");
+  Imm = Imm & (NumElts - 1);
+  for (int i = 0; i != NumElts; ++i)
+    ShuffleMask.push_back(i + Imm);
+}
+
 /// DecodePSHUFMask - This decodes the shuffle masks for pshufw, pshufd, and vpermilp*.
 /// VT indicates the type of the vector allowing it to handle different
 /// datatypes and vector widths.
diff --git a/lib/Target/X86/Utils/X86ShuffleDecode.h b/lib/Target/X86/Utils/X86ShuffleDecode.h
index dc21c19752c..17619d09d05 100644
--- a/lib/Target/X86/Utils/X86ShuffleDecode.h
+++ b/lib/Target/X86/Utils/X86ShuffleDecode.h
@@ -55,6 +55,8 @@ void DecodePSRLDQMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
 
 void DecodePALIGNRMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
 
+void DecodeVALIGNMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
+
 /// Decodes the shuffle masks for pshufd/pshufw/vpermilpd/vpermilps.
 /// VT indicates the type of the vector allowing it to handle different
 /// datatypes and vector widths.
diff --git a/test/CodeGen/X86/avx512-intrinsics.ll b/test/CodeGen/X86/avx512-intrinsics.ll
index 495025c1ba9..5098f8b775e 100644
--- a/test/CodeGen/X86/avx512-intrinsics.ll
+++ b/test/CodeGen/X86/avx512-intrinsics.ll
@@ -846,7 +846,7 @@ declare i16 @llvm.x86.avx512.ptestm.d.512(<16 x i32>, <16 x i32>, i16)
 define <8 x i64> @test_valign_q(<8 x i64> %a, <8 x i64> %b) {
 ; CHECK-LABEL: test_valign_q:
 ; CHECK:       ## BB#0:
-; CHECK-NEXT:    valignq $2, %zmm1, %zmm0, %zmm0
+; CHECK-NEXT:    valignq {{.*#+}} zmm0 = zmm1[2,3,4,5,6,7],zmm0[0,1]
 ; CHECK-NEXT:    retq
   %res = call <8 x i64> @llvm.x86.avx512.mask.valign.q.512(<8 x i64> %a, <8 x i64> %b, i32 2, <8 x i64> zeroinitializer, i8 -1)
   ret <8 x i64> %res
@@ -856,7 +856,7 @@ define <8 x i64> @test_mask_valign_q(<8 x i64> %a, <8 x i64> %b, <8 x i64> %src,
 ; CHECK-LABEL: test_mask_valign_q:
 ; CHECK:       ## BB#0:
 ; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    valignq $2, %zmm1, %zmm0, %zmm2 {%k1}
+; CHECK-NEXT:    valignq {{.*#+}} zmm2 {%k1} = zmm1[2,3,4,5,6,7],zmm0[0,1]
 ; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <8 x i64> @llvm.x86.avx512.mask.valign.q.512(<8 x i64> %a, <8 x i64> %b, i32 2, <8 x i64> %src, i8 %mask)
@@ -869,7 +869,7 @@ define <16 x i32> @test_maskz_valign_d(<16 x i32> %a, <16 x i32> %b, i16 %mask)
 ; CHECK-LABEL: test_maskz_valign_d:
 ; CHECK:       ## BB#0:
 ; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    valignd $5, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; CHECK-NEXT:    valignd {{.*#+}} zmm0 {%k1} {z} = zmm1[5,6,7,8,9,10,11,12,13,14,15],zmm0[0,1,2,3,4]
 ; CHECK-NEXT:    retq
   %res = call <16 x i32> @llvm.x86.avx512.mask.valign.d.512(<16 x i32> %a, <16 x i32> %b, i32 5, <16 x i32> zeroinitializer, i16 %mask)
   ret <16 x i32> %res
diff --git a/test/CodeGen/X86/avx512vl-intrinsics.ll b/test/CodeGen/X86/avx512vl-intrinsics.ll
index e2448979528..e5a211b029e 100644
--- a/test/CodeGen/X86/avx512vl-intrinsics.ll
+++ b/test/CodeGen/X86/avx512vl-intrinsics.ll
@@ -3946,8 +3946,11 @@ define <4 x i32>@test_int_x86_avx512_mask_valign_d_128(<4 x i32> %x0, <4 x i32>
 ; CHECK:       ## BB#0:
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    valignd $22, %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x03,0xd1,0x16]
+; CHECK-NEXT:    ## xmm2 {%k1} = xmm1[2,3],xmm0[0,1]
 ; CHECK-NEXT:    valignd $22, %xmm1, %xmm0, %xmm3 {%k1} {z} ## encoding: [0x62,0xf3,0x7d,0x89,0x03,0xd9,0x16]
+; CHECK-NEXT:    ## xmm3 {%k1} {z} = xmm1[2,3],xmm0[0,1]
 ; CHECK-NEXT:    valignd $22, %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf3,0x7d,0x08,0x03,0xc1,0x16]
+; CHECK-NEXT:    ## xmm0 = xmm1[2,3],xmm0[0,1]
 ; CHECK-NEXT:    vpaddd %xmm0, %xmm2, %xmm0 ## encoding: [0x62,0xf1,0x6d,0x08,0xfe,0xc0]
 ; CHECK-NEXT:    vpaddd %xmm3, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7d,0x08,0xfe,0xc3]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
@@ -3966,7 +3969,9 @@ define <8 x i32>@test_int_x86_avx512_mask_valign_d_256(<8 x i32> %x0, <8 x i32>
 ; CHECK:       ## BB#0:
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    valignd $22, %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x03,0xd1,0x16]
+; CHECK-NEXT:    ## ymm2 {%k1} = ymm1[6,7],ymm0[0,1,2,3,4,5]
 ; CHECK-NEXT:    valignd $22, %ymm1, %ymm0, %ymm0 ## encoding: [0x62,0xf3,0x7d,0x28,0x03,0xc1,0x16]
+; CHECK-NEXT:    ## ymm0 = ymm1[6,7],ymm0[0,1,2,3,4,5]
 ; CHECK-NEXT:    vpaddd %ymm0, %ymm2, %ymm0 ## encoding: [0x62,0xf1,0x6d,0x28,0xfe,0xc0]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <8 x i32> @llvm.x86.avx512.mask.valign.d.256(<8 x i32> %x0, <8 x i32> %x1, i32 22, <8 x i32> %x3, i8 %x4)
@@ -3982,7 +3987,9 @@ define <2 x i64>@test_int_x86_avx512_mask_valign_q_128(<2 x i64> %x0, <2 x i64>
 ; CHECK:       ## BB#0:
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    valignq $22, %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x03,0xd1,0x16]
+; CHECK-NEXT:    ## xmm2 {%k1} = xmm1[0,1]
 ; CHECK-NEXT:    valignq $22, %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf3,0xfd,0x08,0x03,0xc1,0x16]
+; CHECK-NEXT:    ## xmm0 = xmm1[0,1]
 ; CHECK-NEXT:    vpaddq %xmm0, %xmm2, %xmm0 ## encoding: [0x62,0xf1,0xed,0x08,0xd4,0xc0]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.avx512.mask.valign.q.128(<2 x i64> %x0, <2 x i64> %x1, i32 22, <2 x i64> %x3, i8 %x4)
@@ -3998,7 +4005,9 @@ define <4 x i64>@test_int_x86_avx512_mask_valign_q_256(<4 x i64> %x0, <4 x i64>
 ; CHECK:       ## BB#0:
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    valignq $22, %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x03,0xd1,0x16]
+; CHECK-NEXT:    ## ymm2 {%k1} = ymm1[2,3],ymm0[0,1]
 ; CHECK-NEXT:    valignq $22, %ymm1, %ymm0, %ymm0 ## encoding: [0x62,0xf3,0xfd,0x28,0x03,0xc1,0x16]
+; CHECK-NEXT:    ## ymm0 = ymm1[2,3],ymm0[0,1]
 ; CHECK-NEXT:    vpaddq %ymm0, %ymm2, %ymm0 ## encoding: [0x62,0xf1,0xed,0x28,0xd4,0xc0]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <4 x i64> @llvm.x86.avx512.mask.valign.q.256(<4 x i64> %x0, <4 x i64> %x1, i32 22, <4 x i64> %x3, i8 %x4)
-- 
2.11.0
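
Note for readers skimming the diff: the comment strings added to the tests follow directly from the index arithmetic in DecodeVALIGNMask. VALIGND/VALIGNQ concatenate the two sources, shift right by the immediate in element units, and only the low log2(NumElts) bits of the immediate are meaningful. A minimal standalone sketch of that arithmetic is below; it is plain C++ for illustration only, and decodeValignMask plus the hard-coded register names are hypothetical stand-ins, not the LLVM API.

#include <cstdio>
#include <vector>

// Sketch of the decode: destination lane i reads element (i + Imm) of the
// two-source concatenation, with unused immediate bits masked off the same
// way DecodeVALIGNMask does with `Imm & (NumElts - 1)`.
static std::vector<int> decodeValignMask(int NumElts, unsigned Imm) {
  std::vector<int> Mask;
  Imm &= unsigned(NumElts) - 1;        // only log2(NumElts) bits are used
  for (int i = 0; i != NumElts; ++i)
    Mask.push_back(i + int(Imm));      // 0..NumElts-1 -> low source, rest -> high
  return Mask;
}

int main() {
  // valignq $2, %zmm1, %zmm0, %zmm0 on 8 x i64: %zmm1 is the low half of the
  // concatenation and %zmm0 the high half, so the printed comment is
  //   zmm0 = zmm1[2,3,4,5,6,7],zmm0[0,1]
  const int NumElts = 8;
  for (int M : decodeValignMask(NumElts, 2)) {
    if (M < NumElts)
      std::printf("zmm1[%d] ", M);           // element taken from %zmm1
    else
      std::printf("zmm0[%d] ", M - NumElts); // wrapped around into %zmm0
  }
  std::printf("\n");
  return 0;
}

The same masking explains the $22 immediates in the avx512vl tests: 22 & 3 = 2 for the four-element cases (xmm1[2,3],xmm0[0,1] and ymm1[2,3],ymm0[0,1]), 22 & 7 = 6 for the eight-element case (ymm1[6,7],ymm0[0,1,2,3,4,5]), and 22 & 1 = 0 for the two-element case (xmm1[0,1]).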