From 1765e74c15c83db437018a3c9efabbeb4ce9cbde Mon Sep 17 00:00:00 2001
From: Elena Demikhovsky
Date: Thu, 22 Aug 2013 12:18:28 +0000
Subject: [PATCH] AVX-512: Added masked SHIFT commands, more encoding tests

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@189005 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp | 11 ++---
 lib/Target/X86/X86InstrAVX512.td                 | 56 ++++++++++++++++--------
 utils/TableGen/X86RecognizableInstr.cpp          | 40 ++++++++++-------
 3 files changed, 67 insertions(+), 40 deletions(-)

diff --git a/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp b/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
index 7173d51e6a2..0c9fd91e310 100644
--- a/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
@@ -868,11 +868,12 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
   case X86II::MRM6r: case X86II::MRM7r:
     // MRM0r-MRM7r instructions forms:
     //  dst(VEX_4V), src(ModR/M), imm8
-    VEX_4V = getVEXRegisterEncoding(MI, CurOp);
-    if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
-      EVEX_V2 = 0x0;
-    CurOp++;
-
+    if (HasVEX_4V) {
+      VEX_4V = getVEXRegisterEncoding(MI, CurOp);
+      if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
+        EVEX_V2 = 0x0;
+      CurOp++;
+    }
     if (HasEVEX_K)
       EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);

diff --git a/lib/Target/X86/X86InstrAVX512.td b/lib/Target/X86/X86InstrAVX512.td
index c3fb8019cc9..0cb946ddb8c 100644
--- a/lib/Target/X86/X86InstrAVX512.td
+++ b/lib/Target/X86/X86InstrAVX512.td
@@ -1720,78 +1720,98 @@ defm VPTESTMQZ : avx512_vptest<0x27, "vptestmq", VK8, VR512, f512mem, memopv8i
 // AVX-512  Shift instructions
 //===----------------------------------------------------------------------===//
 multiclass avx512_shift_rmi<bits<8> opc, Format ImmFormR, Format ImmFormM,
-                            string OpcodeStr,
-                            SDNode OpNode, RegisterClass RC, ValueType vt,
-                            X86MemOperand x86memop, PatFrag mem_frag> {
+                            string OpcodeStr, SDNode OpNode, RegisterClass RC,
+                            ValueType vt, X86MemOperand x86memop, PatFrag mem_frag,
+                            RegisterClass KRC> {
   def ri : AVX512BIi8, EVEX_4V;
+  def rik : AVX512BIi8, EVEX_4V, EVEX_K;
   def mi: AVX512BIi8, EVEX_4V;
+  def mik: AVX512BIi8, EVEX_4V, EVEX_K;
 }

 multiclass avx512_shift_rrm<bits<8> opc, string OpcodeStr, SDNode OpNode,
                             RegisterClass RC, ValueType vt, ValueType SrcVT,
-                            PatFrag bc_frag> {
+                            PatFrag bc_frag, RegisterClass KRC> {
 // src2 is always 128-bit
   def rr : AVX512BI, EVEX_4V;
+  def rrk : AVX512BI, EVEX_4V, EVEX_K;
   def rm : AVX512BI, EVEX_4V;
+  def rmk : AVX512BI, EVEX_4V, EVEX_K;
 }

 defm VPSRLDZ : avx512_shift_rmi<0x72, MRM2r, MRM2m, "vpsrld", X86vsrli,
-                                VR512, v16i32, i512mem, memopv16i32>, EVEX_V512,
-                                EVEX_CD8<32, CD8VF>;
+                                VR512, v16i32, i512mem, memopv16i32, VK16WM>,
+                                EVEX_V512, EVEX_CD8<32, CD8VF>;
 defm VPSRLDZ : avx512_shift_rrm<0xD2, "vpsrld", X86vsrl,
-                                VR512, v16i32, v4i32, bc_v4i32>, EVEX_V512,
+                                VR512, v16i32, v4i32, bc_v4i32, VK16WM>, EVEX_V512,
                                 EVEX_CD8<32, CD8VQ>;

 defm VPSRLQZ : avx512_shift_rmi<0x73, MRM2r, MRM2m, "vpsrlq", X86vsrli,
-                                VR512, v8i64, i512mem, memopv8i64>, EVEX_V512,
+                                VR512, v8i64, i512mem, memopv8i64, VK8WM>, EVEX_V512,
                                 EVEX_CD8<64, CD8VF>, VEX_W;
 defm VPSRLQZ : avx512_shift_rrm<0xD3, "vpsrlq", X86vsrl,
-                                VR512, v8i64, v2i64, bc_v2i64>, EVEX_V512,
+                                VR512, v8i64, v2i64, bc_v2i64, VK8WM>, EVEX_V512,
                                 EVEX_CD8<64, CD8VQ>, VEX_W;

 defm VPSLLDZ : avx512_shift_rmi<0x72, MRM6r, MRM6m, "vpslld", X86vshli,
-                                VR512, v16i32, i512mem, memopv16i32>, EVEX_V512,
+                                VR512, v16i32, i512mem, memopv16i32, VK16WM>, EVEX_V512,
                                 EVEX_CD8<32, CD8VF>;
 defm VPSLLDZ : avx512_shift_rrm<0xF2, "vpslld", X86vshl,
-                                VR512, v16i32, v4i32, bc_v4i32>, EVEX_V512,
+                                VR512, v16i32, v4i32, bc_v4i32, VK16WM>, EVEX_V512,
                                 EVEX_CD8<32, CD8VQ>;

 defm VPSLLQZ : avx512_shift_rmi<0x73, MRM6r, MRM6m, "vpsllq", X86vshli,
-                                VR512, v8i64, i512mem, memopv8i64>, EVEX_V512,
+                                VR512, v8i64, i512mem, memopv8i64, VK8WM>, EVEX_V512,
                                 EVEX_CD8<64, CD8VF>, VEX_W;
 defm VPSLLQZ : avx512_shift_rrm<0xF3, "vpsllq", X86vshl,
-                                VR512, v8i64, v2i64, bc_v2i64>, EVEX_V512,
+                                VR512, v8i64, v2i64, bc_v2i64, VK8WM>, EVEX_V512,
                                 EVEX_CD8<64, CD8VQ>, VEX_W;

 defm VPSRADZ : avx512_shift_rmi<0x72, MRM4r, MRM4m, "vpsrad", X86vsrai,
-                                VR512, v16i32, i512mem, memopv16i32>, EVEX_V512,
-                                EVEX_CD8<32, CD8VF>;
+                                VR512, v16i32, i512mem, memopv16i32, VK16WM>,
+                                EVEX_V512, EVEX_CD8<32, CD8VF>;
 defm VPSRADZ : avx512_shift_rrm<0xE2, "vpsrad", X86vsra,
-                                VR512, v16i32, v4i32, bc_v4i32>, EVEX_V512,
+                                VR512, v16i32, v4i32, bc_v4i32, VK16WM>, EVEX_V512,
                                 EVEX_CD8<32, CD8VQ>;

 defm VPSRAQZ : avx512_shift_rmi<0x72, MRM4r, MRM4m, "vpsraq", X86vsrai,
-                                VR512, v8i64, i512mem, memopv8i64>, EVEX_V512,
+                                VR512, v8i64, i512mem, memopv8i64, VK8WM>, EVEX_V512,
                                 EVEX_CD8<64, CD8VF>, VEX_W;
 defm VPSRAQZ : avx512_shift_rrm<0xE2, "vpsraq", X86vsra,
-                                VR512, v8i64, v2i64, bc_v2i64>, EVEX_V512,
+                                VR512, v8i64, v2i64, bc_v2i64, VK8WM>, EVEX_V512,
                                 EVEX_CD8<64, CD8VQ>, VEX_W;

 //===-------------------------------------------------------------------===//
diff --git a/utils/TableGen/X86RecognizableInstr.cpp b/utils/TableGen/X86RecognizableInstr.cpp
index 7962f9b9834..d2675d7a625 100644
--- a/utils/TableGen/X86RecognizableInstr.cpp
+++ b/utils/TableGen/X86RecognizableInstr.cpp
@@ -818,17 +818,20 @@ void RecognizableInstr::emitInstructionSpecifier(DisassemblerTables &tables) {
   case X86Local::MRM5r:
   case X86Local::MRM6r:
   case X86Local::MRM7r:
-    // Operand 1 is a register operand in the R/M field.
-    // Operand 2 (optional) is an immediate or relocation.
-    // Operand 3 (optional) is an immediate.
-    if (HasVEX_4VPrefix)
-      assert(numPhysicalOperands <= 3 &&
-             "Unexpected number of operands for MRMnRFrm with VEX_4V");
-    else
-      assert(numPhysicalOperands <= 3 &&
-             "Unexpected number of operands for MRMnRFrm");
+    {
+      // Operand 1 is a register operand in the R/M field.
+      // Operand 2 (optional) is an immediate or relocation.
+      // Operand 3 (optional) is an immediate.
+      unsigned kOp = (HasEVEX_K) ? 1:0;
+      unsigned Op4v = (HasVEX_4VPrefix) ? 1:0;
+      if (numPhysicalOperands > 3 + kOp + Op4v)
+        llvm_unreachable("Unexpected number of operands for MRMnr");
+    }
     if (HasVEX_4VPrefix)
       HANDLE_OPERAND(vvvvRegister)
+
+    if (HasEVEX_K)
+      HANDLE_OPERAND(writemaskRegister)
     HANDLE_OPTIONAL(rmRegister)
     HANDLE_OPTIONAL(relocation)
     HANDLE_OPTIONAL(immediate)
@@ -841,16 +844,19 @@ void RecognizableInstr::emitInstructionSpecifier(DisassemblerTables &tables) {
   case X86Local::MRM5m:
   case X86Local::MRM6m:
   case X86Local::MRM7m:
-    // Operand 1 is a memory operand (possibly SIB-extended)
-    // Operand 2 (optional) is an immediate or relocation.
-    if (HasVEX_4VPrefix)
-      assert(numPhysicalOperands >= 2 && numPhysicalOperands <= 3 &&
-             "Unexpected number of operands for MRMnMFrm");
-    else
-      assert(numPhysicalOperands >= 1 && numPhysicalOperands <= 2 &&
-             "Unexpected number of operands for MRMnMFrm");
+    {
+      // Operand 1 is a memory operand (possibly SIB-extended)
+      // Operand 2 (optional) is an immediate or relocation.
+      unsigned kOp = (HasEVEX_K) ? 1:0;
+      unsigned Op4v = (HasVEX_4VPrefix) ? 1:0;
+      if (numPhysicalOperands < 1 + kOp + Op4v ||
+          numPhysicalOperands > 2 + kOp + Op4v)
+        llvm_unreachable("Unexpected number of operands for MRMnm");
+    }
     if (HasVEX_4VPrefix)
       HANDLE_OPERAND(vvvvRegister)
+    if (HasEVEX_K)
+      HANDLE_OPERAND(writemaskRegister)
     HANDLE_OPERAND(memory)
     HANDLE_OPTIONAL(relocation)
     break;
--
2.11.0
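
A quick way to exercise the new masked shift forms is an MC-level assembly snippet in the style of the existing AVX-512 encoding tests. The lines below are only an illustrative sketch, not part of this patch: the register choices and instruction selection are assumptions, and the llvm-mc invocation mirrors how AVX-512 encoding tests are typically run.

    # RUN: llvm-mc -triple x86_64-unknown-unknown -mcpu=knl -show-encoding %s
    # Immediate shift with a writemask (the new rik/mik variants):
    vpsrld $1, %zmm17, %zmm21 {%k3}
    vpsllq $7, (%rdi), %zmm4 {%k2}
    # xmm shift-count form with a writemask (the new rrk/rmk variants):
    vpsrad %xmm5, %zmm6, %zmm7 {%k1}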