let ParserMatchClass = MVE_VIDUP_imm_asmoperand;
}
+// Pair vector indexing
+//
+// AsmOperandClass for one index of an MVE lane pair. The two template
+// strings become the PredicateMethod's template arguments, so the {0,1}
+// and {2,3} operands get distinct matcher classes while sharing one
+// isMVEPairVectorIndex<A,B> predicate in the asm parser.
+class MVEPairVectorIndexOperand<string start, string end> : AsmOperandClass {
+ let Name = "MVEPairVectorIndex"#start;
+ let RenderMethod = "addMVEPairVectorIndexOperands";
+ let PredicateMethod = "isMVEPairVectorIndex<"#start#", "#end#">";
+}
+
+// Operand type for a lane-pair index. 'opval' is the bias string baked
+// into the encoder (which subtracts it) and the decoder (which adds it).
+class MVEPairVectorIndex<string opval> : Operand<i32> {
+ let PrintMethod = "printVectorIndex";
+ let EncoderMethod = "getMVEPairVectorIndexOpValue<"#opval#">";
+ let DecoderMethod = "DecodeMVEPairVectorIndexOperand<"#opval#">";
+ let MIOperandInfo = (ops i32imm);
+}
+
+// Index operand accepting the values 0 or 1.
+def MVEPairVectorIndex0 : MVEPairVectorIndex<"0"> {
+ let ParserMatchClass = MVEPairVectorIndexOperand<"0", "1">;
+}
+
+// Index operand accepting the values 2 or 3.
+def MVEPairVectorIndex2 : MVEPairVectorIndex<"2"> {
+ let ParserMatchClass = MVEPairVectorIndexOperand<"2", "3">;
+}
+
// Vector indexing
class MVEVectorIndexOperand<int NumLanes> : AsmOperandClass {
let Name = "MVEVectorIndex"#NumLanes;
// end of mve_qDest_rSrc
+// start of coproc mov
+
+// Shared base for the 64-bit moves between two GPRs and a pair of
+// vector lanes ("vmov rt, rt2, q[i], q[j]" and the reverse direction).
+// The lane-index operands $idx (MVEPairVectorIndex2: accepts 2 or 3)
+// and $idx2 (MVEPairVectorIndex0: accepts 0 or 1) are appended to each
+// instruction's own input list here so both defs share one encoding.
+class MVE_VMOV_64bit<dag oops, dag iops, bit to_qreg, string ops, string cstr>
+ : MVE_VMOV_lane_base<oops, !con(iops, (ins MVEPairVectorIndex2:$idx,
+ MVEPairVectorIndex0:$idx2)),
+ NoItinerary, "vmov", "", ops, cstr, []> {
+ bits<5> Rt;
+ bits<5> Rt2;
+ bits<4> Qd;
+ bit idx;
+ bit idx2;
+
+ let Inst{31-23} = 0b111011000;
+ let Inst{22} = Qd{3};
+ let Inst{21} = 0b0;
+ // Direction bit: 1 = GPRs into vector lanes, 0 = vector lanes into GPRs.
+ let Inst{20} = to_qreg;
+ let Inst{19-16} = Rt2{3-0};
+ let Inst{15-13} = Qd{2-0};
+ let Inst{12-5} = 0b01111000;
+ // Only $idx2 is encoded: the parser validates idx == idx2 + 2, so this
+ // single bit determines both lane indices ((2,0) or (3,1)).
+ let Inst{4} = idx2;
+ let Inst{3-0} = Rt{3-0};
+}
+
+// The assembly syntax for these instructions mentions the vector
+// register name twice, e.g.
+//
+// vmov q2[2], q2[0], r0, r1
+// vmov r0, r1, q2[2], q2[0]
+//
+// which needs a bit of juggling with MC operand handling.
+//
+// For the move _into_ a vector register, the MC operand list also has
+// to mention the register name twice: once as the output, and once as
+// an extra input to represent where the unchanged half of the output
+// register comes from (when this instruction is used in code
+// generation). So we arrange that the first mention of the vector reg
+// in the instruction is considered by the AsmMatcher to be the output
+// ($Qd), and the second one is the input ($QdSrc). Binding them
+// together with the existing 'tie' constraint is enough to enforce at
+// register allocation time that they have to be the same register.
+//
+// For the move _from_ a vector register, there's no way to get round
+// the fact that both instances of that register name have to be
+// inputs. They have to be the same register again, but this time, we
+// can't use a tie constraint, because that has to be between an
+// output and an input operand. So this time, we have to arrange that
+// the q-reg appears just once in the MC operand list, in spite of
+// being mentioned twice in the asm syntax - which needs a custom
+// AsmMatchConverter.
+
+// Move two GPRs into a lane pair of a vector register. The unchanged
+// half of the destination is modelled as the extra input $QdSrc, tied
+// to $Qd via the "$Qd = $QdSrc" constraint.
+def MVE_VMOV_q_rr : MVE_VMOV_64bit<(outs MQPR:$Qd),
+ (ins MQPR:$QdSrc, rGPR:$Rt, rGPR:$Rt2),
+ 0b1, "$Qd$idx, $QdSrc$idx2, $Rt, $Rt2",
+ "$Qd = $QdSrc"> {
+ let DecoderMethod = "DecodeMVEVMOVDRegtoQ";
+}
+
+// Move a lane pair of a vector register into two GPRs. The q-register
+// is named twice in the asm string but appears only once in the MC
+// operand list, hence the custom AsmMatchConverter.
+def MVE_VMOV_rr_q : MVE_VMOV_64bit<(outs rGPR:$Rt, rGPR:$Rt2), (ins MQPR:$Qd),
+ 0b0, "$Rt, $Rt2, $Qd$idx, $Qd$idx2", ""> {
+ let DecoderMethod = "DecodeMVEVMOVQtoDReg";
+ let AsmMatchConverter = "cvtMVEVMOVQtoDReg";
+}
+
+// end of coproc mov
+
class MVE_VPT<string suffix, bits<2> size, dag iops, string asm, list<dag> pattern=[]>
: MVE_MI<(outs ), iops, NoItinerary, !strconcat("vpt", "${Mk}", ".", suffix), asm, "", pattern> {
bits<3> fc;
// Asm Match Converter Methods
void cvtThumbMultiply(MCInst &Inst, const OperandVector &);
void cvtThumbBranches(MCInst &Inst, const OperandVector &);
+ void cvtMVEVMOVQtoDReg(MCInst &Inst, const OperandVector &);
bool validateInstruction(MCInst &Inst, const OperandVector &Ops);
bool processInstruction(MCInst &Inst, const OperandVector &Ops, MCStreamer &Out);
bool isVectorIndex32() const { return isVectorIndexInRange<2>(); }
bool isVectorIndex64() const { return isVectorIndexInRange<1>(); }
+ // True if this operand is a vector lane index equal to either of the
+ // two permitted values; instantiated as <0,1> and <2,3> for the MVE
+ // lane-pair index operand classes.
+ template<int PermittedValue, int OtherPermittedValue>
+ bool isMVEPairVectorIndex() const {
+ if (Kind != k_VectorIndex) return false;
+ return VectorIndex.Val == PermittedValue ||
+ VectorIndex.Val == OtherPermittedValue;
+ }
+
bool isNEONi8splat() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
Inst.addOperand(MCOperand::createImm(getVectorIndex()));
}
+ // Render a matched MVE lane-pair index as a single immediate operand.
+ void addMVEPairVectorIndexOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::createImm(getVectorIndex()));
+ }
+
void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// The immediate encodes the type of constant as well as the value.
((ARMOperand &)*Operands[CondOp]).addCondCodeOperands(Inst, 2);
}
+// Custom converter for "vmov rt, rt2, qd[idx], qd[idx2]": the asm
+// syntax mentions the q-register twice, but the MCInst carries it only
+// once, so the second mention (Operands[6]) is deliberately skipped.
+void ARMAsmParser::cvtMVEVMOVQtoDReg(
+ MCInst &Inst, const OperandVector &Operands) {
+
+ // mnemonic, condition code, Rt, Rt2, Qd, idx, Qd again, idx2
+ assert(Operands.size() == 8);
+
+ ((ARMOperand &)*Operands[2]).addRegOperands(Inst, 1); // Rt
+ ((ARMOperand &)*Operands[3]).addRegOperands(Inst, 1); // Rt2
+ ((ARMOperand &)*Operands[4]).addRegOperands(Inst, 1); // Qd
+ ((ARMOperand &)*Operands[5]).addMVEPairVectorIndexOperands(Inst, 1); // idx
+ // skip second copy of Qd in Operands[6]
+ ((ARMOperand &)*Operands[7]).addMVEPairVectorIndexOperands(Inst, 1); // idx2
+ ((ARMOperand &)*Operands[1]).addCondCodeOperands(Inst, 2); // condition code
+}
+
/// Parse an ARM memory expression, return false if successful else return true
/// or an error. The first token must be a '[' when called.
bool ARMAsmParser::parseMemory(OperandVector &Operands) {
}
break;
}
+ case ARM::MVE_VMOV_rr_q: {
+ // Asm operands: mnemonic(0), cond(1), Rt(2), Rt2(3), Qd(4), idx(5),
+ // Qd(6), idx2(7). Both q-register mentions must name the same
+ // register, and the index pair must be (2,0) or (3,1).
+ if (Operands[4]->getReg() != Operands[6]->getReg())
+ return Error (Operands[4]->getStartLoc(), "Q-registers must be the same");
+ if (static_cast<ARMOperand &>(*Operands[5]).getVectorIndex() !=
+ static_cast<ARMOperand &>(*Operands[7]).getVectorIndex() + 2)
+ return Error (Operands[5]->getStartLoc(), "Q-register indexes must be 2 and 0 or 3 and 1");
+ break;
+ }
+ case ARM::MVE_VMOV_q_rr: {
+ // Asm operands: mnemonic(0), cond(1), Qd(2), idx(3), QdSrc(4),
+ // idx2(5), Rt(6), Rt2(7). Same constraints as the r,r,q form above.
+ if (Operands[2]->getReg() != Operands[4]->getReg())
+ return Error (Operands[2]->getStartLoc(), "Q-registers must be the same");
+ if (static_cast<ARMOperand &>(*Operands[3]).getVectorIndex() !=
+ static_cast<ARMOperand &>(*Operands[5]).getVectorIndex() + 2)
+ return Error (Operands[3]->getStartLoc(), "Q-register indexes must be 2 and 0 or 3 and 1");
+ break;
+ }
}
return false;
static DecodeStatus DecodeExpandedImmOperand(MCInst &Inst, unsigned Val,
uint64_t Address,
const void *Decoder);
+// Decoders for the MVE vmov GPR-pair <-> vector-lane-pair instructions
+// and their lane-index operand (definitions below).
+template<unsigned start>
+static DecodeStatus DecodeMVEPairVectorIndexOperand(MCInst &Inst, unsigned Val,
+ uint64_t Address,
+ const void *Decoder);
+static DecodeStatus DecodeMVEVMOVQtoDReg(MCInst &Inst, unsigned Insn,
+ uint64_t Address,
+ const void *Decoder);
+static DecodeStatus DecodeMVEVMOVDRegtoQ(MCInst &Inst, unsigned Insn,
+ uint64_t Address,
+ const void *Decoder);
static DecodeStatus DecodeMVEVCVTt1fp(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder);
typedef DecodeStatus OperandDecoder(MCInst &Inst, unsigned Val,
return MCDisassembler::Success;
}
+// Decode the single encoded index bit into a lane index by adding the
+// 'start' bias (instantiated with 0 and 2, yielding 0/1 or 2/3).
+template<unsigned start>
+static DecodeStatus DecodeMVEPairVectorIndexOperand(MCInst &Inst, unsigned Val,
+ uint64_t Address,
+ const void *Decoder) {
+ DecodeStatus S = MCDisassembler::Success;
+
+ Inst.addOperand(MCOperand::createImm(start + Val));
+
+ return S;
+}
+
+// Decode "vmov rt, rt2, qd[2i+2], qd[2i]". MC operand order for
+// MVE_VMOV_rr_q is Rt, Rt2, Qd, idx, idx2; the one encoded bit (bit 4)
+// produces both lane indices via the <2> and <0> biases.
+static DecodeStatus DecodeMVEVMOVQtoDReg(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void *Decoder) {
+ DecodeStatus S = MCDisassembler::Success;
+ unsigned Rt = fieldFromInstruction(Insn, 0, 4);
+ unsigned Rt2 = fieldFromInstruction(Insn, 16, 4);
+ unsigned Qd = ((fieldFromInstruction(Insn, 22, 1) << 3) |
+ fieldFromInstruction(Insn, 13, 3));
+ unsigned index = fieldFromInstruction(Insn, 4, 1);
+
+ if (!Check(S, DecodeGPRRegisterClass(Inst, Rt, Address, Decoder)))
+ return MCDisassembler::Fail;
+ if (!Check(S, DecodeGPRRegisterClass(Inst, Rt2, Address, Decoder)))
+ return MCDisassembler::Fail;
+ if (!Check(S, DecodeMQPRRegisterClass(Inst, Qd, Address, Decoder)))
+ return MCDisassembler::Fail;
+ if (!Check(S, DecodeMVEPairVectorIndexOperand<2>(Inst, index, Address, Decoder)))
+ return MCDisassembler::Fail;
+ if (!Check(S, DecodeMVEPairVectorIndexOperand<0>(Inst, index, Address, Decoder)))
+ return MCDisassembler::Fail;
+
+ return S;
+}
+
+// Decode "vmov qd[2i+2], qd[2i], rt, rt2". Qd is decoded twice because
+// the MC operand list for MVE_VMOV_q_rr has both the output $Qd and the
+// tied input $QdSrc, followed by Rt, Rt2, idx, idx2.
+static DecodeStatus DecodeMVEVMOVDRegtoQ(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void *Decoder) {
+ DecodeStatus S = MCDisassembler::Success;
+ unsigned Rt = fieldFromInstruction(Insn, 0, 4);
+ unsigned Rt2 = fieldFromInstruction(Insn, 16, 4);
+ unsigned Qd = ((fieldFromInstruction(Insn, 22, 1) << 3) |
+ fieldFromInstruction(Insn, 13, 3));
+ unsigned index = fieldFromInstruction(Insn, 4, 1);
+
+ if (!Check(S, DecodeMQPRRegisterClass(Inst, Qd, Address, Decoder)))
+ return MCDisassembler::Fail;
+ if (!Check(S, DecodeMQPRRegisterClass(Inst, Qd, Address, Decoder)))
+ return MCDisassembler::Fail;
+ if (!Check(S, DecodeGPRRegisterClass(Inst, Rt, Address, Decoder)))
+ return MCDisassembler::Fail;
+ if (!Check(S, DecodeGPRRegisterClass(Inst, Rt2, Address, Decoder)))
+ return MCDisassembler::Fail;
+ if (!Check(S, DecodeMVEPairVectorIndexOperand<2>(Inst, index, Address, Decoder)))
+ return MCDisassembler::Fail;
+ if (!Check(S, DecodeMVEPairVectorIndexOperand<0>(Inst, index, Address, Decoder)))
+ return MCDisassembler::Fail;
+
+ return S;
+}
+
static DecodeStatus DecodeMVEOverlappingLongShift(
MCInst &Inst, unsigned Insn, uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
uint32_t getRestrictedCondCodeOpValue(const MCInst &MI, unsigned OpIdx,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const;
+ // Encode an MVE lane-pair index operand: the immediate minus the
+ // 'start' bias (0 or 2). Renamed the template parameter from 'size'
+ // to 'start' to match the out-of-line definition.
+ template <unsigned start>
+ uint32_t getMVEPairVectorIndexOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
};
} // end anonymous namespace
return countTrailingZeros((uint64_t)MO.getImm());
}
+// Encode an MVE lane-pair index by subtracting the 'start' bias
+// (instantiated with 0 and 2), leaving the single bit stored in the
+// instruction word.
+template <unsigned start>
+uint32_t ARMMCCodeEmitter::
+getMVEPairVectorIndexOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand MO = MI.getOperand(OpIdx);
+ assert(MO.isImm() && "Unexpected operand type!");
+
+ int Value = MO.getImm();
+ return Value - start;
+}
+
#include "ARMGenMCCodeEmitter.inc"
MCCodeEmitter *llvm::createARMLEMCCodeEmitter(const MCInstrInfo &MCII,
--- /dev/null
+# Assembly tests for the MVE vmov GPR-pair / vector-lane-pair moves:
+# positive encodings in both directions, plus diagnostics for mismatched
+# q-registers and for index pairs other than (2,0) / (3,1).
+# RUN: not llvm-mc -triple=thumbv8.1m.main-none-eabi -mattr=+mve -show-encoding < %s \
+# RUN: | FileCheck --check-prefix=CHECK-NOFP %s
+# RUN: not llvm-mc -triple=thumbv8.1m.main-none-eabi -mattr=+mve.fp,+fp64 -show-encoding < %s 2>%t \
+# RUN: | FileCheck --check-prefix=CHECK %s
+# RUN: FileCheck --check-prefix=ERROR < %t %s
+
+# CHECK: vmov lr, r7, q4[2], q4[0] @ encoding: [0x07,0xec,0x0e,0x8f]
+# CHECK-NOFP: vmov lr, r7, q4[2], q4[0] @ encoding: [0x07,0xec,0x0e,0x8f]
+vmov lr, r7, q4[2], q4[0]
+
+# ERROR: [[@LINE+1]]:{{[0-9]+}}: {{error|note}}: Q-registers must be the same
+vmov lr, r7, q5[2], q4[0]
+
+# ERROR: [[@LINE+1]]:{{[0-9]+}}: {{error|note}}: Q-register indexes must be 2 and 0 or 3 and 1
+vmov lr, r7, q4[2], q4[1]
+
+# CHECK: vmov q3[3], q3[1], r4, r1 @ encoding: [0x11,0xec,0x14,0x6f]
+# CHECK-NOFP: vmov q3[3], q3[1], r4, r1 @ encoding: [0x11,0xec,0x14,0x6f]
+vmov q3[3], q3[1], r4, r1
+
+# ERROR: [[@LINE+1]]:{{[0-9]+}}: {{error|note}}: Q-registers must be the same
+vmov q4[3], q3[1], r4, r1
+
+# ERROR: [[@LINE+1]]:{{[0-9]+}}: {{error|note}}: Q-register indexes must be 2 and 0 or 3 and 1
+vmov q3[2], q3[1], r4, r1
--- /dev/null
+# Disassembler tests for the MVE vmov GPR-pair / vector-lane-pair moves.
+# With MVE enabled the two valid encodings round-trip; without MVE they
+# are rejected; the last two encodings are invalid even with MVE.
+# (The original patch repeated the first two RUN lines verbatim a second
+# time after the NOMVE run; the duplicates added no coverage and were
+# removed.)
+# RUN: not llvm-mc -disassemble -triple=thumbv8.1m.main-none-eabi -mattr=+mve.fp,+fp64 -show-encoding %s 2> %t | FileCheck %s
+# RUN: FileCheck --check-prefix=ERROR < %t %s
+# RUN: not llvm-mc -disassemble -triple=thumbv8.1m.main-none-eabi -show-encoding %s &> %t
+# RUN: FileCheck --check-prefix=CHECK-NOMVE < %t %s
+
+# CHECK: vmov lr, r7, q4[2], q4[0] @ encoding: [0x07,0xec,0x0e,0x8f]
+# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding
+[0x07,0xec,0x0e,0x8f]
+
+# CHECK: vmov q3[3], q3[1], r4, r1 @ encoding: [0x11,0xec,0x14,0x6f]
+# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding
+[0x11,0xec,0x14,0x6f]
+
+# ERROR: [[@LINE+1]]:2: warning: invalid instruction encoding
+[0x40,0xec,0x00,0x0f]
+
+# ERROR: [[@LINE+1]]:2: warning: invalid instruction encoding
+[0x50,0xec,0x00,0x0f]