//===-- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIDefines.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

namespace {

struct OptionalOperand;
class AMDGPUOperand : public MCParsedAsmOperand {
  enum KindTy {
    Token,
    Immediate,
    Register,
    Expression
  } Kind;

  SMLoc StartLoc, EndLoc;

public:
  AMDGPUOperand(enum KindTy K) : MCParsedAsmOperand(), Kind(K) {}

  enum ImmTy {
    ImmTyNone,
    ImmTyDSOffset0,
    ImmTyDSOffset1,
    ImmTyGDS,
    ImmTyOffset,
    ImmTyGLC,
    ImmTySLC,
    ImmTyTFE,
    ImmTyClamp,
    ImmTyOMod
  };

  struct TokOp {
    const char *Data;
    unsigned Length;
  };

  struct ImmOp {
    bool IsFPImm;
    ImmTy Type;
    int64_t Val;
  };

  struct RegOp {
    unsigned RegNo;
    int Modifiers;
    const MCRegisterInfo *TRI;
  };

  union {
    TokOp Tok;
    ImmOp Imm;
    RegOp Reg;
    const MCExpr *Expr;
  };
  void addImmOperands(MCInst &Inst, unsigned N) const {
    Inst.addOperand(MCOperand::CreateImm(getImm()));
  }

  StringRef getToken() const {
    return StringRef(Tok.Data, Tok.Length);
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
    if (isReg())
      addRegOperands(Inst, N);
    else
      addImmOperands(Inst, N);
  }

  void addRegWithInputModsOperands(MCInst &Inst, unsigned N) const {
    Inst.addOperand(MCOperand::CreateImm(Reg.Modifiers));
    addRegOperands(Inst, N);
  }

  void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
    if (isImm())
      addImmOperands(Inst, N);
    else {
      assert(isExpr());
      Inst.addOperand(MCOperand::CreateExpr(Expr));
    }
  }
  bool defaultTokenHasSuffix() const {
    StringRef Token(Tok.Data, Tok.Length);

    return Token.endswith("_e32") || Token.endswith("_e64");
  }

  bool isToken() const override {
    return Kind == Token;
  }

  bool isImm() const override {
    return Kind == Immediate;
  }

  bool isInlineImm() const {
    float F = BitsToFloat(Imm.Val);
    // TODO: Add 0.5pi for VI
    return isImm() && ((Imm.Val <= 64 && Imm.Val >= -16) ||
           (F == 0.0 || F == 0.5 || F == -0.5 || F == 1.0 || F == -1.0 ||
           F == 2.0 || F == -2.0 || F == 4.0 || F == -4.0));
  }
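  // For example, 1.0 and the integers -16..64 are accepted as inline
  // constants by the check above, while an arbitrary literal such as 0.25
  // is not and must be emitted as a separate 32-bit literal.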
  bool isDSOffset0() const {
    assert(isImm());
    return Imm.Type == ImmTyDSOffset0;
  }

  bool isDSOffset1() const {
    assert(isImm());
    return Imm.Type == ImmTyDSOffset1;
  }

  int64_t getImm() const {
    return Imm.Val;
  }

  enum ImmTy getImmTy() const {
    assert(isImm());
    return Imm.Type;
  }

  bool isReg() const override {
    return Kind == Register && Reg.Modifiers == -1;
  }

  bool isRegWithInputMods() const {
    return Kind == Register && Reg.Modifiers != -1;
  }

  void setModifiers(unsigned Mods) {
    assert(isReg());
    Reg.Modifiers = Mods;
  }

  unsigned getReg() const override {
    return Reg.RegNo;
  }

  bool isRegOrImm() const {
    return isReg() || isImm();
  }

  bool isRegClass(unsigned RCID) const {
    return Reg.TRI->getRegClass(RCID).contains(getReg());
  }
  bool isSCSrc32() const {
    return isInlineImm() || (isReg() && isRegClass(AMDGPU::SReg_32RegClassID));
  }

  bool isSSrc32() const {
    return isImm() || (isReg() && isRegClass(AMDGPU::SReg_32RegClassID));
  }

  bool isSSrc64() const {
    return isImm() || isInlineImm() ||
           (isReg() && isRegClass(AMDGPU::SReg_64RegClassID));
  }

  bool isVCSrc32() const {
    return isInlineImm() || (isReg() && isRegClass(AMDGPU::VS_32RegClassID));
  }

  bool isVCSrc64() const {
    return isInlineImm() || (isReg() && isRegClass(AMDGPU::VS_64RegClassID));
  }

  bool isVSrc32() const {
    return isImm() || (isReg() && isRegClass(AMDGPU::VS_32RegClassID));
  }

  bool isVSrc64() const {
    return isImm() || (isReg() && isRegClass(AMDGPU::VS_64RegClassID));
  }

  bool isMem() const override {
    return false;
  }

  bool isExpr() const {
    return Kind == Expression;
  }

  bool isSoppBrTarget() const {
    return isExpr() || isImm();
  }
  SMLoc getStartLoc() const override {
    return StartLoc;
  }

  SMLoc getEndLoc() const override {
    return EndLoc;
  }

  void print(raw_ostream &OS) const override { }
  static std::unique_ptr<AMDGPUOperand> CreateImm(int64_t Val, SMLoc Loc,
                                                  enum ImmTy Type = ImmTyNone,
                                                  bool IsFPImm = false) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Immediate);
    Op->Imm.Val = Val;
    Op->Imm.IsFPImm = IsFPImm;
    Op->Imm.Type = Type;
    Op->StartLoc = Loc;
    Op->EndLoc = Loc;
    return Op;
  }

  static std::unique_ptr<AMDGPUOperand> CreateToken(StringRef Str, SMLoc Loc,
                                           bool HasExplicitEncodingSize = true) {
    auto Res = llvm::make_unique<AMDGPUOperand>(Token);
    Res->Tok.Data = Str.data();
    Res->Tok.Length = Str.size();
    Res->StartLoc = Loc;
    Res->EndLoc = Loc;
    return Res;
  }

  static std::unique_ptr<AMDGPUOperand> CreateReg(unsigned RegNo, SMLoc S,
                                                  SMLoc E,
                                                  const MCRegisterInfo *TRI) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Register);
    Op->Reg.RegNo = RegNo;
    Op->Reg.TRI = TRI;
    Op->Reg.Modifiers = -1;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static std::unique_ptr<AMDGPUOperand> CreateExpr(const class MCExpr *Expr,
                                                   SMLoc S) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Expression);
    Op->Expr = Expr;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  bool isDSOffset() const;
  bool isDSOffset01() const;
  bool isSWaitCnt() const;
  bool isMubufOffset() const;
};
class AMDGPUAsmParser : public MCTargetAsmParser {
  MCSubtargetInfo &STI;
  const MCInstrInfo &MII;
  MCAsmParser &Parser;

  unsigned ForcedEncodingSize;

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "AMDGPUGenAsmMatcher.inc"

  /// }

public:
  AMDGPUAsmParser(MCSubtargetInfo &STI, MCAsmParser &_Parser,
                  const MCInstrInfo &MII,
                  const MCTargetOptions &Options)
      : MCTargetAsmParser(), STI(STI), MII(MII), Parser(_Parser),
        ForcedEncodingSize(0) {

    if (!STI.getFeatureBits()) {
      // Set default features.
      STI.ToggleFeature("SOUTHERN_ISLANDS");
    }

    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
  }

  unsigned getForcedEncodingSize() const {
    return ForcedEncodingSize;
  }

  void setForcedEncodingSize(unsigned Size) {
    ForcedEncodingSize = Size;
  }
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  unsigned checkTargetMatchPredicate(MCInst &Inst) override;
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic);
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;

  OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int,
                                          int64_t Default = 0);
  OperandMatchResultTy parseIntWithPrefix(const char *Prefix,
                                          OperandVector &Operands,
                                          enum AMDGPUOperand::ImmTy ImmTy =
                                                      AMDGPUOperand::ImmTyNone);
  OperandMatchResultTy parseNamedBit(const char *Name, OperandVector &Operands,
                                     enum AMDGPUOperand::ImmTy ImmTy =
                                                      AMDGPUOperand::ImmTyNone);
  OperandMatchResultTy parseOptionalOps(
                                   const ArrayRef<OptionalOperand> &OptionalOps,
                                   OperandVector &Operands);

  void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
  void cvtDS(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseDSOptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseDSOff01OptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseDSOffsetOptional(OperandVector &Operands);

  bool parseCnt(int64_t &IntVal);
  OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
  OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);

  void cvtMubuf(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseOffset(OperandVector &Operands);
  OperandMatchResultTy parseMubufOptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseGLC(OperandVector &Operands);
  OperandMatchResultTy parseSLC(OperandVector &Operands);
  OperandMatchResultTy parseTFE(OperandVector &Operands);

  OperandMatchResultTy parseDMask(OperandVector &Operands);
  OperandMatchResultTy parseUNorm(OperandVector &Operands);
  OperandMatchResultTy parseR128(OperandVector &Operands);

  void cvtVOP3(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseVOP3OptionalOps(OperandVector &Operands);
};
struct OptionalOperand {
  const char *Name;
  AMDGPUOperand::ImmTy Type;
  bool IsBit;
  int64_t Default;
  bool (*ConvertResult)(int64_t &);
};

} // end anonymous namespace
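// Each entry names an optional operand keyword ("offset", "gds", ...), the
// immediate type it produces, whether it is a bare bit parsed by
// parseNamedBit() or a "name:value" integer parsed by parseIntWithPrefix(),
// its default value, and an optional hook that validates and rewrites the
// parsed value (see ConvertOmodMul below).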
static unsigned getRegClass(bool IsVgpr, unsigned RegWidth) {
  if (IsVgpr) {
    switch (RegWidth) {
      default: llvm_unreachable("Unknown register width");
      case 1: return AMDGPU::VGPR_32RegClassID;
      case 2: return AMDGPU::VReg_64RegClassID;
      case 3: return AMDGPU::VReg_96RegClassID;
      case 4: return AMDGPU::VReg_128RegClassID;
      case 8: return AMDGPU::VReg_256RegClassID;
      case 16: return AMDGPU::VReg_512RegClassID;
    }
  }

  switch (RegWidth) {
    default: llvm_unreachable("Unknown register width");
    case 1: return AMDGPU::SGPR_32RegClassID;
    case 2: return AMDGPU::SGPR_64RegClassID;
    case 4: return AMDGPU::SReg_128RegClassID;
    case 8: return AMDGPU::SReg_256RegClassID;
    case 16: return AMDGPU::SReg_512RegClassID;
  }
}
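// For example, getRegClass(true, 2) returns VReg_64RegClassID, the class
// backing a VGPR pair written as v[2:3] in assembly.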
static unsigned getRegForName(const StringRef &RegName) {

  return StringSwitch<unsigned>(RegName)
    .Case("exec", AMDGPU::EXEC)
    .Case("vcc", AMDGPU::VCC)
    .Case("flat_scr", AMDGPU::FLAT_SCR)
    .Case("m0", AMDGPU::M0)
    .Case("scc", AMDGPU::SCC)
    .Case("flat_scr_lo", AMDGPU::FLAT_SCR_LO)
    .Case("flat_scr_hi", AMDGPU::FLAT_SCR_HI)
    .Case("vcc_lo", AMDGPU::VCC_LO)
    .Case("vcc_hi", AMDGPU::VCC_HI)
    .Case("exec_lo", AMDGPU::EXEC_LO)
    .Case("exec_hi", AMDGPU::EXEC_HI)
    .Default(0);
}
bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
                                    SMLoc &EndLoc) {
  const AsmToken Tok = Parser.getTok();
  StartLoc = Tok.getLoc();
  EndLoc = Tok.getEndLoc();
  const StringRef &RegName = Tok.getString();
  RegNo = getRegForName(RegName);

  if (RegNo) {
    Parser.Lex();
    return false;
  }

  // Match vgprs and sgprs
  if (RegName[0] != 's' && RegName[0] != 'v')
    return true;

  bool IsVgpr = RegName[0] == 'v';
  unsigned RegWidth;
  unsigned RegIndexInClass;
  if (RegName.size() > 1) {
    // We have a 32-bit register
    RegWidth = 1;
    if (RegName.substr(1).getAsInteger(10, RegIndexInClass))
      return true;
    Parser.Lex();
  } else {
    // We have a register greater than 32-bits.

    int64_t RegLo, RegHi;
    Parser.Lex();
    if (getLexer().isNot(AsmToken::LBrac))
      return true;

    Parser.Lex();
    if (getParser().parseAbsoluteExpression(RegLo))
      return true;

    if (getLexer().isNot(AsmToken::Colon))
      return true;

    Parser.Lex();
    if (getParser().parseAbsoluteExpression(RegHi))
      return true;

    if (getLexer().isNot(AsmToken::RBrac))
      return true;

    Parser.Lex();
    RegWidth = (RegHi - RegLo) + 1;
    if (IsVgpr) {
      // VGPR registers aren't aligned.
      RegIndexInClass = RegLo;
    } else {
      // SGPR registers are aligned.  Max alignment is 4 dwords.
      RegIndexInClass = RegLo / std::min(RegWidth, 4u);
    }
  }

  const MCRegisterInfo *TRC = getContext().getRegisterInfo();
  unsigned RC = getRegClass(IsVgpr, RegWidth);
  if (RegIndexInClass > TRC->getRegClass(RC).getNumRegs())
    return true;

  RegNo = TRC->getRegClass(RC).getRegister(RegIndexInClass);
  return false;
}
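// Accepted forms (illustrative): named registers such as "vcc" or "m0",
// single 32-bit registers such as "s0" or "v255", and ranges such as
// "s[0:3]" or "v[4:5]", where the range width selects the class returned
// by getRegClass().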
unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {

  uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;

  if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
      (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)))
    return Match_InvalidOperand;

  return Match_Success;
}
bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                              OperandVector &Operands,
                                              MCStreamer &Out,
                                              uint64_t &ErrorInfo,
                                              bool MatchingInlineAsm) {
  MCInst Inst;

  switch (MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm)) {
    default: break;
    case Match_Success:
      Inst.setLoc(IDLoc);
      Out.EmitInstruction(Inst, STI);
      return false;

    case Match_MissingFeature:
      return Error(IDLoc, "missing feature");

    case Match_MnemonicFail:
      return Error(IDLoc, "unrecognized instruction mnemonic");

    case Match_InvalidOperand: {
      SMLoc ErrorLoc = IDLoc;
      if (ErrorInfo != ~0ULL) {
        if (ErrorInfo >= Operands.size())
          return Error(IDLoc, "too few operands for instruction");

        ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
        if (ErrorLoc == SMLoc())
          ErrorLoc = IDLoc;
      }
      return Error(ErrorLoc, "invalid operand for instruction");
    }
  }
  llvm_unreachable("Implement any new match types added!");
}
bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
  return true;
}
static bool operandsHaveModifiers(const OperandVector &Operands) {

  for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
    const AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);
    if (Op.isRegWithInputMods())
      return true;
    if (Op.isImm() && (Op.getImmTy() == AMDGPUOperand::ImmTyOMod ||
                       Op.getImmTy() == AMDGPUOperand::ImmTyClamp))
      return true;
  }
  return false;
}
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {

  // Try to parse with a custom parser
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // If we successfully parsed the operand or if there was an error parsing,
  // we are done.
  //
  // If we are parsing after we reach EndOfStatement then this means we
  // are appending default values to the Operands list.  This is only done
  // by custom parser, so we shouldn't continue on to the generic parsing.
  if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
      getLexer().is(AsmToken::EndOfStatement))
    return ResTy;

  bool Negate = false, Abs = false;
  if (getLexer().getKind() == AsmToken::Minus) {
    Parser.Lex();
    Negate = true;
  }

  if (getLexer().getKind() == AsmToken::Pipe) {
    Parser.Lex();
    Abs = true;
  }

  switch(getLexer().getKind()) {
    case AsmToken::Integer: {
      SMLoc S = Parser.getTok().getLoc();
      int64_t IntVal;
      if (getParser().parseAbsoluteExpression(IntVal))
        return MatchOperand_ParseFail;
      APInt IntVal32(32, IntVal);
      if (IntVal32.getSExtValue() != IntVal) {
        Error(S, "invalid immediate: only 32-bit values are legal");
        return MatchOperand_ParseFail;
      }

      IntVal = IntVal32.getSExtValue();
      if (Negate)
        IntVal *= -1;
      Operands.push_back(AMDGPUOperand::CreateImm(IntVal, S));
      return MatchOperand_Success;
    }
    case AsmToken::Real: {
      // FIXME: We should emit an error if a double precision floating-point
      // value is used.  I'm not sure the best way to detect this.
      SMLoc S = Parser.getTok().getLoc();
      int64_t IntVal;
      if (getParser().parseAbsoluteExpression(IntVal))
        return MatchOperand_ParseFail;

      APFloat F((float)BitsToDouble(IntVal));
      if (Negate)
        F.changeSign();
      Operands.push_back(
          AMDGPUOperand::CreateImm(F.bitcastToAPInt().getZExtValue(), S));
      return MatchOperand_Success;
    }
    case AsmToken::Identifier: {
      SMLoc S, E;
      unsigned RegNo;
      if (!ParseRegister(RegNo, S, E)) {

        bool HasModifiers = operandsHaveModifiers(Operands);
        unsigned Modifiers = 0;

        if (Negate)
          Modifiers |= 0x1;

        if (Abs) {
          if (getLexer().getKind() != AsmToken::Pipe)
            return MatchOperand_ParseFail;
          Parser.Lex();
          Modifiers |= 0x2;
        }

        if (Modifiers && !HasModifiers) {
          // We are adding a modifier to src1 or src2 and previous sources
          // don't have modifiers, so we need to go back and empty modifiers
          // for each previous source.
          for (unsigned PrevRegIdx = Operands.size() - 1; PrevRegIdx > 1;
               --PrevRegIdx) {

            AMDGPUOperand &RegOp = ((AMDGPUOperand&)*Operands[PrevRegIdx]);
            RegOp.setModifiers(0);
          }
        }

        Operands.push_back(AMDGPUOperand::CreateReg(
            RegNo, S, E, getContext().getRegisterInfo()));

        if (HasModifiers || Modifiers) {
          AMDGPUOperand &RegOp = ((AMDGPUOperand&)*Operands[Operands.size() - 1]);
          RegOp.setModifiers(Modifiers);
        }
      } else {
        Operands.push_back(AMDGPUOperand::CreateToken(Parser.getTok().getString(),
                                                      S));
        Parser.Lex();
      }
      return MatchOperand_Success;
    }
    default:
      return MatchOperand_NoMatch;
  }
}
bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                       StringRef Name,
                                       SMLoc NameLoc, OperandVector &Operands) {

  // Clear any forced encodings from the previous instruction.
  setForcedEncodingSize(0);

  if (Name.endswith("_e64"))
    setForcedEncodingSize(64);
  else if (Name.endswith("_e32"))
    setForcedEncodingSize(32);

  // Add the instruction mnemonic
  Operands.push_back(AMDGPUOperand::CreateToken(Name, NameLoc));

  while (!getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPUAsmParser::OperandMatchResultTy Res = parseOperand(Operands, Name);

    // Eat the comma or space if there is one.
    if (getLexer().is(AsmToken::Comma))
      Parser.Lex();

    switch (Res) {
      case MatchOperand_Success: break;
      case MatchOperand_ParseFail: return Error(getLexer().getLoc(),
                                                "failed parsing operand.");
      case MatchOperand_NoMatch: return Error(getLexer().getLoc(),
                                              "not a valid operand.");
    }
  }

  // Once we reach end of statement, continue parsing so we can add default
  // values for optional arguments.
  AMDGPUAsmParser::OperandMatchResultTy Res;
  while ((Res = parseOperand(Operands, Name)) != MatchOperand_NoMatch) {
    if (Res != MatchOperand_Success)
      return Error(getLexer().getLoc(), "failed parsing operand.");
  }

  return false;
}
//===----------------------------------------------------------------------===//
// utility functions
//===----------------------------------------------------------------------===//
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int,
                                    int64_t Default) {

  // We are at the end of the statement, and this is a default argument, so
  // use a default value.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    Int = Default;
    return MatchOperand_Success;
  }

  switch(getLexer().getKind()) {
    default: return MatchOperand_NoMatch;
    case AsmToken::Identifier: {
      StringRef OffsetName = Parser.getTok().getString();
      if (!OffsetName.equals(Prefix))
        return MatchOperand_NoMatch;

      Parser.Lex();
      if (getLexer().isNot(AsmToken::Colon))
        return MatchOperand_ParseFail;

      Parser.Lex();
      if (getLexer().isNot(AsmToken::Integer))
        return MatchOperand_ParseFail;

      if (getParser().parseAbsoluteExpression(Int))
        return MatchOperand_ParseFail;
      break;
    }
  }
  return MatchOperand_Success;
}
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
                                    enum AMDGPUOperand::ImmTy ImmTy) {

  SMLoc S = Parser.getTok().getLoc();
  int64_t Offset = 0;

  AMDGPUAsmParser::OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Offset);
  if (Res != MatchOperand_Success)
    return Res;

  Operands.push_back(AMDGPUOperand::CreateImm(Offset, S, ImmTy));
  return MatchOperand_Success;
}
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
                               enum AMDGPUOperand::ImmTy ImmTy) {
  int64_t Bit = 0;
  SMLoc S = Parser.getTok().getLoc();

  // If we are at the end of the statement, this is a default argument, so
  // fall through and use the default value (0).
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    switch(getLexer().getKind()) {
      case AsmToken::Identifier: {
        StringRef Tok = Parser.getTok().getString();
        if (Tok == Name) {
          Bit = 1;
          Parser.Lex();
        } else if (Tok.startswith("no") && Tok.endswith(Name)) {
          Bit = 0;
          Parser.Lex();
        } else {
          return MatchOperand_NoMatch;
        }
        break;
      }
      default:
        return MatchOperand_NoMatch;
    }
  }

  Operands.push_back(AMDGPUOperand::CreateImm(Bit, S, ImmTy));
  return MatchOperand_Success;
}
static bool operandsHasOptionalOp(const OperandVector &Operands,
                                  const OptionalOperand &OOp) {
  for (unsigned i = 0; i < Operands.size(); i++) {
    const AMDGPUOperand &ParsedOp = ((const AMDGPUOperand &)*Operands[i]);
    if ((ParsedOp.isImm() && ParsedOp.getImmTy() == OOp.Type) ||
        (ParsedOp.isToken() && ParsedOp.getToken() == OOp.Name))
      return true;
  }
  return false;
}
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOptionalOps(const ArrayRef<OptionalOperand> &OptionalOps,
                                  OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  for (const OptionalOperand &Op : OptionalOps) {
    if (operandsHasOptionalOp(Operands, Op))
      continue;
    AMDGPUAsmParser::OperandMatchResultTy Res;
    int64_t Value;
    if (Op.IsBit) {
      Res = parseNamedBit(Op.Name, Operands, Op.Type);
      if (Res == MatchOperand_NoMatch)
        continue;
      return Res;
    }

    Res = parseIntWithPrefix(Op.Name, Value, Op.Default);

    if (Res == MatchOperand_NoMatch)
      continue;

    if (Res != MatchOperand_Success)
      return Res;

    if (Op.ConvertResult && !Op.ConvertResult(Value)) {
      return MatchOperand_ParseFail;
    }

    Operands.push_back(AMDGPUOperand::CreateImm(Value, S, Op.Type));
    return MatchOperand_Success;
  }
  return MatchOperand_NoMatch;
}
//===----------------------------------------------------------------------===//
// ds
//===----------------------------------------------------------------------===//
static const OptionalOperand DSOptionalOps [] = {
  {"offset", AMDGPUOperand::ImmTyOffset, false, 0, nullptr},
  {"gds", AMDGPUOperand::ImmTyGDS, true, 0, nullptr}
};

static const OptionalOperand DSOptionalOpsOff01 [] = {
  {"offset0", AMDGPUOperand::ImmTyDSOffset0, false, 0, nullptr},
  {"offset1", AMDGPUOperand::ImmTyDSOffset1, false, 0, nullptr},
  {"gds", AMDGPUOperand::ImmTyGDS, true, 0, nullptr}
};

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDSOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(DSOptionalOps, Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDSOff01OptionalOps(OperandVector &Operands) {
  return parseOptionalOps(DSOptionalOpsOff01, Operands);
}
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDSOffsetOptional(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  AMDGPUAsmParser::OperandMatchResultTy Res =
    parseIntWithPrefix("offset", Operands, AMDGPUOperand::ImmTyOffset);
  if (Res == MatchOperand_NoMatch) {
    Operands.push_back(AMDGPUOperand::CreateImm(0, S,
                       AMDGPUOperand::ImmTyOffset));
    Res = MatchOperand_Success;
  }
  return Res;
}
bool AMDGPUOperand::isDSOffset() const {
  return isImm() && isUInt<16>(getImm());
}

bool AMDGPUOperand::isDSOffset01() const {
  return isImm() && isUInt<8>(getImm());
}
void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
                                    const OperandVector &Operands) {

  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  unsigned Offset0Idx = OptionalIdx[AMDGPUOperand::ImmTyDSOffset0];
  unsigned Offset1Idx = OptionalIdx[AMDGPUOperand::ImmTyDSOffset1];
  unsigned GDSIdx = OptionalIdx[AMDGPUOperand::ImmTyGDS];

  ((AMDGPUOperand &)*Operands[Offset0Idx]).addImmOperands(Inst, 1); // offset0
  ((AMDGPUOperand &)*Operands[Offset1Idx]).addImmOperands(Inst, 1); // offset1
  ((AMDGPUOperand &)*Operands[GDSIdx]).addImmOperands(Inst, 1); // gds
  Inst.addOperand(MCOperand::CreateReg(AMDGPU::M0)); // m0
}
void AMDGPUAsmParser::cvtDS(MCInst &Inst, const OperandVector &Operands) {

  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
  bool GDSOnly = false;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    if (Op.isToken() && Op.getToken() == "gds") {
      GDSOnly = true;
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  unsigned OffsetIdx = OptionalIdx[AMDGPUOperand::ImmTyOffset];
  ((AMDGPUOperand &)*Operands[OffsetIdx]).addImmOperands(Inst, 1); // offset

  if (!GDSOnly) {
    unsigned GDSIdx = OptionalIdx[AMDGPUOperand::ImmTyGDS];
    ((AMDGPUOperand &)*Operands[GDSIdx]).addImmOperands(Inst, 1); // gds
  }
  Inst.addOperand(MCOperand::CreateReg(AMDGPU::M0)); // m0
}
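// Both cvtDS variants append the optional immediates in the fixed order the
// MCInst expects (offset0/offset1 or offset, then gds), regardless of the
// order they were written in the source line, and finish with the implicit
// m0 register operand.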
//===----------------------------------------------------------------------===//
// s_waitcnt
//===----------------------------------------------------------------------===//
bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
  StringRef CntName = Parser.getTok().getString();
  int64_t CntVal;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::LParen))
    return true;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::Integer))
    return true;

  if (getParser().parseAbsoluteExpression(CntVal))
    return true;

  if (getLexer().isNot(AsmToken::RParen))
    return true;

  Parser.Lex();
  if (getLexer().is(AsmToken::Amp) || getLexer().is(AsmToken::Comma))
    Parser.Lex();

  int CntShift;
  int CntMask;

  if (CntName == "vmcnt") {
    CntMask = 0xf;
    CntShift = 0;
  } else if (CntName == "expcnt") {
    CntMask = 0x7;
    CntShift = 4;
  } else if (CntName == "lgkmcnt") {
    CntMask = 0x7;
    CntShift = 8;
  } else {
    return true;
  }

  IntVal &= ~(CntMask << CntShift);
  IntVal |= (CntVal << CntShift);
  return false;
}
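// For example (illustrative), starting from the all-masked value 0x77f,
// parsing "vmcnt(0)" clears bits [3:0] giving 0x770, and a subsequent
// "lgkmcnt(0)" clears bits [10:8] giving 0x070.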
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
  // Disable all counters by default.
  // vmcnt   [3:0]
  // expcnt  [6:4]
  // lgkmcnt [10:8]
  int64_t CntVal = 0x77f;
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
    default: return MatchOperand_ParseFail;
    case AsmToken::Integer:
      // The operand can be an integer value.
      if (getParser().parseAbsoluteExpression(CntVal))
        return MatchOperand_ParseFail;
      break;

    case AsmToken::Identifier:
      do {
        if (parseCnt(CntVal))
          return MatchOperand_ParseFail;
      } while(getLexer().isNot(AsmToken::EndOfStatement));
      break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(CntVal, S));
  return MatchOperand_Success;
}
bool AMDGPUOperand::isSWaitCnt() const {
  return isImm();
}
//===----------------------------------------------------------------------===//
// sopp branch targets
//===----------------------------------------------------------------------===//
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();

  switch (getLexer().getKind()) {
    default: return MatchOperand_ParseFail;
    case AsmToken::Integer: {
      int64_t Imm;
      if (getParser().parseAbsoluteExpression(Imm))
        return MatchOperand_ParseFail;
      Operands.push_back(AMDGPUOperand::CreateImm(Imm, S));
      return MatchOperand_Success;
    }

    case AsmToken::Identifier:
      Operands.push_back(AMDGPUOperand::CreateExpr(
          MCSymbolRefExpr::Create(getContext().GetOrCreateSymbol(
                                  Parser.getTok().getString()), getContext()), S));
      Parser.Lex();
      return MatchOperand_Success;
  }
}
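// For example (illustrative), "s_branch 8" takes the integer path above,
// while a label operand creates a symbol reference that the assembler
// resolves during layout and relocation.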
//===----------------------------------------------------------------------===//
// mubuf
//===----------------------------------------------------------------------===//
static const OptionalOperand MubufOptionalOps [] = {
  {"offset", AMDGPUOperand::ImmTyOffset, false, 0, nullptr},
  {"glc", AMDGPUOperand::ImmTyGLC, true, 0, nullptr},
  {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
};

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseMubufOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(MubufOptionalOps, Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOffset(OperandVector &Operands) {
  return parseIntWithPrefix("offset", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseGLC(OperandVector &Operands) {
  return parseNamedBit("glc", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSLC(OperandVector &Operands) {
  return parseNamedBit("slc", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseTFE(OperandVector &Operands) {
  return parseNamedBit("tfe", Operands);
}
bool AMDGPUOperand::isMubufOffset() const {
  return isImm() && isUInt<12>(getImm());
}
void AMDGPUAsmParser::cvtMubuf(MCInst &Inst,
                               const OperandVector &Operands) {
  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle the case where soffset is an immediate
    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    // Handle tokens like 'offen' which are sometimes hard-coded into the
    // asm string.  There are no MCInst operands for these.
    if (Op.isToken())
      continue;

    assert(Op.isImm());

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  assert(OptionalIdx.size() == 4);

  unsigned OffsetIdx = OptionalIdx[AMDGPUOperand::ImmTyOffset];
  unsigned GLCIdx = OptionalIdx[AMDGPUOperand::ImmTyGLC];
  unsigned SLCIdx = OptionalIdx[AMDGPUOperand::ImmTySLC];
  unsigned TFEIdx = OptionalIdx[AMDGPUOperand::ImmTyTFE];

  ((AMDGPUOperand &)*Operands[OffsetIdx]).addImmOperands(Inst, 1);
  ((AMDGPUOperand &)*Operands[GLCIdx]).addImmOperands(Inst, 1);
  ((AMDGPUOperand &)*Operands[SLCIdx]).addImmOperands(Inst, 1);
  ((AMDGPUOperand &)*Operands[TFEIdx]).addImmOperands(Inst, 1);
}
//===----------------------------------------------------------------------===//
// mimg
//===----------------------------------------------------------------------===//
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDMask(OperandVector &Operands) {
  return parseIntWithPrefix("dmask", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseUNorm(OperandVector &Operands) {
  return parseNamedBit("unorm", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseR128(OperandVector &Operands) {
  return parseNamedBit("r128", Operands);
}
//===----------------------------------------------------------------------===//
// vop3
//===----------------------------------------------------------------------===//
static bool ConvertOmodMul(int64_t &Mul) {
  if (Mul != 1 && Mul != 2 && Mul != 4)
    return false;

  Mul >>= 1;
  return true;
}

static bool ConvertOmodDiv(int64_t &Div) {
  if (Div == 1) {
    Div = 0;
    return true;
  }

  if (Div == 2) {
    Div = 3;
    return true;
  }

  return false;
}

static const OptionalOperand VOP3OptionalOps [] = {
  {"clamp", AMDGPUOperand::ImmTyClamp, true, 0, nullptr},
  {"mul", AMDGPUOperand::ImmTyOMod, false, 1, ConvertOmodMul},
  {"div", AMDGPUOperand::ImmTyOMod, false, 1, ConvertOmodDiv},
};
static bool isVOP3(OperandVector &Operands) {
  if (operandsHaveModifiers(Operands))
    return true;

  AMDGPUOperand &DstOp = ((AMDGPUOperand&)*Operands[1]);

  if (DstOp.isReg() && DstOp.isRegClass(AMDGPU::SGPR_64RegClassID))
    return true;

  if (Operands.size() >= 5)
    return true;

  if (Operands.size() > 3) {
    AMDGPUOperand &Src1Op = ((AMDGPUOperand&)*Operands[3]);
    if (Src1Op.isReg() && (Src1Op.isRegClass(AMDGPU::SReg_32RegClassID) ||
                           Src1Op.isRegClass(AMDGPU::SReg_64RegClassID)))
      return true;
  }
  return false;
}
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseVOP3OptionalOps(OperandVector &Operands) {

  // The value returned by this function may change after parsing
  // an operand, so store the original value here.
  bool HasModifiers = operandsHaveModifiers(Operands);

  bool IsVOP3 = isVOP3(Operands);
  if (HasModifiers || IsVOP3 ||
      getLexer().isNot(AsmToken::EndOfStatement) ||
      getForcedEncodingSize() == 64) {

    AMDGPUAsmParser::OperandMatchResultTy Res =
        parseOptionalOps(VOP3OptionalOps, Operands);

    if (!HasModifiers && Res == MatchOperand_Success) {
      // We have added a modifier operation, so we need to make sure all
      // previous register operands have modifiers
      for (unsigned i = 2, e = Operands.size(); i != e; ++i) {
        AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);
        if (Op.isReg())
          Op.setModifiers(0);
      }
    }
    return Res;
  }
  return MatchOperand_NoMatch;
}
void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
  ((AMDGPUOperand &)*Operands[1]).addRegOperands(Inst, 1);
  unsigned i = 2;

  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;

  if (operandsHaveModifiers(Operands)) {
    for (unsigned e = Operands.size(); i != e; ++i) {
      AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

      if (Op.isRegWithInputMods()) {
        ((AMDGPUOperand &)*Operands[i]).addRegWithInputModsOperands(Inst, 2);
        continue;
      }
      OptionalIdx[Op.getImmTy()] = i;
    }

    unsigned ClampIdx = OptionalIdx[AMDGPUOperand::ImmTyClamp];
    unsigned OModIdx = OptionalIdx[AMDGPUOperand::ImmTyOMod];

    ((AMDGPUOperand &)*Operands[ClampIdx]).addImmOperands(Inst, 1);
    ((AMDGPUOperand &)*Operands[OModIdx]).addImmOperands(Inst, 1);
  } else {
    for (unsigned e = Operands.size(); i != e; ++i)
      ((AMDGPUOperand &)*Operands[i]).addRegOrImmOperands(Inst, 1);
  }
}
/// Force static initialization.
extern "C" void LLVMInitializeR600AsmParser() {
  RegisterMCAsmParser<AMDGPUAsmParser> A(TheAMDGPUTarget);
  RegisterMCAsmParser<AMDGPUAsmParser> B(TheGCNTarget);
}

#define GET_REGISTER_MATCHER
#define GET_MATCHER_IMPLEMENTATION
#include "AMDGPUGenAsmMatcher.inc"