70#define DEBUG_TYPE "asm-parser"
// Mode controlling how conditional instructions outside an IT block are
// handled; selected by the "arm-implicit-it" command-line option below.
enum class ImplicitItModeTy {
  Always,   // accept in both ISAs, emit implicit ITs in Thumb
  Never,    // warn in ARM, reject in Thumb
  ARMOnly,  // accept in ARM, reject in Thumb (the cl::init default)
  ThumbOnly // warn in ARM, emit implicit ITs in Thumb
};
80 "arm-implicit-it",
cl::init(ImplicitItModeTy::ARMOnly),
81 cl::desc(
"Allow conditional instructions outside of an IT block"),
83 "Accept in both ISAs, emit implicit ITs in Thumb"),
85 "Warn in ARM, reject in Thumb"),
87 "Accept in ARM, reject in Thumb"),
88 clEnumValN(ImplicitItModeTy::ThumbOnly,
"thumb",
89 "Warn in ARM, emit implicit ITs in Thumb")));
// Kind of vector-lane specifier attached to an operand: no lane syntax,
// an all-lanes form, or a single indexed lane (see parseVectorLane).
enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
// Extract the bit of an IT-instruction condition mask that corresponds to
// the instruction at the given position within the IT block: bit
// (5 - Position) of the mask holds that slot's flag.
static inline unsigned extractITMaskBit(unsigned Mask, unsigned Position) {
  // NOTE(review): assumes 1 <= Position <= 5 so the shift amount stays
  // non-negative — confirm against callers.
  return (Mask >> (5 - Position) & 1);
112 Locs PersonalityLocs;
113 Locs PersonalityIndexLocs;
114 Locs HandlerDataLocs;
// True once at least one .fnstart directive has been seen (locations are
// recorded by recordFnStart).
bool hasFnStart() const { return !FnStartLocs.empty(); }
// True once a .cantunwind directive has been seen (locations are recorded
// by recordCantUnwind).
bool cantUnwind() const { return !CantUnwindLocs.empty(); }
// True once a .handlerdata directive has been seen (locations are recorded
// by recordHandlerData).
bool hasHandlerData() const { return !HandlerDataLocs.empty(); }
124 bool hasPersonality()
const {
125 return !(PersonalityLocs.empty() && PersonalityIndexLocs.empty());
// Record the source location of a .fnstart directive for later diagnostics.
void recordFnStart(SMLoc L) { FnStartLocs.push_back(L); }
// Record the source location of a .cantunwind directive for later diagnostics.
void recordCantUnwind(SMLoc L) { CantUnwindLocs.push_back(L); }
// Record the source location of a .personality directive for later diagnostics.
void recordPersonality(SMLoc L) { PersonalityLocs.push_back(L); }
// Record the source location of a .handlerdata directive for later diagnostics.
void recordHandlerData(SMLoc L) { HandlerDataLocs.push_back(L); }
// Record the source location of a .personalityindex directive for later
// diagnostics.
void recordPersonalityIndex(SMLoc L) { PersonalityIndexLocs.push_back(L); }
137 void emitFnStartLocNotes()
const {
139 Parser.
Note(
Loc,
".fnstart was specified here");
142 void emitCantUnwindLocNotes()
const {
144 Parser.
Note(
Loc,
".cantunwind was specified here");
147 void emitHandlerDataLocNotes()
const {
149 Parser.
Note(
Loc,
".handlerdata was specified here");
152 void emitPersonalityLocNotes()
const {
154 PE = PersonalityLocs.end(),
155 PII = PersonalityIndexLocs.begin(),
156 PIE = PersonalityIndexLocs.end();
157 PI != PE || PII != PIE;) {
158 if (PI != PE && (PII == PIE || PI->getPointer() < PII->getPointer()))
159 Parser.
Note(*PI++,
".personality was specified here");
160 else if (PII != PIE && (PI == PE || PII->getPointer() < PI->getPointer()))
161 Parser.
Note(*PII++,
".personalityindex was specified here");
164 "at the same location");
169 FnStartLocs = Locs();
170 CantUnwindLocs = Locs();
171 PersonalityLocs = Locs();
172 HandlerDataLocs = Locs();
173 PersonalityIndexLocs = Locs();
179class ARMMnemonicSets {
190 return CDE.
count(Mnemonic);
195 bool isVPTPredicableCDEInstr(
StringRef Mnemonic) {
198 return CDEWithVPTSuffix.
count(Mnemonic);
203 bool isITPredicableCDEInstr(
StringRef Mnemonic) {
213 bool isCDEDualRegInstr(
StringRef Mnemonic) {
216 return Mnemonic ==
"cx1d" || Mnemonic ==
"cx1da" ||
217 Mnemonic ==
"cx2d" || Mnemonic ==
"cx2da" ||
218 Mnemonic ==
"cx3d" || Mnemonic ==
"cx3da";
223 for (
StringRef Mnemonic: {
"cx1",
"cx1a",
"cx1d",
"cx1da",
224 "cx2",
"cx2a",
"cx2d",
"cx2da",
225 "cx3",
"cx3a",
"cx3d",
"cx3da", })
228 {
"vcx1",
"vcx1a",
"vcx2",
"vcx2a",
"vcx3",
"vcx3a"}) {
230 CDEWithVPTSuffix.
insert(Mnemonic);
231 CDEWithVPTSuffix.
insert(std::string(Mnemonic) +
"t");
232 CDEWithVPTSuffix.
insert(std::string(Mnemonic) +
"e");
242 assert(getParser().getStreamer().getTargetStreamer() &&
243 "do not have a target streamer");
251 bool NextSymbolIsThumb;
253 bool useImplicitITThumb()
const {
254 return ImplicitItMode == ImplicitItModeTy::Always ||
255 ImplicitItMode == ImplicitItModeTy::ThumbOnly;
258 bool useImplicitITARM()
const {
259 return ImplicitItMode == ImplicitItModeTy::Always ||
260 ImplicitItMode == ImplicitItModeTy::ARMOnly;
275 unsigned CurPosition;
290 void onEndOfFile()
override {
291 flushPendingInstructions(getParser().getStreamer());
294 void flushPendingInstructions(
MCStreamer &Out)
override {
295 if (!inImplicitITBlock()) {
309 for (
const MCInst &Inst : PendingConditionalInsts) {
312 PendingConditionalInsts.clear();
316 ITState.CurPosition = ~0U;
// True while parsing inside an IT block; CurPosition == ~0U is the sentinel
// for "no active IT block" (forwardITPosition resets it to ~0U at the end).
bool inITBlock() { return ITState.CurPosition != ~0U; }
320 bool inExplicitITBlock() {
return inITBlock() && ITState.IsExplicit; }
321 bool inImplicitITBlock() {
return inITBlock() && !ITState.IsExplicit; }
323 bool lastInITBlock() {
327 void forwardITPosition() {
328 if (!inITBlock())
return;
333 if (++ITState.CurPosition == 5 - TZ && ITState.IsExplicit)
334 ITState.CurPosition = ~0U;
338 void rewindImplicitITPosition() {
339 assert(inImplicitITBlock());
340 assert(ITState.CurPosition > 1);
341 ITState.CurPosition--;
343 unsigned NewMask = 0;
344 NewMask |= ITState.Mask & (0xC << TZ);
345 NewMask |= 0x2 << TZ;
346 ITState.Mask = NewMask;
351 void discardImplicitITBlock() {
352 assert(inImplicitITBlock());
353 assert(ITState.CurPosition == 1);
354 ITState.CurPosition = ~0U;
359 unsigned MaskBit = extractITMaskBit(ITState.Mask, ITState.CurPosition);
365 void invertCurrentITCondition() {
366 if (ITState.CurPosition == 1) {
369 ITState.Mask ^= 1 << (5 - ITState.CurPosition);
374 bool isITBlockFull() {
375 return inITBlock() && (ITState.Mask & 1);
381 assert(inImplicitITBlock());
386 unsigned NewMask = 0;
388 NewMask |= ITState.Mask & (0xE << TZ);
390 NewMask |= (
Cond != ITState.Cond) << TZ;
392 NewMask |= 1 << (TZ - 1);
393 ITState.Mask = NewMask;
397 void startImplicitITBlock() {
401 ITState.CurPosition = 1;
402 ITState.IsExplicit =
false;
413 ITState.CurPosition = 0;
414 ITState.IsExplicit =
true;
419 unsigned CurPosition;
// True while parsing inside a VPT predication block; ~0U is the sentinel
// for "no active block" (forwardVPTPosition resets it).
bool inVPTBlock() { return VPTState.CurPosition != ~0U; }
422 void forwardVPTPosition() {
423 if (!inVPTBlock())
return;
425 if (++VPTState.CurPosition == 5 - TZ)
426 VPTState.CurPosition = ~0U;
430 return getParser().Note(L, Msg,
Range);
434 return getParser().Warning(L, Msg,
Range);
438 return getParser().Error(L, Msg,
Range);
442 unsigned MnemonicOpsEndInd,
unsigned ListIndex,
443 bool IsARPop =
false);
445 unsigned MnemonicOpsEndInd,
unsigned ListIndex);
447 MCRegister tryParseRegister(
bool AllowOutofBoundReg =
false);
450 std::optional<ARM_AM::ShiftOpc> tryParseShiftToken();
451 bool parseRegisterList(
OperandVector &,
bool EnforceOrder =
true,
452 bool AllowRAAC =
false,
bool IsLazyLoadStore =
false,
453 bool IsVSCCLRM =
false);
456 bool parseImmExpr(int64_t &Out);
459 unsigned &ShiftAmount);
460 bool parseLiteralValues(
unsigned Size,
SMLoc L);
461 bool parseDirectiveThumb(
SMLoc L);
462 bool parseDirectiveARM(
SMLoc L);
463 bool parseDirectiveThumbFunc(
SMLoc L);
464 bool parseDirectiveCode(
SMLoc L);
465 bool parseDirectiveSyntax(
SMLoc L);
467 bool parseDirectiveUnreq(
SMLoc L);
468 bool parseDirectiveArch(
SMLoc L);
469 bool parseDirectiveEabiAttr(
SMLoc L);
470 bool parseDirectiveCPU(
SMLoc L);
471 bool parseDirectiveFPU(
SMLoc L);
472 bool parseDirectiveFnStart(
SMLoc L);
473 bool parseDirectiveFnEnd(
SMLoc L);
474 bool parseDirectiveCantUnwind(
SMLoc L);
475 bool parseDirectivePersonality(
SMLoc L);
476 bool parseDirectiveHandlerData(
SMLoc L);
477 bool parseDirectiveSetFP(
SMLoc L);
478 bool parseDirectivePad(
SMLoc L);
479 bool parseDirectiveRegSave(
SMLoc L,
bool IsVector);
480 bool parseDirectiveInst(
SMLoc L,
char Suffix =
'\0');
481 bool parseDirectiveLtorg(
SMLoc L);
482 bool parseDirectiveEven(
SMLoc L);
483 bool parseDirectivePersonalityIndex(
SMLoc L);
484 bool parseDirectiveUnwindRaw(
SMLoc L);
485 bool parseDirectiveTLSDescSeq(
SMLoc L);
486 bool parseDirectiveMovSP(
SMLoc L);
487 bool parseDirectiveObjectArch(
SMLoc L);
488 bool parseDirectiveArchExtension(
SMLoc L);
489 bool parseDirectiveAlign(
SMLoc L);
490 bool parseDirectiveThumbSet(
SMLoc L);
492 bool parseDirectiveSEHAllocStack(
SMLoc L,
bool Wide);
493 bool parseDirectiveSEHSaveRegs(
SMLoc L,
bool Wide);
494 bool parseDirectiveSEHSaveSP(
SMLoc L);
495 bool parseDirectiveSEHSaveFRegs(
SMLoc L);
496 bool parseDirectiveSEHSaveLR(
SMLoc L);
497 bool parseDirectiveSEHPrologEnd(
SMLoc L,
bool Fragment);
498 bool parseDirectiveSEHNop(
SMLoc L,
bool Wide);
499 bool parseDirectiveSEHEpilogStart(
SMLoc L,
bool Condition);
500 bool parseDirectiveSEHEpilogEnd(
SMLoc L);
501 bool parseDirectiveSEHCustom(
SMLoc L);
503 std::unique_ptr<ARMOperand> defaultCondCodeOp();
504 std::unique_ptr<ARMOperand> defaultCCOutOp();
505 std::unique_ptr<ARMOperand> defaultVPTPredOp();
511 bool &CarrySetting,
unsigned &ProcessorIMod,
514 StringRef FullInst,
bool &CanAcceptCarrySet,
515 bool &CanAcceptPredicationCode,
516 bool &CanAcceptVPTPredicationCode);
519 void tryConvertingToTwoOperandForm(
StringRef Mnemonic,
522 unsigned MnemonicOpsEndInd);
525 unsigned MnemonicOpsEndInd);
529 return getSTI().hasFeature(ARM::ModeThumb);
532 bool isThumbOne()
const {
533 return isThumb() && !getSTI().hasFeature(ARM::FeatureThumb2);
536 bool isThumbTwo()
const {
537 return isThumb() && getSTI().hasFeature(ARM::FeatureThumb2);
540 bool hasThumb()
const {
541 return getSTI().hasFeature(ARM::HasV4TOps);
544 bool hasThumb2()
const {
545 return getSTI().hasFeature(ARM::FeatureThumb2);
548 bool hasV6Ops()
const {
549 return getSTI().hasFeature(ARM::HasV6Ops);
552 bool hasV6T2Ops()
const {
553 return getSTI().hasFeature(ARM::HasV6T2Ops);
556 bool hasV6MOps()
const {
557 return getSTI().hasFeature(ARM::HasV6MOps);
560 bool hasV7Ops()
const {
561 return getSTI().hasFeature(ARM::HasV7Ops);
564 bool hasV8Ops()
const {
565 return getSTI().hasFeature(ARM::HasV8Ops);
568 bool hasV8MBaseline()
const {
569 return getSTI().hasFeature(ARM::HasV8MBaselineOps);
572 bool hasV8MMainline()
const {
573 return getSTI().hasFeature(ARM::HasV8MMainlineOps);
575 bool hasV8_1MMainline()
const {
576 return getSTI().hasFeature(ARM::HasV8_1MMainlineOps);
578 bool hasMVEFloat()
const {
579 return getSTI().hasFeature(ARM::HasMVEFloatOps);
581 bool hasCDE()
const {
582 return getSTI().hasFeature(ARM::HasCDEOps);
584 bool has8MSecExt()
const {
585 return getSTI().hasFeature(ARM::Feature8MSecExt);
588 bool hasARM()
const {
589 return !getSTI().hasFeature(ARM::FeatureNoARM);
592 bool hasDSP()
const {
593 return getSTI().hasFeature(ARM::FeatureDSP);
596 bool hasD32()
const {
597 return getSTI().hasFeature(ARM::FeatureD32);
600 bool hasV8_1aOps()
const {
601 return getSTI().hasFeature(ARM::HasV8_1aOps);
604 bool hasRAS()
const {
605 return getSTI().hasFeature(ARM::FeatureRAS);
610 auto FB = ComputeAvailableFeatures(STI.
ToggleFeature(ARM::ModeThumb));
611 setAvailableFeatures(FB);
614 void FixModeAfterArchChange(
bool WasThumb,
SMLoc Loc);
616 bool isMClass()
const {
617 return getSTI().hasFeature(ARM::FeatureMClass);
623#define GET_ASSEMBLER_HEADER
624#include "ARMGenAsmMatcher.inc"
655 ParseStatus parseVectorLane(VectorLaneTy &LaneKind,
unsigned &Index,
664 unsigned MnemonicOpsEndInd);
666 unsigned MnemonicOpsEndInd, MCStreamer &Out);
667 bool shouldOmitVectorPredicateOperand(StringRef Mnemonic,
669 unsigned MnemonicOpsEndInd);
670 bool isITBlockTerminator(MCInst &Inst)
const;
672 void fixupGNULDRDAlias(StringRef Mnemonic,
OperandVector &Operands,
673 unsigned MnemonicOpsEndInd);
674 bool validateLDRDSTRD(MCInst &Inst,
const OperandVector &Operands,
bool Load,
675 bool ARMMode,
bool Writeback,
676 unsigned MnemonicOpsEndInd);
679 enum ARMMatchResultTy {
680 Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
681 Match_RequiresNotITBlock,
683 Match_RequiresThumb2,
685 Match_RequiresFlagSetting,
686#define GET_OPERAND_DIAGNOSTIC_TYPES
687#include "ARMGenAsmMatcher.inc"
691 ARMAsmParser(
const MCSubtargetInfo &STI, MCAsmParser &Parser,
692 const MCInstrInfo &MII,
const MCTargetOptions &
Options)
693 : MCTargetAsmParser(
Options, STI, MII), UC(Parser), MS(STI) {
700 setAvailableFeatures(ComputeAvailableFeatures(STI.
getFeatureBits()));
704 getTargetStreamer().emitTargetAttributes(STI);
707 ITState.CurPosition = ~0
U;
709 VPTState.CurPosition = ~0
U;
711 NextSymbolIsThumb =
false;
715 bool parseRegister(MCRegister &
Reg, SMLoc &StartLoc, SMLoc &EndLoc)
override;
716 ParseStatus tryParseRegister(MCRegister &
Reg, SMLoc &StartLoc,
717 SMLoc &EndLoc)
override;
718 bool parseInstruction(ParseInstructionInfo &Info, StringRef Name,
720 bool ParseDirective(AsmToken DirectiveID)
override;
722 unsigned validateTargetOperandClass(MCParsedAsmOperand &
Op,
723 unsigned Kind)
override;
724 unsigned checkTargetMatchPredicate(MCInst &Inst)
override;
726 checkEarlyTargetMatchPredicate(MCInst &Inst,
729 bool matchAndEmitInstruction(SMLoc IDLoc,
unsigned &Opcode,
732 bool MatchingInlineAsm)
override;
733 unsigned MatchInstruction(
OperandVector &Operands, MCInst &Inst,
734 SmallVectorImpl<NearMissInfo> &NearMisses,
735 bool MatchingInlineAsm,
bool &EmitInITBlock,
738 struct NearMissMessage {
740 SmallString<128> Message;
743 const char *getCustomOperandDiag(ARMMatchResultTy MatchError);
745 void FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
746 SmallVectorImpl<NearMissMessage> &NearMissesOut,
748 void ReportNearMisses(SmallVectorImpl<NearMissInfo> &NearMisses, SMLoc IDLoc,
751 void doBeforeLabelEmit(MCSymbol *Symbol, SMLoc IDLoc)
override;
753 void onLabelParsed(MCSymbol *Symbol)
override;
755 const MCInstrDesc &getInstrDesc(
unsigned int Opcode)
const {
756 return MII.get(Opcode);
// True when the subtarget reports the MVE integer-operations feature.
bool hasMVE() const { return getSTI().hasFeature(ARM::HasMVEIntegerOps); }
762 MCRegister getDRegFromQReg(MCRegister QReg)
const {
763 return MRI->
getSubReg(QReg, ARM::dsub_0);
// Non-owning accessor for the register info used by this parser.
const MCRegisterInfo *getMRI() const { return MRI; }
771class ARMOperand :
public MCParsedAsmOperand {
782 k_InstSyncBarrierOpt,
783 k_TraceSyncBarrierOpt,
792 k_RegisterListWithAPSR,
795 k_FPSRegisterListWithVPR,
796 k_FPDRegisterListWithVPR,
798 k_VectorListAllLanes,
805 k_ConstantPoolImmediate,
806 k_BitfieldDescriptor,
810 SMLoc StartLoc, EndLoc, AlignmentLoc;
813 ARMAsmParser *Parser;
827 struct CoprocOptionOp {
869 struct VectorListOp {
876 struct VectorIndexOp {
886 MCRegister BaseRegNum;
889 const MCExpr *OffsetImm;
890 MCRegister OffsetRegNum;
895 unsigned isNegative : 1;
898 struct PostIdxRegOp {
905 struct ShifterImmOp {
910 struct RegShiftedRegOp {
917 struct RegShiftedImmOp {
941 struct CoprocOptionOp CoprocOption;
942 struct MBOptOp MBOpt;
943 struct ISBOptOp ISBOpt;
944 struct TSBOptOp TSBOpt;
945 struct ITMaskOp ITMask;
947 struct MMaskOp MMask;
948 struct BankedRegOp BankedReg;
951 struct VectorListOp VectorList;
952 struct VectorIndexOp VectorIndex;
954 struct MemoryOp Memory;
955 struct PostIdxRegOp PostIdxReg;
956 struct ShifterImmOp ShifterImm;
957 struct RegShiftedRegOp RegShiftedReg;
958 struct RegShiftedImmOp RegShiftedImm;
959 struct RotImmOp RotImm;
960 struct ModImmOp ModImm;
965 ARMOperand(KindTy K, ARMAsmParser &Parser) : Kind(
K), Parser(&Parser) {}
// Starting source location of this operand.
SMLoc getStartLoc() const override { return StartLoc; }
// Ending source location of this operand.
SMLoc getEndLoc() const override { return EndLoc; }
// Source range from StartLoc to EndLoc covering this operand.
SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }
978 SMLoc getAlignmentLoc()
const {
979 assert(Kind == k_Memory &&
"Invalid access!");
984 assert(Kind == k_CondCode &&
"Invalid access!");
989 assert(isVPTPred() &&
"Invalid access!");
993 unsigned getCoproc()
const {
994 assert((Kind == k_CoprocNum || Kind == k_CoprocReg) &&
"Invalid access!");
999 assert(Kind == k_Token &&
"Invalid access!");
1000 return StringRef(Tok.Data, Tok.Length);
1003 MCRegister
getReg()
const override {
1004 assert((Kind == k_Register || Kind == k_CCOut) &&
"Invalid access!");
1008 const SmallVectorImpl<MCRegister> &getRegList()
const {
1009 assert((Kind == k_RegisterList || Kind == k_RegisterListWithAPSR ||
1010 Kind == k_DPRRegisterList || Kind == k_SPRRegisterList ||
1011 Kind == k_FPSRegisterListWithVPR ||
1012 Kind == k_FPDRegisterListWithVPR) &&
1017 const MCExpr *
getImm()
const {
1018 assert(isImm() &&
"Invalid access!");
1022 const MCExpr *getConstantPoolImm()
const {
1023 assert(isConstantPoolImm() &&
"Invalid access!");
1027 unsigned getVectorIndex()
const {
1028 assert(Kind == k_VectorIndex &&
"Invalid access!");
1029 return VectorIndex.Val;
1033 assert(Kind == k_MemBarrierOpt &&
"Invalid access!");
1038 assert(Kind == k_InstSyncBarrierOpt &&
"Invalid access!");
1043 assert(Kind == k_TraceSyncBarrierOpt &&
"Invalid access!");
1048 assert(Kind == k_ProcIFlags &&
"Invalid access!");
1052 unsigned getMSRMask()
const {
1053 assert(Kind == k_MSRMask &&
"Invalid access!");
1057 unsigned getBankedReg()
const {
1058 assert(Kind == k_BankedReg &&
"Invalid access!");
1059 return BankedReg.Val;
// True for a coprocessor-number operand (k_CoprocNum).
bool isCoprocNum() const { return Kind == k_CoprocNum; }
// True for a coprocessor-register operand (k_CoprocReg).
bool isCoprocReg() const { return Kind == k_CoprocReg; }
// True for a coprocessor-option operand (k_CoprocOption).
bool isCoprocOption() const { return Kind == k_CoprocOption; }
// True for a condition-code operand (k_CondCode).
bool isCondCode() const { return Kind == k_CondCode; }
// True for a VPT predication operand (k_VPTPred).
bool isVPTPred() const { return Kind == k_VPTPred; }
// True for a condition-code-output register operand (k_CCOut); such
// registers are deliberately not matched by isReg().
bool isCCOut() const { return Kind == k_CCOut; }
// True for an IT-instruction condition-mask operand (k_ITCondMask).
bool isITMask() const { return Kind == k_ITCondMask; }
// True for the condition code of an IT instruction; note it is represented
// by the same kind (k_CondCode) as a plain condition code.
bool isITCondCode() const { return Kind == k_CondCode; }
1070 bool isImm()
const override {
1071 return Kind == k_Immediate;
1074 bool isARMBranchTarget()
const {
1075 if (!isImm())
return false;
1078 return CE->getValue() % 4 == 0;
1083 bool isThumbBranchTarget()
const {
1084 if (!isImm())
return false;
1087 return CE->getValue() % 2 == 0;
1093 template<
unsigned w
idth,
unsigned scale>
1094 bool isUnsignedOffset()
const {
1095 if (!isImm())
return false;
1098 int64_t Val =
CE->getValue();
1100 int64_t
Max =
Align * ((1LL << width) - 1);
1101 return ((Val % Align) == 0) && (Val >= 0) && (Val <= Max);
1108 template<
unsigned w
idth,
unsigned scale>
1109 bool isSignedOffset()
const {
1110 if (!isImm())
return false;
1113 int64_t Val =
CE->getValue();
1115 int64_t
Max =
Align * ((1LL << (width-1)) - 1);
1116 int64_t Min = -
Align * (1LL << (width-1));
1117 return ((Val % Align) == 0) && (Val >= Min) && (Val <= Max);
1124 bool isLEOffset()
const {
1125 if (!isImm())
return false;
1128 int64_t Val =
CE->getValue();
1129 return Val < 0 && Val >= -4094 && (Val & 1) == 0;
1138 bool isThumbMemPC()
const {
1143 if (!CE)
return false;
1144 Val =
CE->getValue();
1146 else if (isGPRMem()) {
1147 if(!Memory.OffsetImm || Memory.OffsetRegNum)
return false;
1148 if(Memory.BaseRegNum != ARM::PC)
return false;
1150 Val =
CE->getValue();
1155 return ((Val % 4) == 0) && (Val >= 0) && (Val <= 1020);
1158 bool isFPImm()
const {
1159 if (!isImm())
return false;
1167 template<
int64_t N,
int64_t M>
1168 bool isImmediate()
const {
1169 if (!isImm())
return false;
1171 if (!CE)
return false;
1172 int64_t
Value =
CE->getValue();
1176 template<
int64_t N,
int64_t M>
1177 bool isImmediateS4()
const {
1178 if (!isImm())
return false;
1180 if (!CE)
return false;
1181 int64_t
Value =
CE->getValue();
1184 template<
int64_t N,
int64_t M>
1185 bool isImmediateS2()
const {
1186 if (!isImm())
return false;
1188 if (!CE)
return false;
1189 int64_t
Value =
CE->getValue();
1192 bool isFBits16()
const {
1193 return isImmediate<0, 17>();
1195 bool isFBits32()
const {
1196 return isImmediate<1, 33>();
1198 bool isImm8s4()
const {
1199 return isImmediateS4<-1020, 1020>();
1201 bool isImm7s4()
const {
1202 return isImmediateS4<-508, 508>();
1204 bool isImm7Shift0()
const {
1205 return isImmediate<-127, 127>();
1207 bool isImm7Shift1()
const {
1208 return isImmediateS2<-255, 255>();
1210 bool isImm7Shift2()
const {
1211 return isImmediateS4<-511, 511>();
1213 bool isImm7()
const {
1214 return isImmediate<-127, 127>();
1216 bool isImm0_1020s4()
const {
1217 return isImmediateS4<0, 1020>();
1219 bool isImm0_508s4()
const {
1220 return isImmediateS4<0, 508>();
1222 bool isImm0_508s4Neg()
const {
1223 if (!isImm())
return false;
1225 if (!CE)
return false;
1226 int64_t
Value = -
CE->getValue();
1231 bool isImm0_4095Neg()
const {
1232 if (!isImm())
return false;
1234 if (!CE)
return false;
1239 if ((
CE->getValue() >> 32) > 0)
return false;
1240 uint32_t
Value = -
static_cast<uint32_t
>(
CE->getValue());
1244 bool isImm0_7()
const {
1245 return isImmediate<0, 7>();
1248 bool isImm1_16()
const {
1249 return isImmediate<1, 16>();
1252 bool isImm1_32()
const {
1253 return isImmediate<1, 32>();
1256 bool isImm8_255()
const {
1257 return isImmediate<8, 255>();
1260 bool isImm0_255Expr()
const {
1268 int64_t
Value =
CE->getValue();
1272 bool isImm256_65535Expr()
const {
1273 if (!isImm())
return false;
1277 if (!CE)
return true;
1278 int64_t
Value =
CE->getValue();
1282 bool isImm0_65535Expr()
const {
1283 if (!isImm())
return false;
1287 if (!CE)
return true;
1288 int64_t
Value =
CE->getValue();
1292 bool isImm24bit()
const {
1293 return isImmediate<0, 0xffffff + 1>();
1296 bool isImmThumbSR()
const {
1297 return isImmediate<1, 33>();
1300 bool isPKHLSLImm()
const {
1301 return isImmediate<0, 32>();
1304 bool isPKHASRImm()
const {
1305 return isImmediate<0, 33>();
1308 bool isAdrLabel()
const {
1315 if (!isImm())
return false;
1317 if (!CE)
return false;
1318 int64_t
Value =
CE->getValue();
1323 bool isT2SOImm()
const {
1330 return (!ARM16Expr || (ARM16Expr->getSpecifier() !=
ARM::S_HI16 &&
1333 if (!isImm())
return false;
1335 if (!CE)
return false;
1336 int64_t
Value =
CE->getValue();
1340 bool isT2SOImmNot()
const {
1341 if (!isImm())
return false;
1343 if (!CE)
return false;
1344 int64_t
Value =
CE->getValue();
1349 bool isT2SOImmNeg()
const {
1350 if (!isImm())
return false;
1352 if (!CE)
return false;
1353 int64_t
Value =
CE->getValue();
1359 bool isSetEndImm()
const {
1360 if (!isImm())
return false;
1362 if (!CE)
return false;
1363 int64_t
Value =
CE->getValue();
// MCParsedAsmOperand override: true only for plain k_Register operands;
// k_CCOut registers are reported separately via isCCOut().
bool isReg() const override { return Kind == k_Register; }
// True for a plain general-purpose register-list operand (k_RegisterList).
bool isRegList() const { return Kind == k_RegisterList; }
1369 bool isRegListWithAPSR()
const {
1370 return Kind == k_RegisterListWithAPSR || Kind == k_RegisterList;
1372 bool isDReg()
const {
1374 ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
Reg.RegNum);
1376 bool isQReg()
const {
1378 ARMMCRegisterClasses[ARM::QPRRegClassID].contains(
Reg.RegNum);
// True for a D-register list operand (k_DPRRegisterList).
bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
// True for an S-register list operand (k_SPRRegisterList).
bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
// True for an S-register list that may include VPR (k_FPSRegisterListWithVPR).
bool isFPSRegListWithVPR() const { return Kind == k_FPSRegisterListWithVPR; }
// True for a D-register list that may include VPR (k_FPDRegisterListWithVPR).
bool isFPDRegListWithVPR() const { return Kind == k_FPDRegisterListWithVPR; }
// MCParsedAsmOperand override: true for a literal token operand (k_Token).
bool isToken() const override { return Kind == k_Token; }
// True for a memory-barrier option operand (k_MemBarrierOpt).
bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
// True for an instruction-sync-barrier option operand (k_InstSyncBarrierOpt).
bool isInstSyncBarrierOpt() const { return Kind == k_InstSyncBarrierOpt; }
// True for a trace-sync-barrier option operand (k_TraceSyncBarrierOpt).
bool isTraceSyncBarrierOpt() const { return Kind == k_TraceSyncBarrierOpt; }
1388 bool isMem()
const override {
1389 return isGPRMem() || isMVEMem();
1391 bool isMVEMem()
const {
1392 if (Kind != k_Memory)
1394 if (Memory.BaseRegNum &&
1395 !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.BaseRegNum) &&
1396 !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Memory.BaseRegNum))
1398 if (Memory.OffsetRegNum &&
1399 !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
1400 Memory.OffsetRegNum))
1404 bool isGPRMem()
const {
1405 if (Kind != k_Memory)
1407 if (Memory.BaseRegNum &&
1408 !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.BaseRegNum))
1410 if (Memory.OffsetRegNum &&
1411 !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.OffsetRegNum))
// True for a shifter-immediate operand (k_ShifterImmediate).
bool isShifterImm() const { return Kind == k_ShifterImmediate; }
1416 bool isRegShiftedReg()
const {
1417 return Kind == k_ShiftedRegister &&
1418 ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
1419 RegShiftedReg.SrcReg) &&
1420 ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
1421 RegShiftedReg.ShiftReg);
1423 bool isRegShiftedImm()
const {
1424 return Kind == k_ShiftedImmediate &&
1425 ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
1426 RegShiftedImm.SrcReg);
// True for a rotate-immediate operand (k_RotateImmediate).
bool isRotImm() const { return Kind == k_RotateImmediate; }
1430 template<
unsigned Min,
unsigned Max>
1431 bool isPowerTwoInRange()
const {
1432 if (!isImm())
return false;
1434 if (!CE)
return false;
1435 int64_t
Value =
CE->getValue();
// True for a modified-immediate operand (k_ModifiedImmediate).
bool isModImm() const { return Kind == k_ModifiedImmediate; }
1441 bool isModImmNot()
const {
1442 if (!isImm())
return false;
1444 if (!CE)
return false;
1445 int64_t
Value =
CE->getValue();
1449 bool isModImmNeg()
const {
1450 if (!isImm())
return false;
1452 if (!CE)
return false;
1453 int64_t
Value =
CE->getValue();
1458 bool isThumbModImmNeg1_7()
const {
1459 if (!isImm())
return false;
1461 if (!CE)
return false;
1462 int32_t
Value = -(int32_t)
CE->getValue();
1466 bool isThumbModImmNeg8_255()
const {
1467 if (!isImm())
return false;
1469 if (!CE)
return false;
1470 int32_t
Value = -(int32_t)
CE->getValue();
// True for a constant-pool immediate operand (k_ConstantPoolImmediate).
bool isConstantPoolImm() const { return Kind == k_ConstantPoolImmediate; }
// True for a bitfield-descriptor operand (k_BitfieldDescriptor).
bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
1476 bool isPostIdxRegShifted()
const {
1477 return Kind == k_PostIndexRegister &&
1478 ARMMCRegisterClasses[ARM::GPRRegClassID].contains(PostIdxReg.RegNum);
1480 bool isPostIdxReg()
const {
1483 bool isMemNoOffset(
bool alignOK =
false,
unsigned Alignment = 0)
const {
1487 return !Memory.OffsetRegNum && Memory.OffsetImm ==
nullptr &&
1488 (alignOK || Memory.Alignment == Alignment);
1490 bool isMemNoOffsetT2(
bool alignOK =
false,
unsigned Alignment = 0)
const {
1494 if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].
contains(
1499 return !Memory.OffsetRegNum && Memory.OffsetImm ==
nullptr &&
1500 (alignOK || Memory.Alignment == Alignment);
1502 bool isMemNoOffsetT2NoSp(
bool alignOK =
false,
unsigned Alignment = 0)
const {
1506 if (!ARMMCRegisterClasses[ARM::rGPRRegClassID].
contains(
1511 return !Memory.OffsetRegNum && Memory.OffsetImm ==
nullptr &&
1512 (alignOK || Memory.Alignment == Alignment);
1514 bool isMemNoOffsetT(
bool alignOK =
false,
unsigned Alignment = 0)
const {
1518 if (!ARMMCRegisterClasses[ARM::tGPRRegClassID].
contains(
1523 return !Memory.OffsetRegNum && Memory.OffsetImm ==
nullptr &&
1524 (alignOK || Memory.Alignment == Alignment);
1526 bool isMemPCRelImm12()
const {
1527 if (!isGPRMem() || Memory.OffsetRegNum || Memory.Alignment != 0)
1530 if (Memory.BaseRegNum != ARM::PC)
1533 if (!Memory.OffsetImm)
return true;
1535 int64_t Val =
CE->getValue();
1536 return (Val > -4096 && Val < 4096) ||
1537 (Val == std::numeric_limits<int32_t>::min());
1542 bool isAlignedMemory()
const {
1543 return isMemNoOffset(
true);
1546 bool isAlignedMemoryNone()
const {
1547 return isMemNoOffset(
false, 0);
1550 bool isDupAlignedMemoryNone()
const {
1551 return isMemNoOffset(
false, 0);
1554 bool isAlignedMemory16()
const {
1555 if (isMemNoOffset(
false, 2))
1557 return isMemNoOffset(
false, 0);
1560 bool isDupAlignedMemory16()
const {
1561 if (isMemNoOffset(
false, 2))
1563 return isMemNoOffset(
false, 0);
1566 bool isAlignedMemory32()
const {
1567 if (isMemNoOffset(
false, 4))
1569 return isMemNoOffset(
false, 0);
1572 bool isDupAlignedMemory32()
const {
1573 if (isMemNoOffset(
false, 4))
1575 return isMemNoOffset(
false, 0);
1578 bool isAlignedMemory64()
const {
1579 if (isMemNoOffset(
false, 8))
1581 return isMemNoOffset(
false, 0);
1584 bool isDupAlignedMemory64()
const {
1585 if (isMemNoOffset(
false, 8))
1587 return isMemNoOffset(
false, 0);
1590 bool isAlignedMemory64or128()
const {
1591 if (isMemNoOffset(
false, 8))
1593 if (isMemNoOffset(
false, 16))
1595 return isMemNoOffset(
false, 0);
1598 bool isDupAlignedMemory64or128()
const {
1599 if (isMemNoOffset(
false, 8))
1601 if (isMemNoOffset(
false, 16))
1603 return isMemNoOffset(
false, 0);
1606 bool isAlignedMemory64or128or256()
const {
1607 if (isMemNoOffset(
false, 8))
1609 if (isMemNoOffset(
false, 16))
1611 if (isMemNoOffset(
false, 32))
1613 return isMemNoOffset(
false, 0);
1616 bool isAddrMode2()
const {
1617 if (!isGPRMem() || Memory.Alignment != 0)
return false;
1619 if (Memory.OffsetRegNum)
return true;
1621 if (!Memory.OffsetImm)
return true;
1623 int64_t Val =
CE->getValue();
1624 return Val > -4096 && Val < 4096;
1629 bool isAM2OffsetImm()
const {
1630 if (!isImm())
return false;
1633 if (!CE)
return false;
1634 int64_t Val =
CE->getValue();
1635 return (Val == std::numeric_limits<int32_t>::min()) ||
1636 (Val > -4096 && Val < 4096);
1639 bool isAddrMode3()
const {
1645 if (!isGPRMem() || Memory.Alignment != 0)
return false;
1649 if (Memory.OffsetRegNum)
return true;
1651 if (!Memory.OffsetImm)
return true;
1653 int64_t Val =
CE->getValue();
1656 return (Val > -256 && Val < 256) ||
1657 Val == std::numeric_limits<int32_t>::min();
1662 bool isAM3Offset()
const {
1669 if (!CE)
return false;
1670 int64_t Val =
CE->getValue();
1672 return (Val > -256 && Val < 256) ||
1673 Val == std::numeric_limits<int32_t>::min();
1676 bool isAddrMode5()
const {
1682 if (!isGPRMem() || Memory.Alignment != 0)
return false;
1684 if (Memory.OffsetRegNum)
return false;
1686 if (!Memory.OffsetImm)
return true;
1688 int64_t Val =
CE->getValue();
1689 return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
1690 Val == std::numeric_limits<int32_t>::min();
1695 bool isAddrMode5FP16()
const {
1701 if (!isGPRMem() || Memory.Alignment != 0)
return false;
1703 if (Memory.OffsetRegNum)
return false;
1705 if (!Memory.OffsetImm)
return true;
1707 int64_t Val =
CE->getValue();
1708 return (Val >= -510 && Val <= 510 && ((Val & 1) == 0)) ||
1709 Val == std::numeric_limits<int32_t>::min();
1714 bool isMemTBB()
const {
1715 if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1721 bool isMemTBH()
const {
1722 if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1723 Memory.ShiftType !=
ARM_AM::lsl || Memory.ShiftImm != 1 ||
1724 Memory.Alignment != 0 )
1729 bool isMemRegOffset()
const {
1730 if (!isGPRMem() || !Memory.OffsetRegNum || Memory.Alignment != 0)
1735 bool isT2MemRegOffset()
const {
1736 if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1737 Memory.Alignment != 0 || Memory.BaseRegNum == ARM::PC)
1742 if (Memory.ShiftType !=
ARM_AM::lsl || Memory.ShiftImm > 3)
1747 bool isMemThumbRR()
const {
1750 if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1757 bool isMemThumbRIs4()
const {
1758 if (!isGPRMem() || Memory.OffsetRegNum ||
1762 if (!Memory.OffsetImm)
return true;
1764 int64_t Val =
CE->getValue();
1765 return Val >= 0 && Val <= 124 && (Val % 4) == 0;
1770 bool isMemThumbRIs2()
const {
1771 if (!isGPRMem() || Memory.OffsetRegNum ||
1775 if (!Memory.OffsetImm)
return true;
1777 int64_t Val =
CE->getValue();
1778 return Val >= 0 && Val <= 62 && (Val % 2) == 0;
1783 bool isMemThumbRIs1()
const {
1784 if (!isGPRMem() || Memory.OffsetRegNum ||
1788 if (!Memory.OffsetImm)
return true;
1790 int64_t Val =
CE->getValue();
1791 return Val >= 0 && Val <= 31;
1796 bool isMemThumbSPI()
const {
1797 if (!isGPRMem() || Memory.OffsetRegNum || Memory.BaseRegNum != ARM::SP ||
1798 Memory.Alignment != 0)
1801 if (!Memory.OffsetImm)
return true;
1803 int64_t Val =
CE->getValue();
1804 return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
1809 bool isMemImm8s4Offset()
const {
1815 if (!isGPRMem() || Memory.OffsetRegNum || Memory.Alignment != 0)
1818 if (!Memory.OffsetImm)
return true;
1820 int64_t Val =
CE->getValue();
1822 return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) ||
1823 Val == std::numeric_limits<int32_t>::min();
1828 bool isMemImm7s4Offset()
const {
1834 if (!isGPRMem() || Memory.OffsetRegNum || Memory.Alignment != 0 ||
1835 !ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
1839 if (!Memory.OffsetImm)
return true;
1841 int64_t Val =
CE->getValue();
1843 return (Val >= -508 && Val <= 508 && (Val & 3) == 0) || Val == INT32_MIN;
1848 bool isMemImm0_1020s4Offset()
const {
1849 if (!isGPRMem() || Memory.OffsetRegNum || Memory.Alignment != 0)
1852 if (!Memory.OffsetImm)
return true;
1854 int64_t Val =
CE->getValue();
1855 return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
1860 bool isMemImm8Offset()
const {
1861 if (!isGPRMem() || Memory.OffsetRegNum || Memory.Alignment != 0)
1864 if (Memory.BaseRegNum == ARM::PC)
return false;
1866 if (!Memory.OffsetImm)
return true;
1868 int64_t Val =
CE->getValue();
1869 return (Val == std::numeric_limits<int32_t>::min()) ||
1870 (Val > -256 && Val < 256);
1875 template<
unsigned Bits,
unsigned RegClassID>
1876 bool isMemImm7ShiftedOffset()
const {
1877 if (!isGPRMem() || Memory.OffsetRegNum || Memory.Alignment != 0 ||
1878 !ARMMCRegisterClasses[RegClassID].contains(Memory.BaseRegNum))
1884 if (!Memory.OffsetImm)
return true;
1886 int64_t Val =
CE->getValue();
1890 if (Val == INT32_MIN)
1893 unsigned Divisor = 1U <<
Bits;
1896 if (Val % Divisor != 0)
1901 return (Val >= -127 && Val <= 127);
1906 template <
int shift>
bool isMemRegRQOffset()
const {
1907 if (!isMVEMem() || Memory.OffsetImm !=
nullptr || Memory.Alignment != 0)
1910 if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].
contains(
1913 if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].
contains(
1914 Memory.OffsetRegNum))
1921 (Memory.ShiftType !=
ARM_AM::uxtw || Memory.ShiftImm != shift))
1927 template <
int shift>
bool isMemRegQOffset()
const {
1928 if (!isMVEMem() || Memory.OffsetRegNum || Memory.Alignment != 0)
1931 if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].
contains(
1935 if (!Memory.OffsetImm)
1937 static_assert(shift < 56,
1938 "Such that we dont shift by a value higher than 62");
1940 int64_t Val =
CE->getValue();
1943 if ((Val & ((1U << shift) - 1)) != 0)
1949 int64_t
Range = (1U << (7 + shift)) - 1;
1950 return (Val == INT32_MIN) || (Val > -
Range && Val <
Range);
1955 bool isMemPosImm8Offset()
const {
1956 if (!isGPRMem() || Memory.OffsetRegNum || Memory.Alignment != 0)
1959 if (!Memory.OffsetImm)
return true;
1961 int64_t Val =
CE->getValue();
1962 return Val >= 0 && Val < 256;
1967 bool isMemNegImm8Offset()
const {
1968 if (!isGPRMem() || Memory.OffsetRegNum || Memory.Alignment != 0)
1971 if (Memory.BaseRegNum == ARM::PC)
return false;
1973 if (!Memory.OffsetImm)
return false;
1975 int64_t Val =
CE->getValue();
1976 return (Val == std::numeric_limits<int32_t>::min()) ||
1977 (Val > -256 && Val < 0);
1982 bool isMemUImm12Offset()
const {
1983 if (!isGPRMem() || Memory.OffsetRegNum || Memory.Alignment != 0)
1986 if (!Memory.OffsetImm)
return true;
1988 int64_t Val =
CE->getValue();
1989 return (Val >= 0 && Val < 4096);
1994 bool isMemImm12Offset()
const {
2002 if (!isGPRMem() || Memory.OffsetRegNum || Memory.Alignment != 0)
2005 if (!Memory.OffsetImm)
return true;
2007 int64_t Val =
CE->getValue();
2008 return (Val > -4096 && Val < 4096) ||
2009 (Val == std::numeric_limits<int32_t>::min());
2016 bool isConstPoolAsmImm()
const {
2019 return (isConstantPoolImm());
2022 bool isPostIdxImm8()
const {
2023 if (!isImm())
return false;
2025 if (!CE)
return false;
2026 int64_t Val =
CE->getValue();
2027 return (Val > -256 && Val < 256) ||
2028 (Val == std::numeric_limits<int32_t>::min());
2031 bool isPostIdxImm8s4()
const {
2032 if (!isImm())
return false;
2034 if (!CE)
return false;
2035 int64_t Val =
CE->getValue();
2036 return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
2037 (Val == std::numeric_limits<int32_t>::min());
2040 bool isMSRMask()
const {
return Kind == k_MSRMask; }
2041 bool isBankedReg()
const {
return Kind == k_BankedReg; }
2042 bool isProcIFlags()
const {
return Kind == k_ProcIFlags; }
2045 bool isAnyVectorList()
const {
2046 return Kind == k_VectorList || Kind == k_VectorListAllLanes ||
2047 Kind == k_VectorListIndexed;
2050 bool isVectorList()
const {
return Kind == k_VectorList; }
2052 bool isSingleSpacedVectorList()
const {
2053 return Kind == k_VectorList && !VectorList.isDoubleSpaced;
2056 bool isDoubleSpacedVectorList()
const {
2057 return Kind == k_VectorList && VectorList.isDoubleSpaced;
2060 bool isVecListOneD()
const {
2062 if (isDReg() && !Parser->hasMVE())
2064 if (!isSingleSpacedVectorList())
return false;
2065 return VectorList.Count == 1;
2068 bool isVecListTwoMQ()
const {
2069 return isSingleSpacedVectorList() && VectorList.Count == 2 &&
2070 ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
2074 bool isVecListDPair()
const {
2077 if (isQReg() && !Parser->hasMVE())
2079 if (!isSingleSpacedVectorList())
return false;
2080 return (ARMMCRegisterClasses[ARM::DPairRegClassID]
2084 bool isVecListThreeD()
const {
2085 if (!isSingleSpacedVectorList())
return false;
2086 return VectorList.Count == 3;
2089 bool isVecListFourD()
const {
2090 if (!isSingleSpacedVectorList())
return false;
2091 return VectorList.Count == 4;
2094 bool isVecListDPairSpaced()
const {
2095 if (Kind != k_VectorList)
return false;
2096 if (isSingleSpacedVectorList())
return false;
2097 return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
2101 bool isVecListThreeQ()
const {
2102 if (!isDoubleSpacedVectorList())
return false;
2103 return VectorList.Count == 3;
2106 bool isVecListFourQ()
const {
2107 if (!isDoubleSpacedVectorList())
return false;
2108 return VectorList.Count == 4;
2111 bool isVecListFourMQ()
const {
2112 return isSingleSpacedVectorList() && VectorList.Count == 4 &&
2113 ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
2117 bool isSingleSpacedVectorAllLanes()
const {
2118 return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
2121 bool isDoubleSpacedVectorAllLanes()
const {
2122 return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
2125 bool isVecListOneDAllLanes()
const {
2126 if (!isSingleSpacedVectorAllLanes())
return false;
2127 return VectorList.Count == 1;
2130 bool isVecListDPairAllLanes()
const {
2131 if (!isSingleSpacedVectorAllLanes())
return false;
2132 return (ARMMCRegisterClasses[ARM::DPairRegClassID]
2136 bool isVecListDPairSpacedAllLanes()
const {
2137 if (!isDoubleSpacedVectorAllLanes())
return false;
2138 return VectorList.Count == 2;
2141 bool isVecListThreeDAllLanes()
const {
2142 if (!isSingleSpacedVectorAllLanes())
return false;
2143 return VectorList.Count == 3;
2146 bool isVecListThreeQAllLanes()
const {
2147 if (!isDoubleSpacedVectorAllLanes())
return false;
2148 return VectorList.Count == 3;
2151 bool isVecListFourDAllLanes()
const {
2152 if (!isSingleSpacedVectorAllLanes())
return false;
2153 return VectorList.Count == 4;
2156 bool isVecListFourQAllLanes()
const {
2157 if (!isDoubleSpacedVectorAllLanes())
return false;
2158 return VectorList.Count == 4;
2161 bool isSingleSpacedVectorIndexed()
const {
2162 return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
2165 bool isDoubleSpacedVectorIndexed()
const {
2166 return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
2169 bool isVecListOneDByteIndexed()
const {
2170 if (!isSingleSpacedVectorIndexed())
return false;
2171 return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
2174 bool isVecListOneDHWordIndexed()
const {
2175 if (!isSingleSpacedVectorIndexed())
return false;
2176 return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
2179 bool isVecListOneDWordIndexed()
const {
2180 if (!isSingleSpacedVectorIndexed())
return false;
2181 return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
2184 bool isVecListTwoDByteIndexed()
const {
2185 if (!isSingleSpacedVectorIndexed())
return false;
2186 return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
2189 bool isVecListTwoDHWordIndexed()
const {
2190 if (!isSingleSpacedVectorIndexed())
return false;
2191 return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
2194 bool isVecListTwoQWordIndexed()
const {
2195 if (!isDoubleSpacedVectorIndexed())
return false;
2196 return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
2199 bool isVecListTwoQHWordIndexed()
const {
2200 if (!isDoubleSpacedVectorIndexed())
return false;
2201 return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
2204 bool isVecListTwoDWordIndexed()
const {
2205 if (!isSingleSpacedVectorIndexed())
return false;
2206 return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
2209 bool isVecListThreeDByteIndexed()
const {
2210 if (!isSingleSpacedVectorIndexed())
return false;
2211 return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
2214 bool isVecListThreeDHWordIndexed()
const {
2215 if (!isSingleSpacedVectorIndexed())
return false;
2216 return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
2219 bool isVecListThreeQWordIndexed()
const {
2220 if (!isDoubleSpacedVectorIndexed())
return false;
2221 return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
2224 bool isVecListThreeQHWordIndexed()
const {
2225 if (!isDoubleSpacedVectorIndexed())
return false;
2226 return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
2229 bool isVecListThreeDWordIndexed()
const {
2230 if (!isSingleSpacedVectorIndexed())
return false;
2231 return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
2234 bool isVecListFourDByteIndexed()
const {
2235 if (!isSingleSpacedVectorIndexed())
return false;
2236 return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
2239 bool isVecListFourDHWordIndexed()
const {
2240 if (!isSingleSpacedVectorIndexed())
return false;
2241 return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
2244 bool isVecListFourQWordIndexed()
const {
2245 if (!isDoubleSpacedVectorIndexed())
return false;
2246 return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
2249 bool isVecListFourQHWordIndexed()
const {
2250 if (!isDoubleSpacedVectorIndexed())
return false;
2251 return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
2254 bool isVecListFourDWordIndexed()
const {
2255 if (!isSingleSpacedVectorIndexed())
return false;
2256 return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
2259 bool isVectorIndex()
const {
return Kind == k_VectorIndex; }
2261 template <
unsigned NumLanes>
2262 bool isVectorIndexInRange()
const {
2263 if (Kind != k_VectorIndex)
return false;
2264 return VectorIndex.Val < NumLanes;
2267 bool isVectorIndex8()
const {
return isVectorIndexInRange<8>(); }
2268 bool isVectorIndex16()
const {
return isVectorIndexInRange<4>(); }
2269 bool isVectorIndex32()
const {
return isVectorIndexInRange<2>(); }
2270 bool isVectorIndex64()
const {
return isVectorIndexInRange<1>(); }
2272 template<
int PermittedValue,
int OtherPermittedValue>
2273 bool isMVEPairVectorIndex()
const {
2274 if (Kind != k_VectorIndex)
return false;
2275 return VectorIndex.Val == PermittedValue ||
2276 VectorIndex.Val == OtherPermittedValue;
2279 bool isNEONi8splat()
const {
2280 if (!isImm())
return false;
2283 if (!CE)
return false;
2284 int64_t
Value =
CE->getValue();
2291 if (isNEONByteReplicate(2))
2297 if (!CE)
return false;
2298 unsigned Value =
CE->getValue();
2302 bool isNEONi16splatNot()
const {
2307 if (!CE)
return false;
2308 unsigned Value =
CE->getValue();
2313 if (isNEONByteReplicate(4))
2319 if (!CE)
return false;
2320 unsigned Value =
CE->getValue();
2324 bool isNEONi32splatNot()
const {
2329 if (!CE)
return false;
2330 unsigned Value =
CE->getValue();
// Check whether Value is a legal NEON VMOV/VMVN i32 immediate: all set
// bits confined to a single byte of the low 32 bits (X000, 0X00, 00X0,
// 000X), or the 00Xf / 0Xff "byte-shifted with ones" forms.
static bool isValidNEONi32vmovImm(int64_t Value) {
  return ((Value & 0xffffffffffffff00) == 0) ||
         ((Value & 0xffffffffffff00ff) == 0) ||
         ((Value & 0xffffffffff00ffff) == 0) ||
         ((Value & 0xffffffff00ffffff) == 0) ||
         ((Value & 0xffffffffffff00ff) == 0xff) ||
         ((Value & 0xffffffffff00ffff) == 0xffff);
}
2345 bool isNEONReplicate(
unsigned Width,
unsigned NumElems,
bool Inv)
const {
2346 assert((Width == 8 || Width == 16 || Width == 32) &&
2347 "Invalid element width");
2348 assert(NumElems * Width <= 64 &&
"Invalid result width");
2356 int64_t
Value =
CE->getValue();
2362 uint64_t
Mask = (1ull << Width) - 1;
2364 if (Width == 16 && (Elem & 0x00ff) != 0 && (Elem & 0xff00) != 0)
2366 if (Width == 32 && !isValidNEONi32vmovImm(Elem))
2369 for (
unsigned i = 1; i < NumElems; ++i) {
2371 if ((
Value & Mask) != Elem)
2377 bool isNEONByteReplicate(
unsigned NumBytes)
const {
2378 return isNEONReplicate(8, NumBytes,
false);
// Sanity-check the template widths used by the NEON replicate predicates:
// source element width must be 8/16/32, destination 16/32/64, and the
// source strictly narrower than the destination.
static void checkNeonReplicateArgs(unsigned FromW, unsigned ToW) {
  assert((FromW == 8 || FromW == 16 || FromW == 32) &&
         "Invalid source width");
  assert((ToW == 16 || ToW == 32 || ToW == 64) &&
         "Invalid destination width");
  assert(FromW < ToW && "ToW is not less than FromW");
}
2389 template<
unsigned FromW,
unsigned ToW>
2390 bool isNEONmovReplicate()
const {
2391 checkNeonReplicateArgs(FromW, ToW);
2392 if (ToW == 64 && isNEONi64splat())
2394 return isNEONReplicate(FromW, ToW / FromW,
false);
2397 template<
unsigned FromW,
unsigned ToW>
2398 bool isNEONinvReplicate()
const {
2399 checkNeonReplicateArgs(FromW, ToW);
2400 return isNEONReplicate(FromW, ToW / FromW,
true);
2403 bool isNEONi32vmov()
const {
2404 if (isNEONByteReplicate(4))
2412 return isValidNEONi32vmovImm(
CE->getValue());
2415 bool isNEONi32vmovNeg()
const {
2416 if (!isImm())
return false;
2419 if (!CE)
return false;
2420 return isValidNEONi32vmovImm(~
CE->getValue());
2423 bool isNEONi64splat()
const {
2424 if (!isImm())
return false;
2427 if (!CE)
return false;
2428 uint64_t
Value =
CE->getValue();
2430 for (
unsigned i = 0; i < 8; ++i, Value >>= 8)
2431 if ((
Value & 0xff) != 0 && (
Value & 0xff) != 0xff)
return false;
2435 template<
int64_t Angle,
int64_t Remainder>
2436 bool isComplexRotation()
const {
2437 if (!isImm())
return false;
2440 if (!CE)
return false;
2441 uint64_t
Value =
CE->getValue();
2443 return (
Value % Angle == Remainder &&
Value <= 270);
2446 bool isMVELongShift()
const {
2447 if (!isImm())
return false;
2450 if (!CE)
return false;
2451 uint64_t
Value =
CE->getValue();
2455 bool isMveSaturateOp()
const {
2456 if (!isImm())
return false;
2458 if (!CE)
return false;
2459 uint64_t
Value =
CE->getValue();
2463 bool isITCondCodeNoAL()
const {
2464 if (!isITCondCode())
return false;
2469 bool isITCondCodeRestrictedI()
const {
2470 if (!isITCondCode())
2476 bool isITCondCodeRestrictedS()
const {
2477 if (!isITCondCode())
2484 bool isITCondCodeRestrictedU()
const {
2485 if (!isITCondCode())
2491 bool isITCondCodeRestrictedFP()
const {
2492 if (!isITCondCode())
2499 void setVecListDPair(
unsigned int DPair) {
2500 Kind = k_VectorList;
2501 VectorList.RegNum = DPair;
2502 VectorList.Count = 2;
2503 VectorList.isDoubleSpaced =
false;
2506 void setVecListOneD(
unsigned int DReg) {
2507 Kind = k_VectorList;
2508 VectorList.RegNum =
DReg;
2509 VectorList.Count = 1;
2510 VectorList.isDoubleSpaced =
false;
2513 void addExpr(MCInst &Inst,
const MCExpr *Expr)
const {
2523 void addARMBranchTargetOperands(MCInst &Inst,
unsigned N)
const {
2524 assert(
N == 1 &&
"Invalid number of operands!");
2528 void addThumbBranchTargetOperands(MCInst &Inst,
unsigned N)
const {
2529 assert(
N == 1 &&
"Invalid number of operands!");
2533 void addCondCodeOperands(MCInst &Inst,
unsigned N)
const {
2534 assert(
N == 2 &&
"Invalid number of operands!");
2540 void addVPTPredNOperands(MCInst &Inst,
unsigned N)
const {
2541 assert(
N == 3 &&
"Invalid number of operands!");
2543 unsigned RegNum = getVPTPred() ==
ARMVCC::None ? ARM::NoRegister : ARM::P0;
2548 void addVPTPredROperands(MCInst &Inst,
unsigned N)
const {
2549 assert(
N == 4 &&
"Invalid number of operands!");
2550 addVPTPredNOperands(Inst,
N-1);
2553 RegNum = ARM::NoRegister;
2556 auto &MCID = Parser->getInstrDesc(Inst.
getOpcode());
2557 int TiedOp = MCID.getOperandConstraint(NextOpIndex,
MCOI::TIED_TO);
2559 "Inactive register in vpred_r is not tied to an output!");
2565 void addCoprocNumOperands(MCInst &Inst,
unsigned N)
const {
2566 assert(
N == 1 &&
"Invalid number of operands!");
2570 void addCoprocRegOperands(MCInst &Inst,
unsigned N)
const {
2571 assert(
N == 1 &&
"Invalid number of operands!");
2575 void addCoprocOptionOperands(MCInst &Inst,
unsigned N)
const {
2576 assert(
N == 1 &&
"Invalid number of operands!");
2580 void addITMaskOperands(MCInst &Inst,
unsigned N)
const {
2581 assert(
N == 1 &&
"Invalid number of operands!");
2585 void addITCondCodeOperands(MCInst &Inst,
unsigned N)
const {
2586 assert(
N == 1 &&
"Invalid number of operands!");
2590 void addITCondCodeInvOperands(MCInst &Inst,
unsigned N)
const {
2591 assert(
N == 1 &&
"Invalid number of operands!");
2595 void addCCOutOperands(MCInst &Inst,
unsigned N)
const {
2596 assert(
N == 1 &&
"Invalid number of operands!");
2600 void addRegOperands(MCInst &Inst,
unsigned N)
const {
2601 assert(
N == 1 &&
"Invalid number of operands!");
2605 void addRegShiftedRegOperands(MCInst &Inst,
unsigned N)
const {
2606 assert(
N == 3 &&
"Invalid number of operands!");
2607 assert(isRegShiftedReg() &&
2608 "addRegShiftedRegOperands() on non-RegShiftedReg!");
2615 void addRegShiftedImmOperands(MCInst &Inst,
unsigned N)
const {
2616 assert(
N == 2 &&
"Invalid number of operands!");
2617 assert(isRegShiftedImm() &&
2618 "addRegShiftedImmOperands() on non-RegShiftedImm!");
2621 unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm);
2626 void addShifterImmOperands(MCInst &Inst,
unsigned N)
const {
2627 assert(
N == 1 &&
"Invalid number of operands!");
2632 void addRegListOperands(MCInst &Inst,
unsigned N)
const {
2633 assert(
N == 1 &&
"Invalid number of operands!");
2634 const SmallVectorImpl<MCRegister> &RegList = getRegList();
2635 for (MCRegister
Reg : RegList)
2639 void addRegListWithAPSROperands(MCInst &Inst,
unsigned N)
const {
2640 assert(
N == 1 &&
"Invalid number of operands!");
2641 const SmallVectorImpl<MCRegister> &RegList = getRegList();
2642 for (MCRegister
Reg : RegList)
2646 void addDPRRegListOperands(MCInst &Inst,
unsigned N)
const {
2647 addRegListOperands(Inst,
N);
2650 void addSPRRegListOperands(MCInst &Inst,
unsigned N)
const {
2651 addRegListOperands(Inst,
N);
2654 void addFPSRegListWithVPROperands(MCInst &Inst,
unsigned N)
const {
2655 addRegListOperands(Inst,
N);
2658 void addFPDRegListWithVPROperands(MCInst &Inst,
unsigned N)
const {
2659 addRegListOperands(Inst,
N);
2662 void addRotImmOperands(MCInst &Inst,
unsigned N)
const {
2663 assert(
N == 1 &&
"Invalid number of operands!");
2668 void addModImmOperands(MCInst &Inst,
unsigned N)
const {
2669 assert(
N == 1 &&
"Invalid number of operands!");
2673 return addImmOperands(Inst,
N);
2678 void addModImmNotOperands(MCInst &Inst,
unsigned N)
const {
2679 assert(
N == 1 &&
"Invalid number of operands!");
2685 void addModImmNegOperands(MCInst &Inst,
unsigned N)
const {
2686 assert(
N == 1 &&
"Invalid number of operands!");
2692 void addThumbModImmNeg8_255Operands(MCInst &Inst,
unsigned N)
const {
2693 assert(
N == 1 &&
"Invalid number of operands!");
2695 uint32_t Val = -
CE->getValue();
2699 void addThumbModImmNeg1_7Operands(MCInst &Inst,
unsigned N)
const {
2700 assert(
N == 1 &&
"Invalid number of operands!");
2702 uint32_t Val = -
CE->getValue();
2706 void addBitfieldOperands(MCInst &Inst,
unsigned N)
const {
2707 assert(
N == 1 &&
"Invalid number of operands!");
2712 uint32_t
Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
2713 (32 - (lsb + width)));
2717 void addImmOperands(MCInst &Inst,
unsigned N)
const {
2718 assert(
N == 1 &&
"Invalid number of operands!");
2722 void addFBits16Operands(MCInst &Inst,
unsigned N)
const {
2723 assert(
N == 1 &&
"Invalid number of operands!");
2728 void addFBits32Operands(MCInst &Inst,
unsigned N)
const {
2729 assert(
N == 1 &&
"Invalid number of operands!");
2734 void addFPImmOperands(MCInst &Inst,
unsigned N)
const {
2735 assert(
N == 1 &&
"Invalid number of operands!");
2741 void addImm8s4Operands(MCInst &Inst,
unsigned N)
const {
2742 assert(
N == 1 &&
"Invalid number of operands!");
2749 void addImm7s4Operands(MCInst &Inst,
unsigned N)
const {
2750 assert(
N == 1 &&
"Invalid number of operands!");
2757 void addImm7Shift0Operands(MCInst &Inst,
unsigned N)
const {
2758 assert(
N == 1 &&
"Invalid number of operands!");
2763 void addImm7Shift1Operands(MCInst &Inst,
unsigned N)
const {
2764 assert(
N == 1 &&
"Invalid number of operands!");
2769 void addImm7Shift2Operands(MCInst &Inst,
unsigned N)
const {
2770 assert(
N == 1 &&
"Invalid number of operands!");
2775 void addImm7Operands(MCInst &Inst,
unsigned N)
const {
2776 assert(
N == 1 &&
"Invalid number of operands!");
2781 void addImm0_1020s4Operands(MCInst &Inst,
unsigned N)
const {
2782 assert(
N == 1 &&
"Invalid number of operands!");
2789 void addImm0_508s4NegOperands(MCInst &Inst,
unsigned N)
const {
2790 assert(
N == 1 &&
"Invalid number of operands!");
2797 void addImm0_508s4Operands(MCInst &Inst,
unsigned N)
const {
2798 assert(
N == 1 &&
"Invalid number of operands!");
2805 void addImm1_16Operands(MCInst &Inst,
unsigned N)
const {
2806 assert(
N == 1 &&
"Invalid number of operands!");
2813 void addImm1_32Operands(MCInst &Inst,
unsigned N)
const {
2814 assert(
N == 1 &&
"Invalid number of operands!");
2821 void addImmThumbSROperands(MCInst &Inst,
unsigned N)
const {
2822 assert(
N == 1 &&
"Invalid number of operands!");
2826 unsigned Imm =
CE->getValue();
2830 void addPKHASRImmOperands(MCInst &Inst,
unsigned N)
const {
2831 assert(
N == 1 &&
"Invalid number of operands!");
2835 int Val =
CE->getValue();
2839 void addT2SOImmNotOperands(MCInst &Inst,
unsigned N)
const {
2840 assert(
N == 1 &&
"Invalid number of operands!");
2847 void addT2SOImmNegOperands(MCInst &Inst,
unsigned N)
const {
2848 assert(
N == 1 &&
"Invalid number of operands!");
2855 void addImm0_4095NegOperands(MCInst &Inst,
unsigned N)
const {
2856 assert(
N == 1 &&
"Invalid number of operands!");
2863 void addUnsignedOffset_b8s2Operands(MCInst &Inst,
unsigned N)
const {
2872 void addThumbMemPCOperands(MCInst &Inst,
unsigned N)
const {
2873 assert(
N == 1 &&
"Invalid number of operands!");
2885 assert(isGPRMem() &&
"Unknown value type!");
2893 void addMemBarrierOptOperands(MCInst &Inst,
unsigned N)
const {
2894 assert(
N == 1 &&
"Invalid number of operands!");
2898 void addInstSyncBarrierOptOperands(MCInst &Inst,
unsigned N)
const {
2899 assert(
N == 1 &&
"Invalid number of operands!");
2903 void addTraceSyncBarrierOptOperands(MCInst &Inst,
unsigned N)
const {
2904 assert(
N == 1 &&
"Invalid number of operands!");
2908 void addMemNoOffsetOperands(MCInst &Inst,
unsigned N)
const {
2909 assert(
N == 1 &&
"Invalid number of operands!");
2913 void addMemNoOffsetT2Operands(MCInst &Inst,
unsigned N)
const {
2914 assert(
N == 1 &&
"Invalid number of operands!");
2918 void addMemNoOffsetT2NoSpOperands(MCInst &Inst,
unsigned N)
const {
2919 assert(
N == 1 &&
"Invalid number of operands!");
2923 void addMemNoOffsetTOperands(MCInst &Inst,
unsigned N)
const {
2924 assert(
N == 1 &&
"Invalid number of operands!");
2928 void addMemPCRelImm12Operands(MCInst &Inst,
unsigned N)
const {
2929 assert(
N == 1 &&
"Invalid number of operands!");
2936 void addAdrLabelOperands(MCInst &Inst,
unsigned N)
const {
2937 assert(
N == 1 &&
"Invalid number of operands!");
2938 assert(isImm() &&
"Not an immediate!");
2948 int Val =
CE->getValue();
2952 void addAlignedMemoryOperands(MCInst &Inst,
unsigned N)
const {
2953 assert(
N == 2 &&
"Invalid number of operands!");
2958 void addDupAlignedMemoryNoneOperands(MCInst &Inst,
unsigned N)
const {
2959 addAlignedMemoryOperands(Inst,
N);
2962 void addAlignedMemoryNoneOperands(MCInst &Inst,
unsigned N)
const {
2963 addAlignedMemoryOperands(Inst,
N);
2966 void addAlignedMemory16Operands(MCInst &Inst,
unsigned N)
const {
2967 addAlignedMemoryOperands(Inst,
N);
2970 void addDupAlignedMemory16Operands(MCInst &Inst,
unsigned N)
const {
2971 addAlignedMemoryOperands(Inst,
N);
2974 void addAlignedMemory32Operands(MCInst &Inst,
unsigned N)
const {
2975 addAlignedMemoryOperands(Inst,
N);
2978 void addDupAlignedMemory32Operands(MCInst &Inst,
unsigned N)
const {
2979 addAlignedMemoryOperands(Inst,
N);
2982 void addAlignedMemory64Operands(MCInst &Inst,
unsigned N)
const {
2983 addAlignedMemoryOperands(Inst,
N);
2986 void addDupAlignedMemory64Operands(MCInst &Inst,
unsigned N)
const {
2987 addAlignedMemoryOperands(Inst,
N);
2990 void addAlignedMemory64or128Operands(MCInst &Inst,
unsigned N)
const {
2991 addAlignedMemoryOperands(Inst,
N);
2994 void addDupAlignedMemory64or128Operands(MCInst &Inst,
unsigned N)
const {
2995 addAlignedMemoryOperands(Inst,
N);
2998 void addAlignedMemory64or128or256Operands(MCInst &Inst,
unsigned N)
const {
2999 addAlignedMemoryOperands(Inst,
N);
3002 void addAddrMode2Operands(MCInst &Inst,
unsigned N)
const {
3003 assert(
N == 3 &&
"Invalid number of operands!");
3006 if (!Memory.OffsetRegNum) {
3007 if (!Memory.OffsetImm)
3010 int32_t Val =
CE->getValue();
3013 if (Val == std::numeric_limits<int32_t>::min())
3026 Memory.ShiftImm, Memory.ShiftType);
3031 void addAM2OffsetImmOperands(MCInst &Inst,
unsigned N)
const {
3032 assert(
N == 2 &&
"Invalid number of operands!");
3034 assert(CE &&
"non-constant AM2OffsetImm operand!");
3035 int32_t Val =
CE->getValue();
3038 if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
3039 if (Val < 0) Val = -Val;
3045 void addAddrMode3Operands(MCInst &Inst,
unsigned N)
const {
3046 assert(
N == 3 &&
"Invalid number of operands!");
3059 if (!Memory.OffsetRegNum) {
3060 if (!Memory.OffsetImm)
3063 int32_t Val =
CE->getValue();
3066 if (Val == std::numeric_limits<int32_t>::min())
3083 void addAM3OffsetOperands(MCInst &Inst,
unsigned N)
const {
3084 assert(
N == 2 &&
"Invalid number of operands!");
3085 if (Kind == k_PostIndexRegister) {
3094 const MCConstantExpr *
CE =
static_cast<const MCConstantExpr*
>(
getImm());
3095 int32_t Val =
CE->getValue();
3098 if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
3099 if (Val < 0) Val = -Val;
3105 void addAddrMode5Operands(MCInst &Inst,
unsigned N)
const {
3106 assert(
N == 2 &&
"Invalid number of operands!");
3117 if (!Memory.OffsetImm)
3121 int32_t Val =
CE->getValue() / 4;
3124 if (Val == std::numeric_limits<int32_t>::min())
3134 void addAddrMode5FP16Operands(MCInst &Inst,
unsigned N)
const {
3135 assert(
N == 2 &&
"Invalid number of operands!");
3147 if (!Memory.OffsetImm)
3150 int32_t Val =
CE->getValue() / 2;
3153 if (Val == std::numeric_limits<int32_t>::min())
3163 void addMemImm8s4OffsetOperands(MCInst &Inst,
unsigned N)
const {
3164 assert(
N == 2 &&
"Invalid number of operands!");
3175 addExpr(Inst, Memory.OffsetImm);
3178 void addMemImm7s4OffsetOperands(MCInst &Inst,
unsigned N)
const {
3179 assert(
N == 2 &&
"Invalid number of operands!");
3190 addExpr(Inst, Memory.OffsetImm);
3193 void addMemImm0_1020s4OffsetOperands(MCInst &Inst,
unsigned N)
const {
3194 assert(
N == 2 &&
"Invalid number of operands!");
3196 if (!Memory.OffsetImm)
3205 void addMemImmOffsetOperands(MCInst &Inst,
unsigned N)
const {
3206 assert(
N == 2 &&
"Invalid number of operands!");
3208 addExpr(Inst, Memory.OffsetImm);
3211 void addMemRegRQOffsetOperands(MCInst &Inst,
unsigned N)
const {
3212 assert(
N == 2 &&
"Invalid number of operands!");
3217 void addMemUImm12OffsetOperands(MCInst &Inst,
unsigned N)
const {
3218 assert(
N == 2 &&
"Invalid number of operands!");
3228 addExpr(Inst, Memory.OffsetImm);
3231 void addMemImm12OffsetOperands(MCInst &Inst,
unsigned N)
const {
3232 assert(
N == 2 &&
"Invalid number of operands!");
3242 addExpr(Inst, Memory.OffsetImm);
3245 void addConstPoolAsmImmOperands(MCInst &Inst,
unsigned N)
const {
3246 assert(
N == 1 &&
"Invalid number of operands!");
3249 addExpr(Inst, getConstantPoolImm());
3252 void addMemTBBOperands(MCInst &Inst,
unsigned N)
const {
3253 assert(
N == 2 &&
"Invalid number of operands!");
3258 void addMemTBHOperands(MCInst &Inst,
unsigned N)
const {
3259 assert(
N == 2 &&
"Invalid number of operands!");
3264 void addMemRegOffsetOperands(MCInst &Inst,
unsigned N)
const {
3265 assert(
N == 3 &&
"Invalid number of operands!");
3268 Memory.ShiftImm, Memory.ShiftType);
3274 void addT2MemRegOffsetOperands(MCInst &Inst,
unsigned N)
const {
3275 assert(
N == 3 &&
"Invalid number of operands!");
3281 void addMemThumbRROperands(MCInst &Inst,
unsigned N)
const {
3282 assert(
N == 2 &&
"Invalid number of operands!");
3287 void addMemThumbRIs4Operands(MCInst &Inst,
unsigned N)
const {
3288 assert(
N == 2 &&
"Invalid number of operands!");
3290 if (!Memory.OffsetImm)
3299 void addMemThumbRIs2Operands(MCInst &Inst,
unsigned N)
const {
3300 assert(
N == 2 &&
"Invalid number of operands!");
3302 if (!Memory.OffsetImm)
3310 void addMemThumbRIs1Operands(MCInst &Inst,
unsigned N)
const {
3311 assert(
N == 2 &&
"Invalid number of operands!");
3313 addExpr(Inst, Memory.OffsetImm);
3316 void addMemThumbSPIOperands(MCInst &Inst,
unsigned N)
const {
3317 assert(
N == 2 &&
"Invalid number of operands!");
3319 if (!Memory.OffsetImm)
3328 void addPostIdxImm8Operands(MCInst &Inst,
unsigned N)
const {
3329 assert(
N == 1 &&
"Invalid number of operands!");
3331 assert(CE &&
"non-constant post-idx-imm8 operand!");
3332 int Imm =
CE->getValue();
3333 bool isAdd =
Imm >= 0;
3334 if (Imm == std::numeric_limits<int32_t>::min())
Imm = 0;
3339 void addPostIdxImm8s4Operands(MCInst &Inst,
unsigned N)
const {
3340 assert(
N == 1 &&
"Invalid number of operands!");
3342 assert(CE &&
"non-constant post-idx-imm8s4 operand!");
3343 int Imm =
CE->getValue();
3344 bool isAdd =
Imm >= 0;
3345 if (Imm == std::numeric_limits<int32_t>::min())
Imm = 0;
3351 void addPostIdxRegOperands(MCInst &Inst,
unsigned N)
const {
3352 assert(
N == 2 &&
"Invalid number of operands!");
3357 void addPostIdxRegShiftedOperands(MCInst &Inst,
unsigned N)
const {
3358 assert(
N == 2 &&
"Invalid number of operands!");
3364 PostIdxReg.ShiftTy);
3368 void addPowerTwoOperands(MCInst &Inst,
unsigned N)
const {
3369 assert(
N == 1 &&
"Invalid number of operands!");
3374 void addMSRMaskOperands(MCInst &Inst,
unsigned N)
const {
3375 assert(
N == 1 &&
"Invalid number of operands!");
3379 void addBankedRegOperands(MCInst &Inst,
unsigned N)
const {
3380 assert(
N == 1 &&
"Invalid number of operands!");
3384 void addProcIFlagsOperands(MCInst &Inst,
unsigned N)
const {
3385 assert(
N == 1 &&
"Invalid number of operands!");
3389 void addVecListOperands(MCInst &Inst,
unsigned N)
const {
3390 assert(
N == 1 &&
"Invalid number of operands!");
3392 if (isAnyVectorList())
3394 else if (isDReg() && !Parser->hasMVE()) {
3396 }
else if (isQReg() && !Parser->hasMVE()) {
3397 MCRegister DPair = Parser->getDRegFromQReg(
Reg.RegNum);
3398 DPair = Parser->getMRI()->getMatchingSuperReg(
3399 DPair, ARM::dsub_0, &ARMMCRegisterClasses[ARM::DPairRegClassID]);
3404 "attempted to add a vector list register with wrong type!");
3408 void addMVEVecListOperands(MCInst &Inst,
unsigned N)
const {
3409 assert(
N == 1 &&
"Invalid number of operands!");
3425 const MCRegisterClass *RC_in = &ARMMCRegisterClasses[ARM::MQPRRegClassID];
3426 const MCRegisterClass *RC_out =
3427 (VectorList.Count == 2) ? &ARMMCRegisterClasses[ARM::MQQPRRegClassID]
3428 : &ARMMCRegisterClasses[
ARM::MQQQQPRRegClassID];
3431 for (
I = 0;
I <
E;
I++)
3434 assert(
I <
E &&
"Invalid vector list start register!");
3439 void addVecListIndexedOperands(MCInst &Inst,
unsigned N)
const {
3440 assert(
N == 2 &&
"Invalid number of operands!");
3445 void addVectorIndex8Operands(MCInst &Inst,
unsigned N)
const {
3446 assert(
N == 1 &&
"Invalid number of operands!");
3450 void addVectorIndex16Operands(MCInst &Inst,
unsigned N)
const {
3451 assert(
N == 1 &&
"Invalid number of operands!");
3455 void addVectorIndex32Operands(MCInst &Inst,
unsigned N)
const {
3456 assert(
N == 1 &&
"Invalid number of operands!");
3460 void addVectorIndex64Operands(MCInst &Inst,
unsigned N)
const {
3461 assert(
N == 1 &&
"Invalid number of operands!");
3465 void addMVEVectorIndexOperands(MCInst &Inst,
unsigned N)
const {
3466 assert(
N == 1 &&
"Invalid number of operands!");
3470 void addMVEPairVectorIndexOperands(MCInst &Inst,
unsigned N)
const {
3471 assert(
N == 1 &&
"Invalid number of operands!");
3475 void addNEONi8splatOperands(MCInst &Inst,
unsigned N)
const {
3476 assert(
N == 1 &&
"Invalid number of operands!");
3483 void addNEONi16splatOperands(MCInst &Inst,
unsigned N)
const {
3484 assert(
N == 1 &&
"Invalid number of operands!");
3487 unsigned Value =
CE->getValue();
3492 void addNEONi16splatNotOperands(MCInst &Inst,
unsigned N)
const {
3493 assert(
N == 1 &&
"Invalid number of operands!");
3496 unsigned Value =
CE->getValue();
3501 void addNEONi32splatOperands(MCInst &Inst,
unsigned N)
const {
3502 assert(
N == 1 &&
"Invalid number of operands!");
3505 unsigned Value =
CE->getValue();
3510 void addNEONi32splatNotOperands(MCInst &Inst,
unsigned N)
const {
3511 assert(
N == 1 &&
"Invalid number of operands!");
3514 unsigned Value =
CE->getValue();
3519 void addNEONi8ReplicateOperands(MCInst &Inst,
bool Inv)
const {
3524 "All instructions that wants to replicate non-zero byte "
3525 "always must be replaced with VMOVv8i8 or VMOVv16i8.");
3526 unsigned Value =
CE->getValue();
3529 unsigned B =
Value & 0xff;
3534 void addNEONinvi8ReplicateOperands(MCInst &Inst,
unsigned N)
const {
3535 assert(
N == 1 &&
"Invalid number of operands!");
3536 addNEONi8ReplicateOperands(Inst,
true);
3539 static unsigned encodeNeonVMOVImmediate(
unsigned Value) {
3542 else if (
Value > 0xffff &&
Value <= 0xffffff)
3544 else if (
Value > 0xffffff)
3549 void addNEONi32vmovOperands(MCInst &Inst,
unsigned N)
const {
3550 assert(
N == 1 &&
"Invalid number of operands!");
3553 unsigned Value = encodeNeonVMOVImmediate(
CE->getValue());
3557 void addNEONvmovi8ReplicateOperands(MCInst &Inst,
unsigned N)
const {
3558 assert(
N == 1 &&
"Invalid number of operands!");
3559 addNEONi8ReplicateOperands(Inst,
false);
3562 void addNEONvmovi16ReplicateOperands(MCInst &Inst,
unsigned N)
const {
3563 assert(
N == 1 &&
"Invalid number of operands!");
3569 "All instructions that want to replicate non-zero half-word "
3570 "always must be replaced with V{MOV,MVN}v{4,8}i16.");
3571 uint64_t
Value =
CE->getValue();
3572 unsigned Elem =
Value & 0xffff;
3574 Elem = (Elem >> 8) | 0x200;
3578 void addNEONi32vmovNegOperands(MCInst &Inst,
unsigned N)
const {
3579 assert(
N == 1 &&
"Invalid number of operands!");
3582 unsigned Value = encodeNeonVMOVImmediate(~
CE->getValue());
3586 void addNEONvmovi32ReplicateOperands(MCInst &Inst,
unsigned N)
const {
3587 assert(
N == 1 &&
"Invalid number of operands!");
3593 "All instructions that want to replicate non-zero word "
3594 "always must be replaced with V{MOV,MVN}v{2,4}i32.");
3595 uint64_t
Value =
CE->getValue();
3596 unsigned Elem = encodeNeonVMOVImmediate(
Value & 0xffffffff);
3600 void addNEONi64splatOperands(MCInst &Inst,
unsigned N)
const {
3601 assert(
N == 1 &&
"Invalid number of operands!");
3604 uint64_t
Value =
CE->getValue();
3606 for (
unsigned i = 0; i < 8; ++i, Value >>= 8) {
3612 void addComplexRotationEvenOperands(MCInst &Inst,
unsigned N)
const {
3613 assert(
N == 1 &&
"Invalid number of operands!");
3618 void addComplexRotationOddOperands(MCInst &Inst,
unsigned N)
const {
3619 assert(
N == 1 &&
"Invalid number of operands!");
3624 void addMveSaturateOperands(MCInst &Inst,
unsigned N)
const {
3625 assert(
N == 1 &&
"Invalid number of operands!");
3627 unsigned Imm =
CE->getValue();
3628 assert((Imm == 48 || Imm == 64) &&
"Invalid saturate operand");
3632 void print(raw_ostream &OS,
const MCAsmInfo &MAI)
const override;
3634 static std::unique_ptr<ARMOperand> CreateITMask(
unsigned Mask, SMLoc S,
3635 ARMAsmParser &Parser) {
3636 auto Op = std::make_unique<ARMOperand>(k_ITCondMask, Parser);
3643 static std::unique_ptr<ARMOperand>
3645 auto Op = std::make_unique<ARMOperand>(k_CondCode, Parser);
3652 static std::unique_ptr<ARMOperand> CreateVPTPred(
ARMVCC::VPTCodes CC, SMLoc S,
3653 ARMAsmParser &Parser) {
3654 auto Op = std::make_unique<ARMOperand>(k_VPTPred, Parser);
3661 static std::unique_ptr<ARMOperand> CreateCoprocNum(
unsigned CopVal, SMLoc S,
3662 ARMAsmParser &Parser) {
3663 auto Op = std::make_unique<ARMOperand>(k_CoprocNum, Parser);
3664 Op->Cop.Val = CopVal;
3670 static std::unique_ptr<ARMOperand> CreateCoprocReg(
unsigned CopVal, SMLoc S,
3671 ARMAsmParser &Parser) {
3672 auto Op = std::make_unique<ARMOperand>(k_CoprocReg, Parser);
3673 Op->Cop.Val = CopVal;
3679 static std::unique_ptr<ARMOperand>
3680 CreateCoprocOption(
unsigned Val, SMLoc S, SMLoc
E, ARMAsmParser &Parser) {
3681 auto Op = std::make_unique<ARMOperand>(k_CoprocOption, Parser);
3688 static std::unique_ptr<ARMOperand> CreateCCOut(MCRegister
Reg, SMLoc S,
3689 ARMAsmParser &Parser) {
3690 auto Op = std::make_unique<ARMOperand>(k_CCOut, Parser);
3691 Op->Reg.RegNum =
Reg;
3697 static std::unique_ptr<ARMOperand> CreateToken(StringRef Str, SMLoc S,
3698 ARMAsmParser &Parser) {
3699 auto Op = std::make_unique<ARMOperand>(k_Token, Parser);
3700 Op->Tok.Data = Str.data();
3701 Op->Tok.Length = Str.size();
3707 static std::unique_ptr<ARMOperand> CreateReg(MCRegister
Reg, SMLoc S, SMLoc
E,
3708 ARMAsmParser &Parser) {
3709 auto Op = std::make_unique<ARMOperand>(k_Register, Parser);
3710 Op->Reg.RegNum =
Reg;
3716 static std::unique_ptr<ARMOperand>
3718 MCRegister ShiftReg,
unsigned ShiftImm, SMLoc S,
3719 SMLoc
E, ARMAsmParser &Parser) {
3720 auto Op = std::make_unique<ARMOperand>(k_ShiftedRegister, Parser);
3721 Op->RegShiftedReg.ShiftTy = ShTy;
3722 Op->RegShiftedReg.SrcReg = SrcReg;
3723 Op->RegShiftedReg.ShiftReg = ShiftReg;
3724 Op->RegShiftedReg.ShiftImm = ShiftImm;
3730 static std::unique_ptr<ARMOperand>
3732 unsigned ShiftImm, SMLoc S, SMLoc
E,
3733 ARMAsmParser &Parser) {
3734 auto Op = std::make_unique<ARMOperand>(k_ShiftedImmediate, Parser);
3735 Op->RegShiftedImm.ShiftTy = ShTy;
3736 Op->RegShiftedImm.SrcReg = SrcReg;
3737 Op->RegShiftedImm.ShiftImm = ShiftImm;
3743 static std::unique_ptr<ARMOperand> CreateShifterImm(
bool isASR,
unsigned Imm,
3745 ARMAsmParser &Parser) {
3746 auto Op = std::make_unique<ARMOperand>(k_ShifterImmediate, Parser);
3747 Op->ShifterImm.isASR = isASR;
3748 Op->ShifterImm.Imm =
Imm;
3754 static std::unique_ptr<ARMOperand>
3755 CreateRotImm(
unsigned Imm, SMLoc S, SMLoc
E, ARMAsmParser &Parser) {
3756 auto Op = std::make_unique<ARMOperand>(k_RotateImmediate, Parser);
3757 Op->RotImm.Imm =
Imm;
3763 static std::unique_ptr<ARMOperand> CreateModImm(
unsigned Bits,
unsigned Rot,
3765 ARMAsmParser &Parser) {
3766 auto Op = std::make_unique<ARMOperand>(k_ModifiedImmediate, Parser);
3768 Op->ModImm.Rot = Rot;
3774 static std::unique_ptr<ARMOperand>
3775 CreateConstantPoolImm(
const MCExpr *Val, SMLoc S, SMLoc
E,
3776 ARMAsmParser &Parser) {
3777 auto Op = std::make_unique<ARMOperand>(k_ConstantPoolImmediate, Parser);
3784 static std::unique_ptr<ARMOperand> CreateBitfield(
unsigned LSB,
3785 unsigned Width, SMLoc S,
3787 ARMAsmParser &Parser) {
3788 auto Op = std::make_unique<ARMOperand>(k_BitfieldDescriptor, Parser);
3789 Op->Bitfield.LSB = LSB;
3790 Op->Bitfield.Width = Width;
3796 static std::unique_ptr<ARMOperand>
3797 CreateRegList(SmallVectorImpl<std::pair<unsigned, MCRegister>> &Regs,
3798 SMLoc StartLoc, SMLoc EndLoc, ARMAsmParser &Parser) {
3799 assert(Regs.size() > 0 &&
"RegList contains no registers?");
3800 KindTy Kind = k_RegisterList;
3802 if (ARMMCRegisterClasses[ARM::DPRRegClassID].
contains(
3803 Regs.front().second)) {
3804 if (Regs.back().second == ARM::VPR)
3805 Kind = k_FPDRegisterListWithVPR;
3807 Kind = k_DPRRegisterList;
3808 }
else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
contains(
3809 Regs.front().second)) {
3810 if (Regs.back().second == ARM::VPR)
3811 Kind = k_FPSRegisterListWithVPR;
3813 Kind = k_SPRRegisterList;
3814 }
else if (Regs.front().second == ARM::VPR) {
3815 assert(Regs.size() == 1 &&
3816 "Register list starting with VPR expected to only contain VPR");
3817 Kind = k_FPSRegisterListWithVPR;
3820 if (Kind == k_RegisterList && Regs.back().second == ARM::APSR)
3821 Kind = k_RegisterListWithAPSR;
3825 auto Op = std::make_unique<ARMOperand>(Kind, Parser);
3826 for (
const auto &
P : Regs)
3827 Op->Registers.push_back(
P.second);
3829 Op->StartLoc = StartLoc;
3830 Op->EndLoc = EndLoc;
3834 static std::unique_ptr<ARMOperand>
3835 CreateVectorList(MCRegister
Reg,
unsigned Count,
bool isDoubleSpaced, SMLoc S,
3836 SMLoc
E, ARMAsmParser &Parser) {
3837 auto Op = std::make_unique<ARMOperand>(k_VectorList, Parser);
3838 Op->VectorList.RegNum =
Reg;
3840 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3846 static std::unique_ptr<ARMOperand>
3847 CreateVectorListAllLanes(MCRegister
Reg,
unsigned Count,
bool isDoubleSpaced,
3848 SMLoc S, SMLoc
E, ARMAsmParser &Parser) {
3849 auto Op = std::make_unique<ARMOperand>(k_VectorListAllLanes, Parser);
3850 Op->VectorList.RegNum =
Reg;
3852 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3858 static std::unique_ptr<ARMOperand>
3859 CreateVectorListIndexed(MCRegister
Reg,
unsigned Count,
unsigned Index,
3860 bool isDoubleSpaced, SMLoc S, SMLoc
E,
3861 ARMAsmParser &Parser) {
3862 auto Op = std::make_unique<ARMOperand>(k_VectorListIndexed, Parser);
3863 Op->VectorList.RegNum =
Reg;
3865 Op->VectorList.LaneIndex =
Index;
3866 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3872 static std::unique_ptr<ARMOperand> CreateVectorIndex(
unsigned Idx, SMLoc S,
3873 SMLoc
E, MCContext &Ctx,
3874 ARMAsmParser &Parser) {
3875 auto Op = std::make_unique<ARMOperand>(k_VectorIndex, Parser);
3876 Op->VectorIndex.Val = Idx;
3882 static std::unique_ptr<ARMOperand> CreateImm(
const MCExpr *Val, SMLoc S,
3883 SMLoc
E, ARMAsmParser &Parser) {
3884 auto Op = std::make_unique<ARMOperand>(k_Immediate, Parser);
3891 static std::unique_ptr<ARMOperand>
3892 CreateMem(MCRegister BaseReg,
const MCExpr *OffsetImm, MCRegister OffsetReg,
3894 bool isNegative, SMLoc S, SMLoc
E, ARMAsmParser &Parser,
3895 SMLoc AlignmentLoc = SMLoc()) {
3896 auto Op = std::make_unique<ARMOperand>(k_Memory, Parser);
3898 Op->Memory.OffsetImm = OffsetImm;
3899 Op->Memory.OffsetRegNum = OffsetReg;
3900 Op->Memory.ShiftType = ShiftType;
3901 Op->Memory.ShiftImm = ShiftImm;
3902 Op->Memory.Alignment = Alignment;
3903 Op->Memory.isNegative = isNegative;
3906 Op->AlignmentLoc = AlignmentLoc;
3910 static std::unique_ptr<ARMOperand>
3912 unsigned ShiftImm, SMLoc S, SMLoc
E, ARMAsmParser &Parser) {
3913 auto Op = std::make_unique<ARMOperand>(k_PostIndexRegister, Parser);
3914 Op->PostIdxReg.RegNum =
Reg;
3915 Op->PostIdxReg.isAdd = isAdd;
3916 Op->PostIdxReg.ShiftTy = ShiftTy;
3917 Op->PostIdxReg.ShiftImm = ShiftImm;
3923 static std::unique_ptr<ARMOperand>
3924 CreateMemBarrierOpt(
ARM_MB::MemBOpt Opt, SMLoc S, ARMAsmParser &Parser) {
3925 auto Op = std::make_unique<ARMOperand>(k_MemBarrierOpt, Parser);
3926 Op->MBOpt.Val = Opt;
3932 static std::unique_ptr<ARMOperand>
3934 ARMAsmParser &Parser) {
3935 auto Op = std::make_unique<ARMOperand>(k_InstSyncBarrierOpt, Parser);
3936 Op->ISBOpt.Val = Opt;
3942 static std::unique_ptr<ARMOperand>
3944 ARMAsmParser &Parser) {
3945 auto Op = std::make_unique<ARMOperand>(k_TraceSyncBarrierOpt, Parser);
3946 Op->TSBOpt.Val = Opt;
3952 static std::unique_ptr<ARMOperand>
3954 auto Op = std::make_unique<ARMOperand>(k_ProcIFlags, Parser);
3961 static std::unique_ptr<ARMOperand> CreateMSRMask(
unsigned MMask, SMLoc S,
3962 ARMAsmParser &Parser) {
3963 auto Op = std::make_unique<ARMOperand>(k_MSRMask, Parser);
3964 Op->MMask.Val = MMask;
3970 static std::unique_ptr<ARMOperand> CreateBankedReg(
unsigned Reg, SMLoc S,
3971 ARMAsmParser &Parser) {
3972 auto Op = std::make_unique<ARMOperand>(k_BankedReg, Parser);
3973 Op->BankedReg.Val =
Reg;
3982void ARMOperand::print(raw_ostream &OS,
const MCAsmInfo &MAI)
const {
4000 case k_ITCondMask: {
4001 static const char *
const MaskStr[] = {
4002 "(invalid)",
"(tttt)",
"(ttt)",
"(ttte)",
4003 "(tt)",
"(ttet)",
"(tte)",
"(ttee)",
4004 "(t)",
"(tett)",
"(tet)",
"(tete)",
4005 "(te)",
"(teet)",
"(tee)",
"(teee)",
4007 assert((ITMask.Mask & 0xf) == ITMask.Mask);
4008 OS <<
"<it-mask " << MaskStr[ITMask.Mask] <<
">";
4012 OS <<
"<coprocessor number: " << getCoproc() <<
">";
4015 OS <<
"<coprocessor register: " << getCoproc() <<
">";
4017 case k_CoprocOption:
4018 OS <<
"<coprocessor option: " << CoprocOption.Val <<
">";
4021 OS <<
"<mask: " << getMSRMask() <<
">";
4024 OS <<
"<banked reg: " << getBankedReg() <<
">";
4029 case k_MemBarrierOpt:
4030 OS <<
"<ARM_MB::" << MemBOptToString(getMemBarrierOpt(),
false) <<
">";
4032 case k_InstSyncBarrierOpt:
4033 OS <<
"<ARM_ISB::" << InstSyncBOptToString(getInstSyncBarrierOpt()) <<
">";
4035 case k_TraceSyncBarrierOpt:
4036 OS <<
"<ARM_TSB::" << TraceSyncBOptToString(getTraceSyncBarrierOpt()) <<
">";
4040 if (Memory.BaseRegNum)
4041 OS <<
" base:" <<
RegName(Memory.BaseRegNum);
4042 if (Memory.OffsetImm) {
4043 OS <<
" offset-imm:";
4046 if (Memory.OffsetRegNum)
4047 OS <<
" offset-reg:" << (Memory.isNegative ?
"-" :
"")
4048 <<
RegName(Memory.OffsetRegNum);
4051 OS <<
" shift-imm:" << Memory.ShiftImm;
4053 if (Memory.Alignment)
4054 OS <<
" alignment:" << Memory.Alignment;
4057 case k_PostIndexRegister:
4058 OS <<
"post-idx register " << (PostIdxReg.isAdd ?
"" :
"-")
4059 <<
RegName(PostIdxReg.RegNum);
4062 << PostIdxReg.ShiftImm;
4065 case k_ProcIFlags: {
4066 OS <<
"<ARM_PROC::";
4067 unsigned IFlags = getProcIFlags();
4068 for (
int i=2; i >= 0; --i)
4069 if (IFlags & (1 << i))
4077 case k_ShifterImmediate:
4078 OS <<
"<shift " << (ShifterImm.isASR ?
"asr" :
"lsl")
4079 <<
" #" << ShifterImm.Imm <<
">";
4081 case k_ShiftedRegister:
4082 OS <<
"<so_reg_reg " <<
RegName(RegShiftedReg.SrcReg) <<
" "
4084 <<
RegName(RegShiftedReg.ShiftReg) <<
">";
4086 case k_ShiftedImmediate:
4087 OS <<
"<so_reg_imm " <<
RegName(RegShiftedImm.SrcReg) <<
" "
4089 << RegShiftedImm.ShiftImm <<
">";
4091 case k_RotateImmediate:
4092 OS <<
"<ror " <<
" #" << (RotImm.Imm * 8) <<
">";
4094 case k_ModifiedImmediate:
4095 OS <<
"<mod_imm #" << ModImm.Bits <<
", #"
4096 << ModImm.Rot <<
")>";
4098 case k_ConstantPoolImmediate:
4099 OS <<
"<constant_pool_imm #";
4100 MAI.
printExpr(OS, *getConstantPoolImm());
4102 case k_BitfieldDescriptor:
4103 OS <<
"<bitfield " <<
"lsb: " <<
Bitfield.LSB
4104 <<
", width: " <<
Bitfield.Width <<
">";
4106 case k_RegisterList:
4107 case k_RegisterListWithAPSR:
4108 case k_DPRRegisterList:
4109 case k_SPRRegisterList:
4110 case k_FPSRegisterListWithVPR:
4111 case k_FPDRegisterListWithVPR: {
4112 OS <<
"<register_list ";
4114 const SmallVectorImpl<MCRegister> &RegList = getRegList();
4115 for (
auto I = RegList.
begin(),
E = RegList.
end();
I !=
E;) {
4117 if (++
I <
E) OS <<
", ";
4124 OS <<
"<vector_list " << VectorList.Count <<
" * "
4125 <<
RegName(VectorList.RegNum) <<
">";
4127 case k_VectorListAllLanes:
4128 OS <<
"<vector_list(all lanes) " << VectorList.Count <<
" * "
4129 <<
RegName(VectorList.RegNum) <<
">";
4131 case k_VectorListIndexed:
4132 OS <<
"<vector_list(lane " << VectorList.LaneIndex <<
") "
4133 << VectorList.Count <<
" * " <<
RegName(VectorList.RegNum) <<
">";
4139 OS <<
"<vectorindex " << getVectorIndex() <<
">";
4153 ".8",
".16",
".32",
".64",
".i8",
".i16",
".i32",
".i64",
4154 ".u8",
".u16",
".u32",
".u64",
".s8",
".s16",
".s32",
".s64",
4155 ".p8",
".p16",
".f32",
".f64",
".f",
".d"};
4160 unsigned MnemonicOpsEndInd = 1;
4163 if (Operands[0]->isToken() &&
4164 static_cast<ARMOperand &
>(*Operands[0]).
getToken() ==
"cps") {
4165 if (Operands.
size() > 1 && Operands[1]->isImm() &&
4166 static_cast<ARMOperand &
>(*Operands[1]).getImm()->getKind() ==
4169 static_cast<ARMOperand &
>(*Operands[1]).getImm())
4172 static_cast<ARMOperand &
>(*Operands[1]).getImm())
4174 ++MnemonicOpsEndInd;
4178 bool RHSCondCode =
false;
4179 while (MnemonicOpsEndInd < Operands.
size()) {
4180 auto Op =
static_cast<ARMOperand &
>(*Operands[MnemonicOpsEndInd]);
4182 if (
Op.isITMask()) {
4184 MnemonicOpsEndInd++;
4185 }
else if (
Op.isToken() &&
4189 Op.getToken() ==
".w" ||
Op.getToken() ==
".bf16" ||
4190 Op.getToken() ==
".p64" ||
Op.getToken() ==
".f16" ||
4196 MnemonicOpsEndInd++;
4199 else if (
Op.isCCOut() || (
Op.isCondCode() && !RHSCondCode) ||
4200 Op.isVPTPred() || (
Op.isToken() &&
Op.getToken() ==
".w"))
4201 MnemonicOpsEndInd++;
4205 return MnemonicOpsEndInd;
4208bool ARMAsmParser::parseRegister(MCRegister &
Reg, SMLoc &StartLoc,
4210 const AsmToken &Tok = getParser().getTok();
4213 Reg = tryParseRegister();
4218ParseStatus ARMAsmParser::tryParseRegister(MCRegister &
Reg, SMLoc &StartLoc,
4220 if (parseRegister(
Reg, StartLoc, EndLoc))
4228MCRegister ARMAsmParser::tryParseRegister(
bool AllowOutOfBoundReg) {
4229 MCAsmParser &Parser = getParser();
4230 const AsmToken &Tok = Parser.
getTok();
4232 return MCRegister();
4237 Reg = StringSwitch<MCRegister>(lowerCase)
4238 .Case(
"r13", ARM::SP)
4239 .Case(
"r14", ARM::LR)
4240 .Case(
"r15", ARM::PC)
4241 .Case(
"ip", ARM::R12)
4243 .Case(
"a1", ARM::R0)
4244 .Case(
"a2", ARM::R1)
4245 .Case(
"a3", ARM::R2)
4246 .Case(
"a4", ARM::R3)
4247 .Case(
"v1", ARM::R4)
4248 .Case(
"v2", ARM::R5)
4249 .Case(
"v3", ARM::R6)
4250 .Case(
"v4", ARM::R7)
4251 .Case(
"v5", ARM::R8)
4252 .Case(
"v6", ARM::R9)
4253 .Case(
"v7", ARM::R10)
4254 .Case(
"v8", ARM::R11)
4255 .Case(
"sb", ARM::R9)
4256 .Case(
"sl", ARM::R10)
4257 .Case(
"fp", ARM::R11)
4258 .Default(MCRegister());
4264 auto Entry = RegisterReqs.
find(lowerCase);
4266 if (Entry == RegisterReqs.
end())
4267 return MCRegister();
4269 return Entry->getValue();
4273 if (!AllowOutOfBoundReg && !hasD32() &&
Reg >=
ARM::D16 &&
Reg <= ARM::D31)
4274 return MCRegister();
4281std::optional<ARM_AM::ShiftOpc> ARMAsmParser::tryParseShiftToken() {
4282 MCAsmParser &Parser = getParser();
4283 const AsmToken &Tok = Parser.
getTok();
4285 return std::nullopt;
4288 return StringSwitch<std::optional<ARM_AM::ShiftOpc>>(lowerCase)
4295 .Default(std::nullopt);
4303int ARMAsmParser::tryParseShiftRegister(
OperandVector &Operands) {
4304 MCAsmParser &Parser = getParser();
4307 auto ShiftTyOpt = tryParseShiftToken();
4308 if (ShiftTyOpt == std::nullopt)
4310 auto ShiftTy = ShiftTyOpt.value();
4317 std::unique_ptr<ARMOperand> PrevOp(
4319 if (!PrevOp->isReg())
4320 return Error(PrevOp->getStartLoc(),
"shift must be of a register");
4321 MCRegister SrcReg = PrevOp->getReg();
4325 MCRegister ShiftReg;
4337 const MCExpr *ShiftExpr =
nullptr;
4338 if (getParser().parseExpression(ShiftExpr, EndLoc)) {
4339 Error(ImmLoc,
"invalid immediate shift value");
4345 Error(ImmLoc,
"invalid immediate shift value");
4351 Imm =
CE->getValue();
4355 Error(ImmLoc,
"immediate shift value out of range");
4365 ShiftReg = tryParseRegister();
4367 Error(L,
"expected immediate or register in shift operand");
4372 "expected immediate or register in shift operand");
4378 Operands.
push_back(ARMOperand::CreateShiftedRegister(
4379 ShiftTy, SrcReg, ShiftReg, Imm, S, EndLoc, *
this));
4381 Operands.
push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
4393bool ARMAsmParser::tryParseRegisterWithWriteBack(
OperandVector &Operands) {
4394 MCAsmParser &Parser = getParser();
4397 MCRegister
Reg = tryParseRegister();
4401 Operands.
push_back(ARMOperand::CreateReg(
Reg, RegStartLoc, RegEndLoc, *
this));
4403 const AsmToken &ExclaimTok = Parser.
getTok();
4406 ExclaimTok.
getLoc(), *
this));
4418 const MCExpr *ImmVal;
4419 if (getParser().parseExpression(ImmVal))
4423 return TokError(
"immediate value expected for vector index");
4450 if (Name.size() < 2 || Name[0] != CoprocOp)
4452 Name = (Name[1] ==
'r') ? Name.drop_front(2) : Name.drop_front();
4454 switch (Name.size()) {
4477 case '0':
return 10;
4478 case '1':
return 11;
4479 case '2':
return 12;
4480 case '3':
return 13;
4481 case '4':
return 14;
4482 case '5':
return 15;
4488ParseStatus ARMAsmParser::parseITCondCode(
OperandVector &Operands) {
4489 MCAsmParser &Parser = getParser();
4491 const AsmToken &Tok = Parser.
getTok();
4508ParseStatus ARMAsmParser::parseCoprocNumOperand(
OperandVector &Operands) {
4509 MCAsmParser &Parser = getParser();
4511 const AsmToken &Tok = Parser.
getTok();
4522 Operands.
push_back(ARMOperand::CreateCoprocNum(Num, S, *
this));
4529ParseStatus ARMAsmParser::parseCoprocRegOperand(
OperandVector &Operands) {
4530 MCAsmParser &Parser = getParser();
4532 const AsmToken &Tok = Parser.
getTok();
4541 Operands.
push_back(ARMOperand::CreateCoprocReg(
Reg, S, *
this));
4547ParseStatus ARMAsmParser::parseCoprocOptionOperand(
OperandVector &Operands) {
4548 MCAsmParser &Parser = getParser();
4558 if (getParser().parseExpression(Expr))
4559 return Error(Loc,
"illegal expression");
4561 if (!CE ||
CE->getValue() < 0 ||
CE->getValue() > 255)
4563 "coprocessor option must be an immediate in range [0, 255]");
4564 int Val =
CE->getValue();
4572 Operands.
push_back(ARMOperand::CreateCoprocOption(Val, S,
E, *
this));
4583 if (!ARMMCRegisterClasses[ARM::GPRRegClassID].
contains(
Reg))
4587 case ARM::R0:
return ARM::R1;
case ARM::R1:
return ARM::R2;
4588 case ARM::R2:
return ARM::R3;
case ARM::R3:
return ARM::R4;
4589 case ARM::R4:
return ARM::R5;
case ARM::R5:
return ARM::R6;
4590 case ARM::R6:
return ARM::R7;
case ARM::R7:
return ARM::R8;
4591 case ARM::R8:
return ARM::R9;
case ARM::R9:
return ARM::R10;
4592 case ARM::R10:
return ARM::R11;
case ARM::R11:
return ARM::R12;
4593 case ARM::R12:
return ARM::SP;
case ARM::SP:
return ARM::LR;
4594 case ARM::LR:
return ARM::PC;
case ARM::PC:
return ARM::R0;
4603 Regs.emplace_back(Enc,
Reg);
4604 for (
auto I = Regs.rbegin(), J =
I + 1,
E = Regs.rend(); J !=
E; ++
I, ++J) {
4605 if (J->first == Enc) {
4606 Regs.erase(J.base());
4617bool ARMAsmParser::parseRegisterList(
OperandVector &Operands,
bool EnforceOrder,
4618 bool AllowRAAC,
bool IsLazyLoadStore,
4620 MCAsmParser &Parser = getParser();
4622 return TokError(
"Token is not a Left Curly Brace");
4629 bool AllowOutOfBoundReg = IsLazyLoadStore || IsVSCCLRM;
4630 MCRegister
Reg = tryParseRegister(AllowOutOfBoundReg);
4632 return Error(RegLoc,
"register expected");
4633 if (!AllowRAAC &&
Reg == ARM::RA_AUTH_CODE)
4634 return Error(RegLoc,
"pseudo-register not allowed");
4645 bool VSCCLRMAdjustEncoding =
false;
4648 if (ARMMCRegisterClasses[ARM::QPRRegClassID].
contains(
Reg)) {
4649 Reg = getDRegFromQReg(
Reg);
4654 const MCRegisterClass *RC;
4655 if (
Reg == ARM::RA_AUTH_CODE ||
4656 ARMMCRegisterClasses[ARM::GPRRegClassID].
contains(
Reg))
4657 RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
4658 else if (ARMMCRegisterClasses[ARM::DPRRegClassID].
contains(
Reg))
4659 RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
4660 else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
contains(
Reg))
4661 RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
4662 else if (ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].
contains(
Reg))
4663 RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
4664 else if (
Reg == ARM::VPR)
4665 RC = &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID];
4667 return Error(RegLoc,
"invalid register in register list");
4679 if (
Reg == ARM::RA_AUTH_CODE)
4680 return Error(RegLoc,
"pseudo-register not allowed");
4683 MCRegister EndReg = tryParseRegister(AllowOutOfBoundReg);
4685 return Error(AfterMinusLoc,
"register expected");
4686 if (EndReg == ARM::RA_AUTH_CODE)
4687 return Error(AfterMinusLoc,
"pseudo-register not allowed");
4689 if (ARMMCRegisterClasses[ARM::QPRRegClassID].
contains(EndReg))
4690 EndReg = getDRegFromQReg(EndReg) + 1;
4697 return Error(AfterMinusLoc,
"invalid register in register list");
4700 return Error(AfterMinusLoc,
"bad range in register list");
4703 while (
Reg != EndReg) {
4706 if (VSCCLRMAdjustEncoding)
4709 Warning(AfterMinusLoc, StringRef(
"duplicated register (") +
4711 ") in register list");
4718 MCRegister OldReg =
Reg;
4720 const AsmToken RegTok = Parser.
getTok();
4721 Reg = tryParseRegister(AllowOutOfBoundReg);
4723 return Error(RegLoc,
"register expected");
4724 if (!AllowRAAC &&
Reg == ARM::RA_AUTH_CODE)
4725 return Error(RegLoc,
"pseudo-register not allowed");
4727 bool isQReg =
false;
4728 if (ARMMCRegisterClasses[ARM::QPRRegClassID].
contains(
Reg)) {
4729 Reg = getDRegFromQReg(
Reg);
4733 RC->
getID() == ARMMCRegisterClasses[ARM::GPRRegClassID].getID() &&
4734 ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(
Reg)) {
4737 RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
4739 if (
Reg == ARM::VPR &&
4740 (RC == &ARMMCRegisterClasses[ARM::SPRRegClassID] ||
4741 RC == &ARMMCRegisterClasses[ARM::DPRRegClassID] ||
4742 RC == &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID])) {
4743 RC = &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID];
4747 ") in register list");
4753 if (IsVSCCLRM && OldReg == ARM::S31 &&
Reg ==
ARM::D16) {
4754 VSCCLRMAdjustEncoding =
true;
4755 RC = &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID];
4758 if ((
Reg == ARM::RA_AUTH_CODE &&
4759 RC != &ARMMCRegisterClasses[ARM::GPRRegClassID]) ||
4761 return Error(RegLoc,
"invalid register in register list");
4767 if (VSCCLRMAdjustEncoding)
4769 if (EnforceOrder && EReg < EOldReg) {
4770 if (ARMMCRegisterClasses[ARM::GPRRegClassID].
contains(
Reg))
4771 Warning(RegLoc,
"register list not in ascending order");
4772 else if (!ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].
contains(
Reg))
4773 return Error(RegLoc,
"register list not in ascending order");
4776 if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
4777 RC != &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID] &&
4778 EReg != EOldReg + 1)
4779 return Error(RegLoc,
"non-contiguous register range");
4783 ") in register list");
4803 ARMOperand::CreateToken(
"^", Parser.
getTok().
getLoc(), *
this));
4811ParseStatus ARMAsmParser::parseVectorLane(VectorLaneTy &LaneKind,
4812 unsigned &Index, SMLoc &EndLoc) {
4813 MCAsmParser &Parser = getParser();
4819 LaneKind = AllLanes;
4830 const MCExpr *LaneIndex;
4832 if (getParser().parseExpression(LaneIndex))
4833 return Error(Loc,
"illegal expression");
4836 return Error(Loc,
"lane index must be empty or an integer");
4841 int64_t Val =
CE->getValue();
4844 if (Val < 0 || Val > 7)
4847 LaneKind = IndexedLane;
4855ParseStatus ARMAsmParser::parseVectorList(
OperandVector &Operands) {
4856 MCAsmParser &Parser = getParser();
4857 VectorLaneTy LaneKind;
4867 MCRegister
Reg = tryParseRegister();
4870 if (ARMMCRegisterClasses[ARM::DPRRegClassID].
contains(
Reg)) {
4871 ParseStatus Res = parseVectorLane(LaneKind, LaneIndex,
E);
4876 Operands.
push_back(ARMOperand::CreateReg(
Reg, S,
E, *
this));
4880 ARMOperand::CreateVectorListAllLanes(
Reg, 1,
false, S,
E, *
this));
4883 Operands.
push_back(ARMOperand::CreateVectorListIndexed(
4884 Reg, 1, LaneIndex,
false, S,
E, *
this));
4889 if (ARMMCRegisterClasses[ARM::QPRRegClassID].
contains(
Reg)) {
4890 Reg = getDRegFromQReg(
Reg);
4891 ParseStatus Res = parseVectorLane(LaneKind, LaneIndex,
E);
4896 Operands.
push_back(ARMOperand::CreateReg(
Reg, S,
E, *
this));
4900 &ARMMCRegisterClasses[ARM::DPairRegClassID]);
4902 ARMOperand::CreateVectorListAllLanes(
Reg, 2,
false, S,
E, *
this));
4905 Operands.
push_back(ARMOperand::CreateVectorListIndexed(
4906 Reg, 2, LaneIndex,
false, S,
E, *
this));
4911 Operands.
push_back(ARMOperand::CreateReg(
Reg, S,
E, *
this));
4921 MCRegister
Reg = tryParseRegister();
4923 return Error(RegLoc,
"register expected");
4926 MCRegister FirstReg =
Reg;
4928 if (hasMVE() && !ARMMCRegisterClasses[ARM::MQPRRegClassID].
contains(
Reg))
4930 "vector register in range Q0-Q7 expected");
4933 else if (!hasMVE() && ARMMCRegisterClasses[ARM::QPRRegClassID].
contains(
Reg)) {
4934 FirstReg =
Reg = getDRegFromQReg(
Reg);
4942 if (!parseVectorLane(LaneKind, LaneIndex,
E).isSuccess())
4950 else if (Spacing == 2)
4952 "sequential registers in double spaced list");
4955 MCRegister EndReg = tryParseRegister();
4957 return Error(AfterMinusLoc,
"register expected");
4959 if (!hasMVE() && ARMMCRegisterClasses[ARM::QPRRegClassID].
contains(EndReg))
4960 EndReg = getDRegFromQReg(EndReg) + 1;
4967 !ARMMCRegisterClasses[ARM::MQPRRegClassID].
contains(EndReg)) ||
4969 !ARMMCRegisterClasses[ARM::DPRRegClassID].
contains(EndReg)))
4970 return Error(AfterMinusLoc,
"invalid register in register list");
4973 return Error(AfterMinusLoc,
"bad range in register list");
4975 VectorLaneTy NextLaneKind;
4976 unsigned NextLaneIndex;
4977 if (!parseVectorLane(NextLaneKind, NextLaneIndex,
E).isSuccess())
4979 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex)
4980 return Error(AfterMinusLoc,
"mismatched lane index in register list");
4989 MCRegister OldReg =
Reg;
4990 Reg = tryParseRegister();
4992 return Error(RegLoc,
"register expected");
4995 if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].
contains(
Reg))
4996 return Error(RegLoc,
"vector register in range Q0-Q7 expected");
5005 else if (ARMMCRegisterClasses[ARM::QPRRegClassID].
contains(
Reg)) {
5008 else if (Spacing == 2)
5011 "invalid register in double-spaced list (must be 'D' register')");
5012 Reg = getDRegFromQReg(
Reg);
5013 if (
Reg != OldReg + 1)
5014 return Error(RegLoc,
"non-contiguous register range");
5018 VectorLaneTy NextLaneKind;
5019 unsigned NextLaneIndex;
5021 if (!parseVectorLane(NextLaneKind, NextLaneIndex,
E).isSuccess())
5023 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex)
5024 return Error(LaneLoc,
"mismatched lane index in register list");
5031 Spacing = 1 + (
Reg == OldReg + 2);
5034 if (
Reg != OldReg + Spacing)
5035 return Error(RegLoc,
"non-contiguous register range");
5038 VectorLaneTy NextLaneKind;
5039 unsigned NextLaneIndex;
5041 if (!parseVectorLane(NextLaneKind, NextLaneIndex,
E).isSuccess())
5043 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex)
5044 return Error(EndLoc,
"mismatched lane index in register list");
5057 if (
Count == 2 && !hasMVE()) {
5058 const MCRegisterClass *RC = (Spacing == 1) ?
5059 &ARMMCRegisterClasses[ARM::DPairRegClassID] :
5060 &ARMMCRegisterClasses[
ARM::DPairSpcRegClassID];
5063 auto Create = (LaneKind == NoLanes ? ARMOperand::CreateVectorList :
5064 ARMOperand::CreateVectorListAllLanes);
5065 Operands.
push_back(Create(FirstReg,
Count, (Spacing == 2), S,
E, *
this));
5069 Operands.
push_back(ARMOperand::CreateVectorListIndexed(
5070 FirstReg,
Count, LaneIndex, (Spacing == 2), S,
E, *
this));
5077ParseStatus ARMAsmParser::parseMemBarrierOptOperand(
OperandVector &Operands) {
5078 MCAsmParser &Parser = getParser();
5080 const AsmToken &Tok = Parser.
getTok();
5086 Opt = StringSwitch<unsigned>(OptStr.
lower())
5121 const MCExpr *MemBarrierID;
5122 if (getParser().parseExpression(MemBarrierID))
5123 return Error(Loc,
"illegal expression");
5127 return Error(Loc,
"constant expression expected");
5129 int Val =
CE->getValue();
5131 return Error(Loc,
"immediate value out of range");
5136 "expected an immediate or barrier type");
5144ARMAsmParser::parseTraceSyncBarrierOptOperand(
OperandVector &Operands) {
5145 MCAsmParser &Parser = getParser();
5147 const AsmToken &Tok = Parser.
getTok();
5158 ARMOperand::CreateTraceSyncBarrierOpt(
ARM_TSB::CSYNC, S, *
this));
5164ARMAsmParser::parseInstSyncBarrierOptOperand(
OperandVector &Operands) {
5165 MCAsmParser &Parser = getParser();
5167 const AsmToken &Tok = Parser.
getTok();
5186 const MCExpr *ISBarrierID;
5187 if (getParser().parseExpression(ISBarrierID))
5188 return Error(Loc,
"illegal expression");
5192 return Error(Loc,
"constant expression expected");
5194 int Val =
CE->getValue();
5196 return Error(Loc,
"immediate value out of range");
5201 "expected an immediate or barrier type");
5203 Operands.
push_back(ARMOperand::CreateInstSyncBarrierOpt(
5209ParseStatus ARMAsmParser::parseProcIFlagsOperand(
OperandVector &Operands) {
5210 MCAsmParser &Parser = getParser();
5212 const AsmToken &Tok = Parser.
getTok();
5220 if (IFlagsStr !=
"none") {
5221 for (
int i = 0, e = IFlagsStr.
size(); i != e; ++i) {
5222 unsigned Flag = StringSwitch<unsigned>(IFlagsStr.
substr(i, 1).
lower())
5230 if (Flag == ~0U || (IFlags & Flag))
5244ParseStatus ARMAsmParser::parseMSRMaskOperand(
OperandVector &Operands) {
5246 if (
static_cast<ARMOperand &
>(*Operands.
back()).isMSRMask() ||
5247 static_cast<ARMOperand &
>(*Operands.
back()).isBankedReg())
5249 MCAsmParser &Parser = getParser();
5251 const AsmToken &Tok = Parser.
getTok();
5255 if (Val > 255 || Val < 0) {
5258 unsigned SYSmvalue = Val & 0xFF;
5260 Operands.
push_back(ARMOperand::CreateMSRMask(SYSmvalue, S, *
this));
5269 auto TheReg = ARMSysReg::lookupMClassSysRegByName(
Mask.lower());
5270 if (!TheReg || !TheReg->hasRequiredFeatures(getSTI().getFeatureBits()))
5273 unsigned SYSmvalue = TheReg->Encoding & 0xFFF;
5276 Operands.
push_back(ARMOperand::CreateMSRMask(SYSmvalue, S, *
this));
5282 StringRef
Flags =
"";
5283 std::string SpecReg =
Mask.slice(Start,
Next).lower();
5290 unsigned FlagsVal = 0;
5292 if (SpecReg ==
"apsr") {
5293 FlagsVal = StringSwitch<unsigned>(Flags)
5296 .Case(
"nzcvqg", 0xc)
5299 if (FlagsVal == ~0U) {
5305 }
else if (SpecReg ==
"cpsr" || SpecReg ==
"spsr") {
5307 if (Flags ==
"all" || Flags ==
"")
5309 for (
int i = 0, e =
Flags.size(); i != e; ++i) {
5310 unsigned Flag = StringSwitch<unsigned>(
Flags.substr(i, 1))
5319 if (Flag == ~0U || (FlagsVal & Flag))
5335 if (SpecReg ==
"spsr")
5339 Operands.
push_back(ARMOperand::CreateMSRMask(FlagsVal, S, *
this));
5345ParseStatus ARMAsmParser::parseBankedRegOperand(
OperandVector &Operands) {
5347 if (
static_cast<ARMOperand &
>(*Operands.
back()).isBankedReg() ||
5348 static_cast<ARMOperand &
>(*Operands.
back()).isMSRMask())
5350 MCAsmParser &Parser = getParser();
5352 const AsmToken &Tok = Parser.
getTok();
5357 auto TheReg = ARMBankedReg::lookupBankedRegByName(
RegName.lower());
5360 unsigned Encoding = TheReg->Encoding;
5363 Operands.
push_back(ARMOperand::CreateBankedReg(Encoding, S, *
this));
5370ParseStatus ARMAsmParser::parsePKHImm(
OperandVector &Operands,
5372 MCAsmParser &Parser = getParser();
5373 auto ShiftCodeOpt = tryParseShiftToken();
5375 if (!ShiftCodeOpt.has_value())
5377 auto ShiftCode = ShiftCodeOpt.value();
5381 if (ShiftCode !=
Op)
5393 const MCExpr *ShiftAmount;
5396 if (getParser().parseExpression(ShiftAmount, EndLoc))
5397 return Error(Loc,
"illegal expression");
5400 return Error(Loc,
"constant expression expected");
5401 int Val =
CE->getValue();
5402 if (Val < Low || Val >
High)
5403 return Error(Loc,
"immediate value out of range");
5405 Operands.
push_back(ARMOperand::CreateImm(CE, Loc, EndLoc, *
this));
5410ParseStatus ARMAsmParser::parseSetEndImm(
OperandVector &Operands) {
5411 MCAsmParser &Parser = getParser();
5412 const AsmToken &Tok = Parser.
getTok();
5415 return Error(S,
"'be' or 'le' operand expected");
5423 return Error(S,
"'be' or 'le' operand expected");
5424 Operands.
push_back(ARMOperand::CreateImm(
5434ParseStatus ARMAsmParser::parseShifterImm(
OperandVector &Operands) {
5435 MCAsmParser &Parser = getParser();
5436 const AsmToken &Tok = Parser.
getTok();
5442 if (ShiftName ==
"lsl" || ShiftName ==
"LSL")
5444 else if (ShiftName ==
"asr" || ShiftName ==
"ASR")
5457 const MCExpr *ShiftAmount;
5459 if (getParser().parseExpression(ShiftAmount, EndLoc))
5460 return Error(ExLoc,
"malformed shift expression");
5463 return Error(ExLoc,
"shift amount must be an immediate");
5465 int64_t Val =
CE->getValue();
5468 if (Val < 1 || Val > 32)
5469 return Error(ExLoc,
"'asr' shift amount must be in range [1,32]");
5472 return Error(ExLoc,
"'asr #32' shift amount not allowed in Thumb mode");
5473 if (Val == 32) Val = 0;
5476 if (Val < 0 || Val > 31)
5477 return Error(ExLoc,
"'lsr' shift amount must be in range [0,31]");
5481 ARMOperand::CreateShifterImm(isASR, Val, S, EndLoc, *
this));
5489ParseStatus ARMAsmParser::parseRotImm(
OperandVector &Operands) {
5490 MCAsmParser &Parser = getParser();
5491 const AsmToken &Tok = Parser.
getTok();
5496 if (ShiftName !=
"ror" && ShiftName !=
"ROR")
5507 const MCExpr *ShiftAmount;
5509 if (getParser().parseExpression(ShiftAmount, EndLoc))
5510 return Error(ExLoc,
"malformed rotate expression");
5513 return Error(ExLoc,
"rotate amount must be an immediate");
5515 int64_t Val =
CE->getValue();
5519 if (Val != 8 && Val != 16 && Val != 24 && Val != 0)
5520 return Error(ExLoc,
"'ror' rotate amount must be 8, 16, or 24");
5522 Operands.
push_back(ARMOperand::CreateRotImm(Val, S, EndLoc, *
this));
5527ParseStatus ARMAsmParser::parseModImm(
OperandVector &Operands) {
5528 MCAsmParser &Parser = getParser();
5529 AsmLexer &Lexer = getLexer();
5560 const MCExpr *Imm1Exp;
5561 if (getParser().parseExpression(Imm1Exp, Ex1))
5562 return Error(Sx1,
"malformed expression");
5568 Imm1 =
CE->getValue();
5572 Operands.
push_back(ARMOperand::CreateModImm(
5573 (Enc & 0xFF), (Enc & 0xF00) >> 7, Sx1, Ex1, *
this));
5584 Operands.
push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1, *
this));
5590 Operands.
push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1, *
this));
5597 "expected modified immediate operand: #[0, 255], #even[0-30]");
5600 return Error(Sx1,
"immediate operand must a number in the range [0, 255]");
5614 const MCExpr *Imm2Exp;
5615 if (getParser().parseExpression(Imm2Exp, Ex2))
5616 return Error(Sx2,
"malformed expression");
5621 Imm2 =
CE->getValue();
5622 if (!(Imm2 & ~0x1E)) {
5624 Operands.
push_back(ARMOperand::CreateModImm(Imm1, Imm2, S, Ex2, *
this));
5628 "immediate operand must an even number in the range [0, 30]");
5630 return Error(Sx2,
"constant expression expected");
5634ParseStatus ARMAsmParser::parseBitfield(
OperandVector &Operands) {
5635 MCAsmParser &Parser = getParser();
5643 const MCExpr *LSBExpr;
5645 if (getParser().parseExpression(LSBExpr))
5646 return Error(
E,
"malformed immediate expression");
5649 return Error(
E,
"'lsb' operand must be an immediate");
5651 int64_t LSB =
CE->getValue();
5653 if (LSB < 0 || LSB > 31)
5654 return Error(
E,
"'lsb' operand must be in the range [0,31]");
5666 const MCExpr *WidthExpr;
5668 if (getParser().parseExpression(WidthExpr, EndLoc))
5669 return Error(
E,
"malformed immediate expression");
5672 return Error(
E,
"'width' operand must be an immediate");
5674 int64_t Width =
CE->getValue();
5676 if (Width < 1 || Width > 32 - LSB)
5677 return Error(
E,
"'width' operand must be in the range [1,32-lsb]");
5679 Operands.
push_back(ARMOperand::CreateBitfield(LSB, Width, S, EndLoc, *
this));
5684ParseStatus ARMAsmParser::parsePostIdxReg(
OperandVector &Operands) {
5693 MCAsmParser &Parser = getParser();
5694 AsmToken Tok = Parser.
getTok();
5696 bool haveEaten =
false;
5708 MCRegister
Reg = tryParseRegister();
5716 unsigned ShiftImm = 0;
5719 if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
5727 ARMOperand::CreatePostIdxReg(
Reg, isAdd, ShiftTy, ShiftImm, S,
E, *
this));
5732ParseStatus ARMAsmParser::parseAM3Offset(
OperandVector &Operands) {
5744 MCAsmParser &Parser = getParser();
5745 AsmToken Tok = Parser.
getTok();
5757 if (getParser().parseExpression(
Offset,
E))
5761 return Error(S,
"constant expression expected");
5764 int32_t Val =
CE->getValue();
5765 if (isNegative && Val == 0)
5766 Val = std::numeric_limits<int32_t>::min();
5768 Operands.
push_back(ARMOperand::CreateImm(
5774 bool haveEaten =
false;
5786 MCRegister
Reg = tryParseRegister();
5793 Operands.
push_back(ARMOperand::CreatePostIdxReg(
5801 unsigned MnemonicOpsEndInd) {
5802 for (
unsigned I = 1;
I < MnemonicOpsEndInd; ++
I) {
5803 auto Op =
static_cast<ARMOperand &
>(*Operands[
I]);
5804 if (
Op.isCondCode())
5811 unsigned MnemonicOpsEndInd) {
5812 for (
unsigned I = 1;
I < MnemonicOpsEndInd; ++
I) {
5813 auto Op =
static_cast<ARMOperand &
>(*Operands[
I]);
5823void ARMAsmParser::cvtThumbMultiply(MCInst &Inst,
5827 unsigned CondOutI =
findCCOutInd(Operands, MnemonicOpsEndInd);
5830 unsigned RegRd = MnemonicOpsEndInd;
5831 unsigned RegRn = MnemonicOpsEndInd + 1;
5832 unsigned RegRm = MnemonicOpsEndInd;
5834 if (Operands.
size() == MnemonicOpsEndInd + 3) {
5837 if (((ARMOperand &)*Operands[RegRd]).
getReg() ==
5838 ((ARMOperand &)*Operands[MnemonicOpsEndInd + 1]).
getReg()) {
5839 RegRn = MnemonicOpsEndInd + 2;
5840 RegRm = MnemonicOpsEndInd + 1;
5842 RegRn = MnemonicOpsEndInd + 1;
5843 RegRm = MnemonicOpsEndInd + 2;
5848 ((ARMOperand &)*Operands[RegRd]).addRegOperands(Inst, 1);
5850 if (CondOutI != 0) {
5851 ((ARMOperand &)*Operands[CondOutI]).addCCOutOperands(Inst, 1);
5854 *ARMOperand::CreateCCOut(0, Operands[0]->getEndLoc(), *
this);
5855 Op.addCCOutOperands(Inst, 1);
5858 ((ARMOperand &)*Operands[RegRn]).addRegOperands(Inst, 1);
5860 ((ARMOperand &)*Operands[RegRm]).addRegOperands(Inst, 1);
5864 ((ARMOperand &)*Operands[CondI]).addCondCodeOperands(Inst, 2);
5866 ARMOperand
Op = *ARMOperand::CreateCondCode(
5868 Op.addCondCodeOperands(Inst, 2);
5872void ARMAsmParser::cvtThumbBranches(MCInst &Inst,
5878 :
static_cast<ARMOperand &
>(*Operands[CondI]).getCondCode());
5886 case ARM::tBcc: Inst.
setOpcode(ARM::tB);
break;
5887 case ARM::t2Bcc: Inst.
setOpcode(ARM::t2B);
break;
5906 ARMOperand &
op =
static_cast<ARMOperand &
>(*Operands[MnemonicOpsEndInd]);
5907 if (!
op.isSignedOffset<11, 1>() &&
isThumb() && hasV8MBaseline())
5913 ARMOperand &
op =
static_cast<ARMOperand &
>(*Operands[MnemonicOpsEndInd]);
5914 if (!
op.isSignedOffset<8, 1>() &&
isThumb() && hasV8MBaseline())
5919 ((ARMOperand &)*Operands[MnemonicOpsEndInd]).addImmOperands(Inst, 1);
5921 ((ARMOperand &)*Operands[CondI]).addCondCodeOperands(Inst, 2);
5923 ARMOperand
Op = *ARMOperand::CreateCondCode(
5925 Op.addCondCodeOperands(Inst, 2);
5929void ARMAsmParser::cvtMVEVMOVQtoDReg(
5936 assert(Operands.
size() == MnemonicOpsEndInd + 6);
5938 ((ARMOperand &)*Operands[MnemonicOpsEndInd]).addRegOperands(Inst, 1);
5939 ((ARMOperand &)*Operands[MnemonicOpsEndInd + 1])
5940 .addRegOperands(Inst, 1);
5941 ((ARMOperand &)*Operands[MnemonicOpsEndInd + 2])
5942 .addRegOperands(Inst, 1);
5943 ((ARMOperand &)*Operands[MnemonicOpsEndInd + 3])
5944 .addMVEPairVectorIndexOperands(Inst, 1);
5946 ((ARMOperand &)*Operands[MnemonicOpsEndInd + 5])
5947 .addMVEPairVectorIndexOperands(Inst, 1);
5949 ((ARMOperand &)*Operands[CondI])
5950 .addCondCodeOperands(Inst, 2);
5953 *ARMOperand::CreateCondCode(
ARMCC::AL, Operands[0]->getEndLoc(), *
this);
5954 Op.addCondCodeOperands(Inst, 2);
5961 MCAsmParser &Parser = getParser();
5964 return TokError(
"Token is not a Left Bracket");
5968 const AsmToken &BaseRegTok = Parser.
getTok();
5969 MCRegister
BaseReg = tryParseRegister();
5971 return Error(BaseRegTok.
getLoc(),
"register expected");
5974 const AsmToken &Tok = Parser.
getTok();
5977 return Error(Tok.
getLoc(),
"malformed memory operand");
5983 Operands.
push_back(ARMOperand::CreateMem(
5990 ARMOperand::CreateToken(
"!", Parser.
getTok().
getLoc(), *
this));
5998 "Lost colon or comma in memory operand?!");
6007 SMLoc AlignmentLoc = Tok.
getLoc();
6010 if (getParser().parseExpression(Expr))
6018 return Error (
E,
"constant expression expected");
6021 switch (
CE->getValue()) {
6024 "alignment specifier must be 16, 32, 64, 128, or 256 bits");
6025 case 16:
Align = 2;
break;
6026 case 32:
Align = 4;
break;
6027 case 64:
Align = 8;
break;
6028 case 128:
Align = 16;
break;
6029 case 256:
Align = 32;
break;
6040 Operands.
push_back(ARMOperand::CreateMem(BaseReg,
nullptr, 0,
6042 S,
E, *
this, AlignmentLoc));
6048 ARMOperand::CreateToken(
"!", Parser.
getTok().
getLoc(), *
this));
6068 const MCExpr *
Offset, *AdjustedOffset;
6069 if (getParser().parseExpression(
Offset))
6075 int32_t Val =
CE->getValue();
6076 if (isNegative && Val == 0)
6081 AdjustedOffset =
CE;
6084 Operands.
push_back(ARMOperand::CreateMem(BaseReg, AdjustedOffset, 0,
6098 ARMOperand::CreateToken(
"!", Parser.
getTok().
getLoc(), *
this));
6106 bool isNegative =
false;
6116 MCRegister OffsetReg = tryParseRegister();
6118 return Error(
E,
"register expected");
6122 unsigned ShiftImm = 0;
6125 if (parseMemRegOffsetShift(ShiftType, ShiftImm))
6135 Operands.
push_back(ARMOperand::CreateMem(BaseReg,
nullptr, OffsetReg,
6136 ShiftType, ShiftImm, 0, isNegative,
6143 ARMOperand::CreateToken(
"!", Parser.
getTok().
getLoc(), *
this));
6156 MCAsmParser &Parser = getParser();
6158 const AsmToken &Tok = Parser.
getTok();
6160 return Error(Loc,
"illegal shift operator");
6162 if (ShiftName ==
"lsl" || ShiftName ==
"LSL" ||
6163 ShiftName ==
"asl" || ShiftName ==
"ASL")
6165 else if (ShiftName ==
"lsr" || ShiftName ==
"LSR")
6167 else if (ShiftName ==
"asr" || ShiftName ==
"ASR")
6169 else if (ShiftName ==
"ror" || ShiftName ==
"ROR")
6171 else if (ShiftName ==
"rrx" || ShiftName ==
"RRX")
6173 else if (ShiftName ==
"uxtw" || ShiftName ==
"UXTW")
6176 return Error(Loc,
"illegal shift operator");
6184 const AsmToken &HashTok = Parser.
getTok();
6191 if (getParser().parseExpression(Expr))
6198 return Error(Loc,
"shift amount must be an immediate");
6199 int64_t
Imm =
CE->getValue();
6203 return Error(Loc,
"immediate shift value out of range");
6217ParseStatus ARMAsmParser::parseFPImm(
OperandVector &Operands) {
6220 MCAsmParser &Parser = getParser();
6247 bool isVmovf =
false;
6249 for (
unsigned I = 1;
I < MnemonicOpsEndInd; ++
I) {
6250 ARMOperand &TyOp =
static_cast<ARMOperand &
>(*Operands[
I]);
6251 if (TyOp.isToken() &&
6252 (TyOp.getToken() ==
".f32" || TyOp.getToken() ==
".f64" ||
6253 TyOp.getToken() ==
".f16")) {
6259 ARMOperand &Mnemonic =
static_cast<ARMOperand &
>(*Operands[0]);
6260 bool isFconst = Mnemonic.isToken() && (Mnemonic.getToken() ==
"fconstd" ||
6261 Mnemonic.getToken() ==
"fconsts");
6262 if (!(isVmovf || isFconst))
6268 bool isNegative =
false;
6273 const AsmToken &Tok = Parser.
getTok();
6274 SMLoc Loc = Tok.
getLoc();
6277 uint64_t
IntVal = RealVal.bitcastToAPInt().getZExtValue();
6279 IntVal ^= (uint64_t)isNegative << 31;
6291 if (Val > 255 || Val < 0)
6292 return Error(Loc,
"encoded floating point value out of range");
6294 Val =
APFloat(RealVal).bitcastToAPInt().getZExtValue();
6302 return Error(Loc,
"invalid floating point immediate");
6307bool ARMAsmParser::parseOperand(
OperandVector &Operands, StringRef Mnemonic) {
6308 MCAsmParser &Parser = getParser();
6313 ParseStatus ResTy = MatchOperandParserImpl(Operands, Mnemonic);
6322 switch (getLexer().getKind()) {
6330 bool ExpectLabel = Mnemonic ==
"b" || Mnemonic ==
"bl";
6332 if (!tryParseRegisterWithWriteBack(Operands))
6334 int Res = tryParseShiftRegister(Operands);
6340 if (Mnemonic ==
"vmrs" &&
6344 Operands.
push_back(ARMOperand::CreateToken(
"APSR_nzcv", S, *
this));
6359 const MCExpr *IdVal;
6361 if (getParser().parseExpression(IdVal))
6364 Operands.
push_back(ARMOperand::CreateImm(IdVal, S,
E, *
this));
6368 return parseMemory(Operands);
6370 bool IsLazyLoadStore = Mnemonic ==
"vlldm" || Mnemonic ==
"vlstm";
6371 bool IsVSCCLRM = Mnemonic ==
"vscclrm";
6372 return parseRegisterList(Operands, !Mnemonic.
starts_with(
"clr"),
false,
6373 IsLazyLoadStore, IsVSCCLRM);
6386 auto AdjacentToken = getLexer().peekTok(
false);
6390 if (!ExpectIdentifier) {
6398 const MCExpr *ImmVal;
6399 if (getParser().parseExpression(ImmVal))
6403 int32_t Val =
CE->getValue();
6404 if (IsNegative && Val == 0)
6409 Operands.
push_back(ARMOperand::CreateImm(ImmVal, S,
E, *
this));
6415 Operands.
push_back(ARMOperand::CreateToken(
6431 if (parsePrefix(Spec))
6434 const MCExpr *SubExprVal;
6435 if (getParser().parseExpression(SubExprVal))
6438 const auto *ExprVal =
6441 Operands.
push_back(ARMOperand::CreateImm(ExprVal, S,
E, *
this));
6446 if (Mnemonic !=
"ldr")
6447 return Error(S,
"unexpected token in operand");
6449 const MCExpr *SubExprVal;
6450 if (getParser().parseExpression(SubExprVal))
6457 ARMOperand::CreateConstantPoolImm(SubExprVal, S,
E, *
this));
6463bool ARMAsmParser::parseImmExpr(int64_t &Out) {
6464 const MCExpr *Expr =
nullptr;
6465 SMLoc
L = getParser().getTok().getLoc();
6466 if (check(getParser().parseExpression(Expr), L,
"expected expression"))
6469 if (check(!
Value, L,
"expected constant expression"))
6471 Out =
Value->getValue();
6479 MCAsmParser &Parser = getParser();
6500 static const struct PrefixEntry {
6501 const char *Spelling;
6503 uint8_t SupportedFormats;
6504 } PrefixEntries[] = {
6516 llvm::find_if(PrefixEntries, [&IDVal](
const PrefixEntry &PE) {
6517 return PE.Spelling == IDVal;
6519 if (Prefix == std::end(PrefixEntries)) {
6524 uint8_t CurrentFormat;
6527 CurrentFormat = MACHO;
6530 CurrentFormat =
ELF;
6533 CurrentFormat =
COFF;
6536 CurrentFormat = WASM;
6546 if (~
Prefix->SupportedFormats & CurrentFormat) {
6548 "cannot represent relocation in the current file format");
6572StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic, StringRef ExtraToken,
6576 unsigned &ProcessorIMod,
6577 StringRef &ITMask) {
6580 CarrySetting =
false;
6586 if ((Mnemonic ==
"movs" &&
isThumb()) || Mnemonic ==
"teq" ||
6587 Mnemonic ==
"vceq" || Mnemonic ==
"svc" || Mnemonic ==
"mls" ||
6588 Mnemonic ==
"smmls" || Mnemonic ==
"vcls" || Mnemonic ==
"vmls" ||
6589 Mnemonic ==
"vnmls" || Mnemonic ==
"vacge" || Mnemonic ==
"vcge" ||
6590 Mnemonic ==
"vclt" || Mnemonic ==
"vacgt" || Mnemonic ==
"vaclt" ||
6591 Mnemonic ==
"vacle" || Mnemonic ==
"hlt" || Mnemonic ==
"vcgt" ||
6592 Mnemonic ==
"vcle" || Mnemonic ==
"smlal" || Mnemonic ==
"umaal" ||
6593 Mnemonic ==
"umlal" || Mnemonic ==
"vabal" || Mnemonic ==
"vmlal" ||
6594 Mnemonic ==
"vpadal" || Mnemonic ==
"vqdmlal" || Mnemonic ==
"fmuls" ||
6595 Mnemonic ==
"vmaxnm" || Mnemonic ==
"vminnm" || Mnemonic ==
"vcvta" ||
6596 Mnemonic ==
"vcvtn" || Mnemonic ==
"vcvtp" || Mnemonic ==
"vcvtm" ||
6597 Mnemonic ==
"vrinta" || Mnemonic ==
"vrintn" || Mnemonic ==
"vrintp" ||
6598 Mnemonic ==
"vrintm" || Mnemonic ==
"hvc" ||
6599 Mnemonic.
starts_with(
"vsel") || Mnemonic ==
"vins" ||
6600 Mnemonic ==
"vmovx" || Mnemonic ==
"bxns" || Mnemonic ==
"blxns" ||
6601 Mnemonic ==
"vdot" || Mnemonic ==
"vmmla" || Mnemonic ==
"vudot" ||
6602 Mnemonic ==
"vsdot" || Mnemonic ==
"vcmla" || Mnemonic ==
"vcadd" ||
6603 Mnemonic ==
"vfmal" || Mnemonic ==
"vfmsl" || Mnemonic ==
"wls" ||
6604 Mnemonic ==
"le" || Mnemonic ==
"dls" || Mnemonic ==
"csel" ||
6605 Mnemonic ==
"csinc" || Mnemonic ==
"csinv" || Mnemonic ==
"csneg" ||
6606 Mnemonic ==
"cinc" || Mnemonic ==
"cinv" || Mnemonic ==
"cneg" ||
6607 Mnemonic ==
"cset" || Mnemonic ==
"csetm" || Mnemonic ==
"aut" ||
6608 Mnemonic ==
"pac" || Mnemonic ==
"pacbti" || Mnemonic ==
"bti")
6613 if (Mnemonic !=
"adcs" && Mnemonic !=
"bics" && Mnemonic !=
"movs" &&
6614 Mnemonic !=
"muls" && Mnemonic !=
"smlals" && Mnemonic !=
"smulls" &&
6615 Mnemonic !=
"umlals" && Mnemonic !=
"umulls" && Mnemonic !=
"lsls" &&
6616 Mnemonic !=
"sbcs" && Mnemonic !=
"rscs" &&
6618 (Mnemonic ==
"vmine" || Mnemonic ==
"vshle" || Mnemonic ==
"vshlt" ||
6619 Mnemonic ==
"vshllt" || Mnemonic ==
"vrshle" || Mnemonic ==
"vrshlt" ||
6620 Mnemonic ==
"vmvne" || Mnemonic ==
"vorne" || Mnemonic ==
"vnege" ||
6621 Mnemonic ==
"vnegt" || Mnemonic ==
"vmule" || Mnemonic ==
"vmult" ||
6622 Mnemonic ==
"vrintne" || Mnemonic ==
"vcmult" ||
6623 Mnemonic ==
"vcmule" || Mnemonic ==
"vpsele" || Mnemonic ==
"vpselt" ||
6627 Mnemonic = Mnemonic.
slice(0, Mnemonic.
size() - 2);
6635 !(Mnemonic ==
"cps" || Mnemonic ==
"mls" || Mnemonic ==
"mrs" ||
6636 Mnemonic ==
"smmls" || Mnemonic ==
"vabs" || Mnemonic ==
"vcls" ||
6637 Mnemonic ==
"vmls" || Mnemonic ==
"vmrs" || Mnemonic ==
"vnmls" ||
6638 Mnemonic ==
"vqabs" || Mnemonic ==
"vrecps" || Mnemonic ==
"vrsqrts" ||
6639 Mnemonic ==
"srs" || Mnemonic ==
"flds" || Mnemonic ==
"fmrs" ||
6640 Mnemonic ==
"fsqrts" || Mnemonic ==
"fsubs" || Mnemonic ==
"fsts" ||
6641 Mnemonic ==
"fcpys" || Mnemonic ==
"fdivs" || Mnemonic ==
"fmuls" ||
6642 Mnemonic ==
"fcmps" || Mnemonic ==
"fcmpzs" || Mnemonic ==
"vfms" ||
6643 Mnemonic ==
"vfnms" || Mnemonic ==
"fconsts" || Mnemonic ==
"bxns" ||
6644 Mnemonic ==
"blxns" || Mnemonic ==
"vfmas" || Mnemonic ==
"vmlas" ||
6645 (Mnemonic ==
"movs" &&
isThumb()))) {
6646 Mnemonic = Mnemonic.
slice(0, Mnemonic.
size() - 1);
6647 CarrySetting =
true;
6655 StringSwitch<unsigned>(Mnemonic.
substr(Mnemonic.
size()-2, 2))
6660 Mnemonic = Mnemonic.
slice(0, Mnemonic.
size()-2);
6661 ProcessorIMod =
IMod;
6665 if (isMnemonicVPTPredicable(Mnemonic, ExtraToken) && Mnemonic !=
"vmovlt" &&
6666 Mnemonic !=
"vshllt" && Mnemonic !=
"vrshrnt" && Mnemonic !=
"vshrnt" &&
6667 Mnemonic !=
"vqrshrunt" && Mnemonic !=
"vqshrunt" &&
6668 Mnemonic !=
"vqrshrnt" && Mnemonic !=
"vqshrnt" && Mnemonic !=
"vmullt" &&
6669 Mnemonic !=
"vqmovnt" && Mnemonic !=
"vqmovunt" && Mnemonic !=
"vmovnt" &&
6670 Mnemonic !=
"vqdmullt" && Mnemonic !=
"vpnot" && Mnemonic !=
"vcvtt" &&
6671 Mnemonic !=
"vcvt") {
6675 Mnemonic = Mnemonic.
slice(0, Mnemonic.
size()-1);
6683 ITMask = Mnemonic.
substr(2);
6684 Mnemonic = Mnemonic.
slice(0, 2);
6688 ITMask = Mnemonic.
substr(4);
6689 Mnemonic = Mnemonic.
slice(0, 4);
6691 ITMask = Mnemonic.
substr(3);
6692 Mnemonic = Mnemonic.
slice(0, 3);
6702void ARMAsmParser::getMnemonicAcceptInfo(StringRef Mnemonic,
6703 StringRef ExtraToken,
6705 bool &CanAcceptCarrySet,
6706 bool &CanAcceptPredicationCode,
6707 bool &CanAcceptVPTPredicationCode) {
6708 CanAcceptVPTPredicationCode = isMnemonicVPTPredicable(Mnemonic, ExtraToken);
6711 Mnemonic ==
"and" || Mnemonic ==
"lsl" || Mnemonic ==
"lsr" ||
6712 Mnemonic ==
"rrx" || Mnemonic ==
"ror" || Mnemonic ==
"sub" ||
6713 Mnemonic ==
"add" || Mnemonic ==
"adc" || Mnemonic ==
"mul" ||
6714 Mnemonic ==
"bic" || Mnemonic ==
"asr" || Mnemonic ==
"orr" ||
6715 Mnemonic ==
"mvn" || Mnemonic ==
"rsb" || Mnemonic ==
"rsc" ||
6716 Mnemonic ==
"orn" || Mnemonic ==
"sbc" || Mnemonic ==
"eor" ||
6717 Mnemonic ==
"neg" || Mnemonic ==
"vfm" || Mnemonic ==
"vfnm" ||
6719 (Mnemonic ==
"smull" || Mnemonic ==
"mov" || Mnemonic ==
"mla" ||
6720 Mnemonic ==
"smlal" || Mnemonic ==
"umlal" || Mnemonic ==
"umull"));
6722 if (Mnemonic ==
"bkpt" || Mnemonic ==
"cbnz" || Mnemonic ==
"setend" ||
6723 Mnemonic ==
"cps" || Mnemonic ==
"it" || Mnemonic ==
"cbz" ||
6724 Mnemonic ==
"trap" || Mnemonic ==
"hlt" || Mnemonic ==
"udf" ||
6726 Mnemonic.
starts_with(
"vsel") || Mnemonic ==
"vmaxnm" ||
6727 Mnemonic ==
"vminnm" || Mnemonic ==
"vcvta" || Mnemonic ==
"vcvtn" ||
6728 Mnemonic ==
"vcvtp" || Mnemonic ==
"vcvtm" || Mnemonic ==
"vrinta" ||
6729 Mnemonic ==
"vrintn" || Mnemonic ==
"vrintp" || Mnemonic ==
"vrintm" ||
6730 Mnemonic.
starts_with(
"aes") || Mnemonic ==
"hvc" ||
6731 Mnemonic ==
"setpan" || Mnemonic.
starts_with(
"sha1") ||
6734 Mnemonic ==
"vmovx" || Mnemonic ==
"vins" || Mnemonic ==
"vudot" ||
6735 Mnemonic ==
"vsdot" || Mnemonic ==
"vcmla" || Mnemonic ==
"vcadd" ||
6736 Mnemonic ==
"vfmal" || Mnemonic ==
"vfmsl" || Mnemonic ==
"vfmat" ||
6737 Mnemonic ==
"vfmab" || Mnemonic ==
"vdot" || Mnemonic ==
"vmmla" ||
6738 Mnemonic ==
"sb" || Mnemonic ==
"ssbb" || Mnemonic ==
"pssbb" ||
6739 Mnemonic ==
"vsmmla" || Mnemonic ==
"vummla" || Mnemonic ==
"vusmmla" ||
6740 Mnemonic ==
"vusdot" || Mnemonic ==
"vsudot" || Mnemonic ==
"bfcsel" ||
6741 Mnemonic ==
"wls" || Mnemonic ==
"dls" || Mnemonic ==
"le" ||
6742 Mnemonic ==
"csel" || Mnemonic ==
"csinc" || Mnemonic ==
"csinv" ||
6743 Mnemonic ==
"csneg" || Mnemonic ==
"cinc" || Mnemonic ==
"cinv" ||
6744 Mnemonic ==
"cneg" || Mnemonic ==
"cset" || Mnemonic ==
"csetm" ||
6745 (hasCDE() && MS.isCDEInstr(Mnemonic) &&
6746 !MS.isITPredicableCDEInstr(Mnemonic)) ||
6748 Mnemonic ==
"pac" || Mnemonic ==
"pacbti" || Mnemonic ==
"aut" ||
6749 Mnemonic ==
"bti" ||
6756 CanAcceptPredicationCode =
false;
6759 CanAcceptPredicationCode =
6760 Mnemonic !=
"cdp2" && Mnemonic !=
"clrex" && Mnemonic !=
"mcr2" &&
6761 Mnemonic !=
"mcrr2" && Mnemonic !=
"mrc2" && Mnemonic !=
"mrrc2" &&
6762 Mnemonic !=
"dmb" && Mnemonic !=
"dfb" && Mnemonic !=
"dsb" &&
6763 Mnemonic !=
"isb" && Mnemonic !=
"pld" && Mnemonic !=
"pli" &&
6764 Mnemonic !=
"pldw" && Mnemonic !=
"ldc2" && Mnemonic !=
"ldc2l" &&
6765 Mnemonic !=
"stc2" && Mnemonic !=
"stc2l" && Mnemonic !=
"tsb" &&
6767 }
else if (isThumbOne()) {
6769 CanAcceptPredicationCode = Mnemonic !=
"movs";
6771 CanAcceptPredicationCode = Mnemonic !=
"nop" && Mnemonic !=
"movs";
6773 CanAcceptPredicationCode =
true;
6777 for (
unsigned I = 0;
I < MnemonicOpsEndInd; ++
I) {
6778 auto &
Op =
static_cast<ARMOperand &
>(*Operands[
I]);
6779 if (
Op.isToken() &&
Op.getToken() ==
".w")
6789void ARMAsmParser::tryConvertingToTwoOperandForm(
6795 if (Operands.
size() != MnemonicOpsEndInd + 3)
6798 const auto &Op3 =
static_cast<ARMOperand &
>(*Operands[MnemonicOpsEndInd]);
6799 auto &Op4 =
static_cast<ARMOperand &
>(*Operands[MnemonicOpsEndInd + 1]);
6800 if (!Op3.isReg() || !Op4.isReg())
6803 auto Op3Reg = Op3.getReg();
6804 auto Op4Reg = Op4.getReg();
6810 auto &Op5 =
static_cast<ARMOperand &
>(*Operands[MnemonicOpsEndInd + 2]);
6812 if (Mnemonic !=
"add")
6814 bool TryTransform = Op3Reg == ARM::PC || Op4Reg == ARM::PC ||
6815 (Op5.isReg() && Op5.getReg() == ARM::PC);
6816 if (!TryTransform) {
6817 TryTransform = (Op3Reg == ARM::SP || Op4Reg == ARM::SP ||
6818 (Op5.isReg() && Op5.getReg() == ARM::SP)) &&
6819 !(Op3Reg == ARM::SP && Op4Reg == ARM::SP &&
6820 Op5.isImm() && !Op5.isImm0_508s4());
6824 }
else if (!isThumbOne())
6827 if (!(Mnemonic ==
"add" || Mnemonic ==
"sub" || Mnemonic ==
"and" ||
6828 Mnemonic ==
"eor" || Mnemonic ==
"lsl" || Mnemonic ==
"lsr" ||
6829 Mnemonic ==
"asr" || Mnemonic ==
"adc" || Mnemonic ==
"sbc" ||
6830 Mnemonic ==
"ror" || Mnemonic ==
"orr" || Mnemonic ==
"bic"))
6836 bool Transform = Op3Reg == Op4Reg;
6841 const ARMOperand *LastOp = &Op5;
6843 if (!Transform && Op5.isReg() && Op3Reg == Op5.getReg() &&
6844 ((Mnemonic ==
"add" && Op4Reg != ARM::SP) ||
6845 Mnemonic ==
"and" || Mnemonic ==
"eor" ||
6846 Mnemonic ==
"adc" || Mnemonic ==
"orr")) {
6857 if (((Mnemonic ==
"add" && CarrySetting) || Mnemonic ==
"sub") &&
6863 if ((Mnemonic ==
"add" || Mnemonic ==
"sub") && LastOp->isImm0_7())
6870 Operands.
erase(Operands.
begin() + MnemonicOpsEndInd);
6879 ARMOperand &
Op =
static_cast<ARMOperand &
>(MCOp);
6889bool ARMAsmParser::shouldOmitVectorPredicateOperand(
6890 StringRef Mnemonic,
OperandVector &Operands,
unsigned MnemonicOpsEndInd) {
6891 if (!hasMVE() || Operands.
size() <= MnemonicOpsEndInd)
6904 for (
auto &Operand : Operands) {
6905 if (
static_cast<ARMOperand &
>(*Operand).isVectorIndex() ||
6906 ((*Operand).isReg() &&
6907 (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(
6908 (*Operand).getReg()) ||
6909 ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
6910 (*Operand).getReg())))) {
6916 for (
auto &Operand : Operands) {
6920 if (
static_cast<ARMOperand &
>(*Operand).isVectorIndex() ||
6921 static_cast<ARMOperand &
>(*Operand).isQReg())
6937 unsigned VariantID);
6948void ARMAsmParser::fixupGNULDRDAlias(
StringRef Mnemonic,
6950 unsigned MnemonicOpsEndInd) {
6951 if (Mnemonic !=
"ldrd" && Mnemonic !=
"strd" && Mnemonic !=
"ldrexd" &&
6952 Mnemonic !=
"strexd" && Mnemonic !=
"ldaexd" && Mnemonic !=
"stlexd")
6955 unsigned IdX = Mnemonic ==
"strexd" || Mnemonic ==
"stlexd"
6956 ? MnemonicOpsEndInd + 1
6957 : MnemonicOpsEndInd;
6959 if (Operands.
size() < IdX + 2)
6962 ARMOperand &Op2 =
static_cast<ARMOperand &
>(*Operands[IdX]);
6963 ARMOperand &Op3 =
static_cast<ARMOperand &
>(*Operands[IdX + 1]);
6967 if (!Op3.isGPRMem())
6975 if (!
isThumb() && (RtEncoding & 1)) {
6980 if (Op2.getReg() == ARM::PC)
6982 MCRegister PairedReg = GPR.
getRegister(RtEncoding + 1);
6983 if (!PairedReg || PairedReg == ARM::PC ||
6984 (PairedReg == ARM::SP && !hasV8Ops()))
6988 ARMOperand::CreateReg(PairedReg, Op2.getStartLoc(),
6989 Op2.getEndLoc(), *
this));
6997bool ARMAsmParser::CDEConvertDualRegOperand(StringRef Mnemonic,
6999 unsigned MnemonicOpsEndInd) {
7000 assert(MS.isCDEDualRegInstr(Mnemonic));
7002 if (Operands.
size() < 3 + MnemonicOpsEndInd)
7006 "operand must be an even-numbered register in the range [r0, r10]");
7008 const MCParsedAsmOperand &Op2 = *Operands[MnemonicOpsEndInd + 1];
7039 RPair = ARM::R10_R11;
7043 const MCParsedAsmOperand &Op3 = *Operands[MnemonicOpsEndInd + 2];
7047 Operands.
erase(Operands.
begin() + MnemonicOpsEndInd + 2);
7048 Operands[MnemonicOpsEndInd + 1] =
7054 for (
unsigned I = 0;
I < MnemonicOpsEndInd; ++
I)
7055 if (
static_cast<ARMOperand &
>(*Operands[
I]).isCondCode()) {
7057 --MnemonicOpsEndInd;
7063 for (
unsigned I = 0;
I < MnemonicOpsEndInd; ++
I)
7064 if (
static_cast<ARMOperand &
>(*Operands[
I]).isCCOut()) {
7066 --MnemonicOpsEndInd;
7072 for (
unsigned I = 0;
I < MnemonicOpsEndInd; ++
I)
7073 if (
static_cast<ARMOperand &
>(*Operands[
I]).isVPTPred()) {
7075 --MnemonicOpsEndInd;
7081bool ARMAsmParser::parseInstruction(ParseInstructionInfo &Info, StringRef Name,
7083 MCAsmParser &Parser = getParser();
7090 const FeatureBitset &AvailableFeatures = getAvailableFeatures();
7091 unsigned AssemblerDialect = getParser().getAssemblerDialect();
7097 parseDirectiveReq(Name, NameLoc);
7105 StringRef Mnemonic =
Name.slice(Start,
Next);
7111 unsigned ProcessorIMod;
7114 Mnemonic = splitMnemonic(Mnemonic, ExtraToken, PredicationCode, VPTPredicationCode,
7115 CarrySetting, ProcessorIMod, ITMask);
7118 if (isThumbOne() && PredicationCode !=
ARMCC::AL && Mnemonic !=
"b") {
7119 return Error(NameLoc,
"conditional execution not supported in Thumb1");
7122 Operands.
push_back(ARMOperand::CreateToken(Mnemonic, NameLoc, *
this));
7135 if (Mnemonic ==
"it" || Mnemonic.
starts_with(
"vpt") ||
7138 Mnemonic ==
"vpt" ? SMLoc::getFromPointer(NameLoc.
getPointer() + 3) :
7139 SMLoc::getFromPointer(NameLoc.
getPointer() + 4);
7140 if (ITMask.
size() > 3) {
7141 if (Mnemonic ==
"it")
7142 return Error(Loc,
"too many conditions on IT instruction");
7143 return Error(Loc,
"too many conditions on VPT instruction");
7147 if (Pos !=
't' && Pos !=
'e') {
7148 return Error(Loc,
"illegal IT block condition mask '" + ITMask +
"'");
7154 Operands.
push_back(ARMOperand::CreateITMask(Mask, Loc, *
this));
7167 bool CanAcceptCarrySet, CanAcceptPredicationCode, CanAcceptVPTPredicationCode;
7168 getMnemonicAcceptInfo(Mnemonic, ExtraToken, Name, CanAcceptCarrySet,
7169 CanAcceptPredicationCode, CanAcceptVPTPredicationCode);
7173 if (!CanAcceptCarrySet && CarrySetting) {
7174 return Error(NameLoc,
"instruction '" + Mnemonic +
7175 "' can not set flags, but 's' suffix specified");
7179 if (!CanAcceptPredicationCode && PredicationCode !=
ARMCC::AL) {
7180 return Error(NameLoc,
"instruction '" + Mnemonic +
7181 "' is not predicable, but condition code specified");
7186 if (!CanAcceptVPTPredicationCode && VPTPredicationCode !=
ARMVCC::None) {
7187 return Error(NameLoc,
"instruction '" + Mnemonic +
7188 "' is not VPT predicable, but VPT code T/E is specified");
7192 if (CanAcceptCarrySet && CarrySetting) {
7194 Operands.
push_back(ARMOperand::CreateCCOut(
7195 CarrySetting ? ARM::CPSR : ARM::NoRegister, Loc, *
this));
7202 Operands.
push_back(ARMOperand::CreateCondCode(
7210 !(Mnemonic.
starts_with(
"vcvt") && Mnemonic !=
"vcvta" &&
7211 Mnemonic !=
"vcvtn" && Mnemonic !=
"vcvtp" && Mnemonic !=
"vcvtm")) {
7214 Operands.
push_back(ARMOperand::CreateVPTPred(
7219 if (ProcessorIMod) {
7220 Operands.
push_back(ARMOperand::CreateImm(
7223 }
else if (Mnemonic ==
"cps" && isMClass()) {
7224 return Error(NameLoc,
"instruction 'cps' requires effect for M-class");
7231 ExtraToken =
Name.slice(Start,
Next);
7240 if (ExtraToken ==
".n" && !
isThumb()) {
7242 return Error(Loc,
"instruction with .n (narrow) qualifier not allowed in "
7249 if (ExtraToken !=
".n" && (
isThumb() || ExtraToken !=
".w")) {
7251 Operands.
push_back(ARMOperand::CreateToken(ExtraToken, Loc, *
this));
7258 unsigned MnemonicOpsEndInd = Operands.
size();
7263 if (parseOperand(Operands, Mnemonic)) {
7269 if (parseOperand(Operands, Mnemonic)) {
7278 tryConvertingToTwoOperandForm(Mnemonic, PredicationCode, CarrySetting,
7279 Operands, MnemonicOpsEndInd);
7281 if (hasCDE() && MS.isCDEInstr(Mnemonic)) {
7289 if (MS.isCDEDualRegInstr(Mnemonic)) {
7291 CDEConvertDualRegOperand(Mnemonic, Operands, MnemonicOpsEndInd);
7298 if (!shouldOmitVectorPredicateOperand(Mnemonic, Operands,
7299 MnemonicOpsEndInd) &&
7300 Mnemonic ==
"vmov" && PredicationCode ==
ARMCC::LT) {
7308 Mnemonic.
size() - 1 + CarrySetting);
7311 Operands.
insert(Operands.
begin(), ARMOperand::CreateToken(
7312 StringRef(
"vmovlt"), MLoc, *
this));
7313 }
else if (Mnemonic ==
"vcvt" && PredicationCode ==
ARMCC::NE &&
7314 !shouldOmitVectorPredicateOperand(Mnemonic, Operands,
7315 MnemonicOpsEndInd)) {
7324 Mnemonic.
size() - 1 + CarrySetting);
7328 ARMOperand::CreateToken(StringRef(
"vcvtn"), MLoc, *
this));
7329 }
else if (Mnemonic ==
"vmul" && PredicationCode ==
ARMCC::LT &&
7330 !shouldOmitVectorPredicateOperand(Mnemonic, Operands,
7331 MnemonicOpsEndInd)) {
7338 Operands.
insert(Operands.
begin(), ARMOperand::CreateToken(
7339 StringRef(
"vmullt"), MLoc, *
this));
7344 if (!shouldOmitVectorPredicateOperand(Mnemonic, Operands,
7345 MnemonicOpsEndInd)) {
7352 if (Mnemonic.
starts_with(
"vcvtt") && MnemonicOpsEndInd > 2) {
7354 static_cast<ARMOperand &
>(*Operands[MnemonicOpsEndInd - 2]);
7356 static_cast<ARMOperand &
>(*Operands[MnemonicOpsEndInd - 1]);
7357 if (!(Sz1.isToken() && Sz1.getToken().starts_with(
".f") &&
7358 Sz2.isToken() && Sz2.getToken().starts_with(
".f"))) {
7363 Mnemonic = Mnemonic.
substr(0, 4);
7365 ARMOperand::CreateToken(Mnemonic, MLoc, *
this));
7369 Mnemonic.
size() + CarrySetting);
7372 ARMOperand::CreateVPTPred(
7374 ++MnemonicOpsEndInd;
7376 }
else if (CanAcceptVPTPredicationCode) {
7380 if (shouldOmitVectorPredicateOperand(Mnemonic, Operands,
7381 MnemonicOpsEndInd)) {
7388 bool usedVPTPredicationCode =
false;
7389 for (
unsigned I = 1;
I < Operands.
size(); ++
I)
7390 if (
static_cast<ARMOperand &
>(*Operands[
I]).isVPTPred())
7391 usedVPTPredicationCode =
true;
7392 if (!usedVPTPredicationCode) {
7400 Mnemonic =
Name.slice(0, Mnemonic.
size() + 1);
7403 ARMOperand::CreateToken(Mnemonic, NameLoc, *
this));
7412 if (!
isThumb() && Mnemonic ==
"blx" &&
7413 Operands.
size() == MnemonicOpsEndInd + 1 &&
7414 static_cast<ARMOperand &
>(*Operands[MnemonicOpsEndInd]).isImm())
7418 fixupGNULDRDAlias(Mnemonic, Operands, MnemonicOpsEndInd);
7427 bool IsLoad = (Mnemonic ==
"ldrexd" || Mnemonic ==
"ldaexd");
7428 if (!
isThumb() && Operands.
size() > MnemonicOpsEndInd + 1 + (!IsLoad) &&
7429 (Mnemonic ==
"ldrexd" || Mnemonic ==
"strexd" || Mnemonic ==
"ldaexd" ||
7430 Mnemonic ==
"stlexd")) {
7431 unsigned Idx = IsLoad ? MnemonicOpsEndInd : MnemonicOpsEndInd + 1;
7432 ARMOperand &Op1 =
static_cast<ARMOperand &
>(*Operands[Idx]);
7433 ARMOperand &Op2 =
static_cast<ARMOperand &
>(*Operands[Idx + 1]);
7435 const MCRegisterClass &MRC = MRI->
getRegClass(ARM::GPRRegClassID);
7437 if (Op1.isReg() && MRC.
contains(Op1.getReg())) {
7438 MCRegister Reg1 = Op1.getReg();
7440 MCRegister Reg2 = Op2.getReg();
7444 return Error(Op2.getStartLoc(),
7445 IsLoad ?
"destination operands must be sequential"
7446 :
"source operands must be sequential");
7452 IsLoad ?
"destination operands must start start at an even register"
7453 :
"source operands must start start at an even register");
7456 Reg1, ARM::gsub_0, &(MRI->
getRegClass(ARM::GPRPairRegClassID)));
7457 Operands[Idx] = ARMOperand::CreateReg(NewReg, Op1.getStartLoc(),
7458 Op2.getEndLoc(), *
this);
7468 if (isThumbTwo() && Mnemonic ==
"sub" &&
7469 Operands.
size() == MnemonicOpsEndInd + 3 &&
7470 static_cast<ARMOperand &
>(*Operands[MnemonicOpsEndInd]).isReg() &&
7471 static_cast<ARMOperand &
>(*Operands[MnemonicOpsEndInd]).getReg() ==
7473 static_cast<ARMOperand &
>(*Operands[MnemonicOpsEndInd + 1]).isReg() &&
7474 static_cast<ARMOperand &
>(*Operands[MnemonicOpsEndInd + 1]).getReg() ==
7476 static_cast<ARMOperand &
>(*Operands[MnemonicOpsEndInd + 2]).isImm()) {
7477 Operands.
front() = ARMOperand::CreateToken(Name, NameLoc, *
this);
7517 return Inst.
getOpcode() == ARM::tBKPT ||
7524 unsigned MnemonicOpsEndInd) {
7525 for (
unsigned I = MnemonicOpsEndInd;
I < Operands.
size(); ++
I) {
7526 const ARMOperand &
Op =
static_cast<const ARMOperand &
>(*Operands[
I]);
7527 if (
Op.isRegList()) {
7534bool ARMAsmParser::validatetLDMRegList(
const MCInst &Inst,
7536 unsigned MnemonicOpsEndInd,
7537 unsigned ListIndex,
bool IsARPop) {
7542 if (!IsARPop && ListContainsSP)
7544 Operands[
getRegListInd(Operands, MnemonicOpsEndInd)]->getStartLoc(),
7545 "SP may not be in the register list");
7546 if (ListContainsPC && ListContainsLR)
7548 Operands[
getRegListInd(Operands, MnemonicOpsEndInd)]->getStartLoc(),
7549 "PC and LR may not be in the register list simultaneously");
7553bool ARMAsmParser::validatetSTMRegList(
const MCInst &Inst,
7555 unsigned MnemonicOpsEndInd,
7556 unsigned ListIndex) {
7560 if (ListContainsSP && ListContainsPC)
7562 Operands[
getRegListInd(Operands, MnemonicOpsEndInd)]->getStartLoc(),
7563 "SP and PC may not be in the register list");
7566 Operands[
getRegListInd(Operands, MnemonicOpsEndInd)]->getStartLoc(),
7567 "SP may not be in the register list");
7570 Operands[
getRegListInd(Operands, MnemonicOpsEndInd)]->getStartLoc(),
7571 "PC may not be in the register list");
7575bool ARMAsmParser::validateLDRDSTRD(MCInst &Inst,
const OperandVector &Operands,
7576 bool Load,
bool ARMMode,
bool Writeback,
7577 unsigned MnemonicOpsEndInd) {
7578 unsigned RtIndex =
Load || !Writeback ? 0 : 1;
7585 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7590 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7591 "Rt must be even-numbered");
7594 if (Rt2 != Rt + 1) {
7596 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7597 "destination operands must be sequential");
7599 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7600 "source operands must be sequential");
7607 if (!ARMMode && Load) {
7609 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7610 "destination operands can't be identical");
7616 if (Rn == Rt || Rn == Rt2) {
7618 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7619 "base register needs to be different from destination "
7622 return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7623 "source register and base register can't be identical");
7634 for (
unsigned i = 0; i <
MCID.NumOperands; ++i) {
7646 ARMOperand &
Op =
static_cast<ARMOperand &
>(MCOp);
7653bool ARMAsmParser::validateInstruction(MCInst &Inst,
7655 unsigned MnemonicOpsEndInd) {
7656 const MCInstrDesc &MCID = MII.get(Inst.
getOpcode());
7657 SMLoc Loc = Operands[0]->getStartLoc();
7665 return Error(Loc,
"instructions in IT block must be predicable");
7668 if (
Cond != currentITCond()) {
7670 SMLoc CondLoc = Operands[0]->getEndLoc();
7671 for (
unsigned I = 1;
I < Operands.
size(); ++
I)
7672 if (
static_cast<ARMOperand &
>(*Operands[
I]).isCondCode())
7673 CondLoc = Operands[
I]->getStartLoc();
7674 return Error(CondLoc,
"incorrect condition in IT block; got '" +
7676 "', but expected '" +
7685 return Error(Loc,
"predicated instructions must be in IT block");
7689 return Warning(Loc,
"predicated instructions should be in IT block");
7696 if (MCID.
operands()[i].isPredicate()) {
7698 return Error(Loc,
"instruction is not predicable");
7706 if (inExplicitITBlock() && !lastInITBlock() && isITBlockTerminator(Inst)) {
7707 return Error(Loc,
"instruction must be outside of IT block or the last instruction in an IT block");
7711 unsigned Bit = extractITMaskBit(VPTState.Mask, VPTState.CurPosition);
7713 return Error(Loc,
"instruction in VPT block must be predicable");
7716 if (Pred != VPTPred) {
7718 for (
unsigned I = 1;
I < Operands.
size(); ++
I)
7719 if (
static_cast<ARMOperand &
>(*Operands[
I]).isVPTPred())
7720 PredLoc = Operands[
I]->getStartLoc();
7721 return Error(PredLoc,
"incorrect predication in VPT block; got '" +
7723 "', but expected '" +
7730 return Error(Loc,
"VPT predicated instructions must be in VPT block");
7732 const unsigned Opcode = Inst.
getOpcode();
7737 case ARM::VLSTM_T2: {
7740 if (Operands.
size() ==
7741 MnemonicOpsEndInd + 2) {
7742 ARMOperand &
Op =
static_cast<ARMOperand &
>(
7743 *Operands[MnemonicOpsEndInd + 1]);
7745 auto &RegList =
Op.getRegList();
7747 if (RegList.size() == 32 && !hasV8_1MMainline()) {
7748 return Error(
Op.getEndLoc(),
"T2 version requires v8.1-M.Main");
7751 if (hasD32() && RegList.size() != 32) {
7752 return Error(
Op.getEndLoc(),
"operand must be exactly {d0-d31}");
7755 if (!hasD32() && (RegList.size() != 16 && RegList.size() != 32)) {
7757 "operand must be exactly {d0-d15} (T1) or {d0-d31} (T2)");
7773 return Error(Loc,
"unpredictable IT predicate sequence");
7777 if (validateLDRDSTRD(Inst, Operands,
true,
true,
7778 false, MnemonicOpsEndInd))
7782 case ARM::LDRD_POST:
7783 if (validateLDRDSTRD(Inst, Operands,
true,
true,
7784 true, MnemonicOpsEndInd))
7788 if (validateLDRDSTRD(Inst, Operands,
true,
false,
7789 false, MnemonicOpsEndInd))
7792 case ARM::t2LDRD_PRE:
7793 case ARM::t2LDRD_POST:
7794 if (validateLDRDSTRD(Inst, Operands,
true,
false,
7795 true, MnemonicOpsEndInd))
7801 if (RmReg == ARM::SP && !hasV8Ops())
7802 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7803 "r13 (SP) is an unpredictable operand to BXJ");
7807 if (validateLDRDSTRD(Inst, Operands,
false,
true,
7808 false, MnemonicOpsEndInd))
7812 case ARM::STRD_POST:
7813 if (validateLDRDSTRD(Inst, Operands,
false,
true,
7814 true, MnemonicOpsEndInd))
7817 case ARM::t2STRD_PRE:
7818 case ARM::t2STRD_POST:
7819 if (validateLDRDSTRD(Inst, Operands,
false,
false,
7820 true, MnemonicOpsEndInd))
7823 case ARM::STR_PRE_IMM:
7824 case ARM::STR_PRE_REG:
7825 case ARM::t2STR_PRE:
7826 case ARM::STR_POST_IMM:
7827 case ARM::STR_POST_REG:
7828 case ARM::t2STR_POST:
7830 case ARM::t2STRH_PRE:
7831 case ARM::STRH_POST:
7832 case ARM::t2STRH_POST:
7833 case ARM::STRB_PRE_IMM:
7834 case ARM::STRB_PRE_REG:
7835 case ARM::t2STRB_PRE:
7836 case ARM::STRB_POST_IMM:
7837 case ARM::STRB_POST_REG:
7838 case ARM::t2STRB_POST: {
7844 return Error(Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
7845 "source register and base register can't be identical");
7848 case ARM::t2LDR_PRE_imm:
7849 case ARM::t2LDR_POST_imm:
7850 case ARM::t2STR_PRE_imm:
7851 case ARM::t2STR_POST_imm: {
7857 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7858 "destination register and base register can't be identical");
7859 if (Inst.
getOpcode() == ARM::t2LDR_POST_imm ||
7860 Inst.
getOpcode() == ARM::t2STR_POST_imm) {
7862 if (Imm > 255 || Imm < -255)
7863 return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7864 "operand must be in range [-255, 255]");
7866 if (Inst.
getOpcode() == ARM::t2STR_PRE_imm ||
7867 Inst.
getOpcode() == ARM::t2STR_POST_imm) {
7869 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7870 "operand must be a register in range [r0, r14]");
7876 case ARM::t2LDRB_OFFSET_imm:
7877 case ARM::t2LDRB_PRE_imm:
7878 case ARM::t2LDRB_POST_imm:
7879 case ARM::t2STRB_OFFSET_imm:
7880 case ARM::t2STRB_PRE_imm:
7881 case ARM::t2STRB_POST_imm: {
7882 if (Inst.
getOpcode() == ARM::t2LDRB_POST_imm ||
7883 Inst.
getOpcode() == ARM::t2STRB_POST_imm ||
7884 Inst.
getOpcode() == ARM::t2LDRB_PRE_imm ||
7885 Inst.
getOpcode() == ARM::t2STRB_PRE_imm) {
7887 if (Imm > 255 || Imm < -255)
7888 return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7889 "operand must be in range [-255, 255]");
7890 }
else if (Inst.
getOpcode() == ARM::t2LDRB_OFFSET_imm ||
7891 Inst.
getOpcode() == ARM::t2STRB_OFFSET_imm) {
7893 if (Imm > 0 || Imm < -255)
7894 return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7895 "operand must be in range [0, 255] with a negative sign");
7898 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7899 "if operand is PC, should call the LDRB (literal)");
7904 case ARM::t2LDRH_OFFSET_imm:
7905 case ARM::t2LDRH_PRE_imm:
7906 case ARM::t2LDRH_POST_imm:
7907 case ARM::t2STRH_OFFSET_imm:
7908 case ARM::t2STRH_PRE_imm:
7909 case ARM::t2STRH_POST_imm: {
7910 if (Inst.
getOpcode() == ARM::t2LDRH_POST_imm ||
7911 Inst.
getOpcode() == ARM::t2STRH_POST_imm ||
7912 Inst.
getOpcode() == ARM::t2LDRH_PRE_imm ||
7913 Inst.
getOpcode() == ARM::t2STRH_PRE_imm) {
7915 if (Imm > 255 || Imm < -255)
7916 return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7917 "operand must be in range [-255, 255]");
7918 }
else if (Inst.
getOpcode() == ARM::t2LDRH_OFFSET_imm ||
7919 Inst.
getOpcode() == ARM::t2STRH_OFFSET_imm) {
7921 if (Imm > 0 || Imm < -255)
7922 return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7923 "operand must be in range [0, 255] with a negative sign");
7926 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7927 "if operand is PC, should call the LDRH (literal)");
7932 case ARM::t2LDRSB_OFFSET_imm:
7933 case ARM::t2LDRSB_PRE_imm:
7934 case ARM::t2LDRSB_POST_imm: {
7935 if (Inst.
getOpcode() == ARM::t2LDRSB_POST_imm ||
7936 Inst.
getOpcode() == ARM::t2LDRSB_PRE_imm) {
7938 if (Imm > 255 || Imm < -255)
7939 return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7940 "operand must be in range [-255, 255]");
7941 }
else if (Inst.
getOpcode() == ARM::t2LDRSB_OFFSET_imm) {
7943 if (Imm > 0 || Imm < -255)
7944 return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7945 "operand must be in range [0, 255] with a negative sign");
7948 return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7949 "if operand is PC, should call the LDRH (literal)");
7954 case ARM::t2LDRSH_OFFSET_imm:
7955 case ARM::t2LDRSH_PRE_imm:
7956 case ARM::t2LDRSH_POST_imm: {
7957 if (Inst.
getOpcode() == ARM::t2LDRSH_POST_imm ||
7958 Inst.
getOpcode() == ARM::t2LDRSH_PRE_imm) {
7960 if (Imm > 255 || Imm < -255)
7961 return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7962 "operand must be in range [-255, 255]");
7963 }
else if (Inst.
getOpcode() == ARM::t2LDRSH_OFFSET_imm) {
7965 if (Imm > 0 || Imm < -255)
7966 return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7967 "operand must be in range [0, 255] with a negative sign");
7970 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7971 "if operand is PC, should call the LDRH (literal)");
7976 case ARM::LDR_PRE_IMM:
7977 case ARM::LDR_PRE_REG:
7978 case ARM::t2LDR_PRE:
7979 case ARM::LDR_POST_IMM:
7980 case ARM::LDR_POST_REG:
7981 case ARM::t2LDR_POST:
7983 case ARM::t2LDRH_PRE:
7984 case ARM::LDRH_POST:
7985 case ARM::t2LDRH_POST:
7986 case ARM::LDRSH_PRE:
7987 case ARM::t2LDRSH_PRE:
7988 case ARM::LDRSH_POST:
7989 case ARM::t2LDRSH_POST:
7990 case ARM::LDRB_PRE_IMM:
7991 case ARM::LDRB_PRE_REG:
7992 case ARM::t2LDRB_PRE:
7993 case ARM::LDRB_POST_IMM:
7994 case ARM::LDRB_POST_REG:
7995 case ARM::t2LDRB_POST:
7996 case ARM::LDRSB_PRE:
7997 case ARM::t2LDRSB_PRE:
7998 case ARM::LDRSB_POST:
7999 case ARM::t2LDRSB_POST: {
8005 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8006 "destination register and base register can't be identical");
8010 case ARM::MVE_VLDRBU8_rq:
8011 case ARM::MVE_VLDRBU16_rq:
8012 case ARM::MVE_VLDRBS16_rq:
8013 case ARM::MVE_VLDRBU32_rq:
8014 case ARM::MVE_VLDRBS32_rq:
8015 case ARM::MVE_VLDRHU16_rq:
8016 case ARM::MVE_VLDRHU16_rq_u:
8017 case ARM::MVE_VLDRHU32_rq:
8018 case ARM::MVE_VLDRHU32_rq_u:
8019 case ARM::MVE_VLDRHS32_rq:
8020 case ARM::MVE_VLDRHS32_rq_u:
8021 case ARM::MVE_VLDRWU32_rq:
8022 case ARM::MVE_VLDRWU32_rq_u:
8023 case ARM::MVE_VLDRDU64_rq:
8024 case ARM::MVE_VLDRDU64_rq_u:
8025 case ARM::MVE_VLDRWU32_qi:
8026 case ARM::MVE_VLDRWU32_qi_pre:
8027 case ARM::MVE_VLDRDU64_qi:
8028 case ARM::MVE_VLDRDU64_qi_pre: {
8030 unsigned QdIdx = 0, QmIdx = 2;
8031 bool QmIsPointer =
false;
8033 case ARM::MVE_VLDRWU32_qi:
8034 case ARM::MVE_VLDRDU64_qi:
8038 case ARM::MVE_VLDRWU32_qi_pre:
8039 case ARM::MVE_VLDRDU64_qi_pre:
8049 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8050 Twine(
"destination vector register and vector ") +
8051 (QmIsPointer ?
"pointer" :
"offset") +
8052 " register can't be identical");
8064 if (Widthm1 >= 32 - LSB)
8065 return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
8066 "bitfield width must be in range [1,32-lsb]");
8078 bool HasWritebackToken =
8079 (
static_cast<ARMOperand &
>(*Operands[MnemonicOpsEndInd + 1])
8081 static_cast<ARMOperand &
>(*Operands[MnemonicOpsEndInd + 1])
8084 bool ListContainsBase;
8088 Operands[
getRegListInd(Operands, MnemonicOpsEndInd)]->getStartLoc(),
8089 "registers must be in range r0-r7");
8091 if (!ListContainsBase && !HasWritebackToken && !isThumbTwo())
8093 Operands[
getRegListInd(Operands, MnemonicOpsEndInd)]->getStartLoc(),
8094 "writeback operator '!' expected");
8097 if (ListContainsBase && HasWritebackToken)
8098 return Error(Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8099 "writeback operator '!' not allowed when base register "
8100 "in register list");
8102 if (validatetLDMRegList(Inst, Operands, MnemonicOpsEndInd, 3))
8106 case ARM::LDMIA_UPD:
8107 case ARM::LDMDB_UPD:
8108 case ARM::LDMIB_UPD:
8109 case ARM::LDMDA_UPD:
8115 return Error(Operands.
back()->getStartLoc(),
8116 "writeback register not allowed in register list");
8120 if (validatetLDMRegList(Inst, Operands, MnemonicOpsEndInd, 3))
8125 if (validatetSTMRegList(Inst, Operands, MnemonicOpsEndInd, 3))
8128 case ARM::t2LDMIA_UPD:
8129 case ARM::t2LDMDB_UPD:
8130 case ARM::t2STMIA_UPD:
8131 case ARM::t2STMDB_UPD:
8133 return Error(Operands.
back()->getStartLoc(),
8134 "writeback register not allowed in register list");
8136 if (Opcode == ARM::t2LDMIA_UPD || Opcode == ARM::t2LDMDB_UPD) {
8137 if (validatetLDMRegList(Inst, Operands, MnemonicOpsEndInd, 3))
8140 if (validatetSTMRegList(Inst, Operands, MnemonicOpsEndInd, 3))
8145 case ARM::sysLDMIA_UPD:
8146 case ARM::sysLDMDA_UPD:
8147 case ARM::sysLDMDB_UPD:
8148 case ARM::sysLDMIB_UPD:
8150 return Error(Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8151 "writeback register only allowed on system LDM "
8152 "if PC in register-list");
8154 case ARM::sysSTMIA_UPD:
8155 case ARM::sysSTMDA_UPD:
8156 case ARM::sysSTMDB_UPD:
8157 case ARM::sysSTMIB_UPD:
8158 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8159 "system STM cannot have writeback register");
8164 bool ListContainsBase;
8166 ListContainsBase) &&
8168 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8169 "registers must be in range r0-r7 or pc");
8170 if (validatetLDMRegList(Inst, Operands, MnemonicOpsEndInd, 2, !isMClass()))
8175 bool ListContainsBase;
8177 ListContainsBase) &&
8179 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8180 "registers must be in range r0-r7 or lr");
8181 if (validatetSTMRegList(Inst, Operands, MnemonicOpsEndInd, 2))
8185 case ARM::tSTMIA_UPD: {
8186 bool ListContainsBase, InvalidLowList;
8188 0, ListContainsBase);
8189 if (InvalidLowList && !isThumbTwo())
8190 return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
8191 "registers must be in range r0-r7");
8195 if (InvalidLowList && ListContainsBase)
8196 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8197 "writeback operator '!' not allowed when base register "
8198 "in register list");
8200 if (validatetSTMRegList(Inst, Operands, MnemonicOpsEndInd, 4))
8207 if (!isThumbTwo() &&
8209 return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
8210 "source register must be the same as destination");
8220 return Error(Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8221 "source register must be sp if destination is sp");
8226 if (!(
static_cast<ARMOperand &
>(*Operands[MnemonicOpsEndInd]))
8227 .isSignedOffset<11, 1>())
8228 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8229 "branch target out of range");
8232 int op = (Operands[MnemonicOpsEndInd]->isImm()) ? MnemonicOpsEndInd
8233 : MnemonicOpsEndInd + 1;
8234 ARMOperand &Operand =
static_cast<ARMOperand &
>(*Operands[
op]);
8237 !Operand.isSignedOffset<24, 1>())
8238 return Error(Operands[
op]->getStartLoc(),
"branch target out of range");
8243 if (!
static_cast<ARMOperand &
>(*Operands[MnemonicOpsEndInd])
8244 .isSignedOffset<8, 1>())
8245 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8246 "branch target out of range");
8249 int Op = (Operands[MnemonicOpsEndInd]->isImm()) ? MnemonicOpsEndInd
8250 : MnemonicOpsEndInd + 1;
8251 if (!
static_cast<ARMOperand &
>(*Operands[
Op]).isSignedOffset<20, 1>())
8252 return Error(Operands[
Op]->getStartLoc(),
"branch target out of range");
8257 if (!
static_cast<ARMOperand &
>(*Operands[MnemonicOpsEndInd + 1])
8258 .isUnsignedOffset<6, 1>())
8259 return Error(Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8260 "branch target out of range");
8266 case ARM::t2MOVTi16:
8274 int i = (Operands[MnemonicOpsEndInd]->isImm()) ? MnemonicOpsEndInd
8275 : MnemonicOpsEndInd + 1;
8276 ARMOperand &
Op =
static_cast<ARMOperand &
>(*Operands[i]);
8277 const MCExpr *
E =
Op.getImm();
8281 if (!ARM16Expr || (ARM16Expr->getSpecifier() !=
ARM::S_HI16 &&
8285 "immediate expression for mov requires :lower16: or :upper16");
8289 int i = (Operands[MnemonicOpsEndInd + 1]->isImm()) ? MnemonicOpsEndInd + 1
8290 : MnemonicOpsEndInd + 2;
8291 MCParsedAsmOperand &
Op = *Operands[i];
8293 return Error(
Op.getStartLoc(),
8294 "Immediate expression for Thumb adds requires :lower0_7:,"
8295 " :lower8_15:, :upper0_7: or :upper8_15:");
8299 MCParsedAsmOperand &
Op = *Operands[MnemonicOpsEndInd + 1];
8301 return Error(
Op.getStartLoc(),
8302 "Immediate expression for Thumb movs requires :lower0_7:,"
8303 " :lower8_15:, :upper0_7: or :upper8_15:");
8312 if (Imm8 == 0x10 && Pred !=
ARMCC::AL && hasRAS())
8313 return Error(Operands[1]->getStartLoc(),
"instruction 'esb' is not "
8314 "predicable, but condition "
8317 return Error(Operands[1]->getStartLoc(),
"instruction 'csdb' is not "
8318 "predicable, but condition "
8326 if (!
static_cast<ARMOperand &
>(*Operands[MnemonicOpsEndInd])
8327 .isUnsignedOffset<4, 1>() ||
8329 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8330 "branch location out of range or not a multiple of 2");
8333 if (Opcode == ARM::t2BFi) {
8334 if (!
static_cast<ARMOperand &
>(*Operands[MnemonicOpsEndInd + 1])
8335 .isSignedOffset<16, 1>())
8336 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8337 "branch target out of range or not a multiple of 2");
8338 }
else if (Opcode == ARM::t2BFLi) {
8339 if (!
static_cast<ARMOperand &
>(*Operands[MnemonicOpsEndInd + 1])
8340 .isSignedOffset<18, 1>())
8341 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8342 "branch target out of range or not a multiple of 2");
8347 if (!
static_cast<ARMOperand &
>(*Operands[MnemonicOpsEndInd])
8348 .isUnsignedOffset<4, 1>() ||
8350 return Error(Operands[1]->getStartLoc(),
8351 "branch location out of range or not a multiple of 2");
8353 if (!
static_cast<ARMOperand &
>(*Operands[MnemonicOpsEndInd + 1])
8354 .isSignedOffset<16, 1>())
8355 return Error(Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8356 "branch target out of range or not a multiple of 2");
8359 "branch location and else branch target should either both be "
8360 "immediates or both labels");
8364 if (Diff != 4 && Diff != 2)
8366 Operands[3]->getStartLoc(),
8367 "else branch target must be 2 or 4 greater than the branch location");
8374 !ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(
8376 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8377 "invalid register in register list. Valid registers are "
8378 "r0-r12, lr/r14 and APSR.");
8394 return Error(Operands[1]->getStartLoc(),
8395 "instruction 'ssbb' is not predicable, but condition code "
8398 return Error(Operands[1]->getStartLoc(),
8399 "instruction 'pssbb' is not predicable, but condition code "
8403 case ARM::VMOVRRS: {
8408 return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
8409 "source operands must be sequential");
8412 case ARM::VMOVSRR: {
8417 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8418 "destination operands must be sequential");
8422 case ARM::VSTMDIA: {
8424 static_cast<ARMOperand &
>(*Operands[MnemonicOpsEndInd + 1]);
8425 auto &RegList =
Op.getRegList();
8426 if (RegList.size() < 1 || RegList.size() > 16)
8427 return Error(Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8428 "list of registers must be at least 1 and at most 16");
8431 case ARM::MVE_VQDMULLs32bh:
8432 case ARM::MVE_VQDMULLs32th:
8433 case ARM::MVE_VCMULf32:
8434 case ARM::MVE_VMULLBs32:
8435 case ARM::MVE_VMULLTs32:
8436 case ARM::MVE_VMULLBu32:
8437 case ARM::MVE_VMULLTu32: {
8438 if (Operands[MnemonicOpsEndInd]->
getReg() ==
8439 Operands[MnemonicOpsEndInd + 1]->
getReg()) {
8440 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8441 "Qd register and Qn register can't be identical");
8443 if (Operands[MnemonicOpsEndInd]->
getReg() ==
8444 Operands[MnemonicOpsEndInd + 2]->
getReg()) {
8445 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8446 "Qd register and Qm register can't be identical");
8450 case ARM::MVE_VREV64_8:
8451 case ARM::MVE_VREV64_16:
8452 case ARM::MVE_VREV64_32:
8453 case ARM::MVE_VQDMULL_qr_s32bh:
8454 case ARM::MVE_VQDMULL_qr_s32th: {
8455 if (Operands[MnemonicOpsEndInd]->
getReg() ==
8456 Operands[MnemonicOpsEndInd + 1]->
getReg()) {
8457 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8458 "Qd register and Qn register can't be identical");
8462 case ARM::MVE_VCADDi32:
8463 case ARM::MVE_VCADDf32:
8464 case ARM::MVE_VHCADDs32: {
8465 if (Operands[MnemonicOpsEndInd]->
getReg() ==
8466 Operands[MnemonicOpsEndInd + 2]->
getReg()) {
8467 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8468 "Qd register and Qm register can't be identical");
8472 case ARM::MVE_VMOV_rr_q: {
8473 if (Operands[MnemonicOpsEndInd + 2]->
getReg() !=
8474 Operands[MnemonicOpsEndInd + 4]->
getReg())
8475 return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
8476 "Q-registers must be the same");
8477 if (
static_cast<ARMOperand &
>(*Operands[MnemonicOpsEndInd + 3])
8478 .getVectorIndex() !=
8479 static_cast<ARMOperand &
>(*Operands[MnemonicOpsEndInd + 5])
8482 return Error(Operands[MnemonicOpsEndInd + 3]->getStartLoc(),
8483 "Q-register indexes must be 2 and 0 or 3 and 1");
8486 case ARM::MVE_VMOV_q_rr: {
8487 if (Operands[MnemonicOpsEndInd]->
getReg() !=
8488 Operands[MnemonicOpsEndInd + 2]->
getReg())
8489 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8490 "Q-registers must be the same");
8491 if (
static_cast<ARMOperand &
>(*Operands[MnemonicOpsEndInd + 1])
8492 .getVectorIndex() !=
8493 static_cast<ARMOperand &
>(*Operands[MnemonicOpsEndInd + 3])
8496 return Error(Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8497 "Q-register indexes must be 2 and 0 or 3 and 1");
8500 case ARM::MVE_SQRSHR:
8501 case ARM::MVE_UQRSHL: {
8502 if (Operands[MnemonicOpsEndInd]->
getReg() ==
8503 Operands[MnemonicOpsEndInd + 1]->
getReg()) {
8504 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8505 "Rda register and Rm register can't be identical");
8526 case ARM::t2SMLALBB:
8527 case ARM::t2SMLALBT:
8529 case ARM::t2SMLALDX:
8530 case ARM::t2SMLALTB:
8531 case ARM::t2SMLALTT:
8533 case ARM::t2SMLSLDX:
8534 case ARM::t2SMULL: {
8539 "unpredictable instruction, RdHi and RdLo must be different");
8547 case ARM::CDE_CX1DA:
8551 case ARM::CDE_CX2DA:
8555 case ARM::CDE_CX3DA:
8556 case ARM::CDE_VCX1_vec:
8557 case ARM::CDE_VCX1_fpsp:
8558 case ARM::CDE_VCX1_fpdp:
8559 case ARM::CDE_VCX1A_vec:
8560 case ARM::CDE_VCX1A_fpsp:
8561 case ARM::CDE_VCX1A_fpdp:
8562 case ARM::CDE_VCX2_vec:
8563 case ARM::CDE_VCX2_fpsp:
8564 case ARM::CDE_VCX2_fpdp:
8565 case ARM::CDE_VCX2A_vec:
8566 case ARM::CDE_VCX2A_fpsp:
8567 case ARM::CDE_VCX2A_fpdp:
8568 case ARM::CDE_VCX3_vec:
8569 case ARM::CDE_VCX3_fpsp:
8570 case ARM::CDE_VCX3_fpdp:
8571 case ARM::CDE_VCX3A_vec:
8572 case ARM::CDE_VCX3A_fpsp:
8573 case ARM::CDE_VCX3A_fpdp: {
8575 "CDE operand 1 must be a coprocessor ID");
8578 return Error(Operands[1]->getStartLoc(),
8579 "coprocessor must be configured as CDE");
8580 else if (Coproc >= 8)
8581 return Error(Operands[1]->getStartLoc(),
8582 "coprocessor must be in the range [p0, p7]");
8588 case ARM::t2LDC2L_OFFSET:
8589 case ARM::t2LDC2L_OPTION:
8590 case ARM::t2LDC2L_POST:
8591 case ARM::t2LDC2L_PRE:
8592 case ARM::t2LDC2_OFFSET:
8593 case ARM::t2LDC2_OPTION:
8594 case ARM::t2LDC2_POST:
8595 case ARM::t2LDC2_PRE:
8596 case ARM::t2LDCL_OFFSET:
8597 case ARM::t2LDCL_OPTION:
8598 case ARM::t2LDCL_POST:
8599 case ARM::t2LDCL_PRE:
8600 case ARM::t2LDC_OFFSET:
8601 case ARM::t2LDC_OPTION:
8602 case ARM::t2LDC_POST:
8603 case ARM::t2LDC_PRE:
8612 case ARM::t2STC2L_OFFSET:
8613 case ARM::t2STC2L_OPTION:
8614 case ARM::t2STC2L_POST:
8615 case ARM::t2STC2L_PRE:
8616 case ARM::t2STC2_OFFSET:
8617 case ARM::t2STC2_OPTION:
8618 case ARM::t2STC2_POST:
8619 case ARM::t2STC2_PRE:
8620 case ARM::t2STCL_OFFSET:
8621 case ARM::t2STCL_OPTION:
8622 case ARM::t2STCL_POST:
8623 case ARM::t2STCL_PRE:
8624 case ARM::t2STC_OFFSET:
8625 case ARM::t2STC_OPTION:
8626 case ARM::t2STC_POST:
8627 case ARM::t2STC_PRE: {
8632 if (Opcode == ARM::t2MRRC || Opcode == ARM::t2MRRC2)
8634 else if (Opcode == ARM::t2MRC || Opcode == ARM::t2MRC2)
8637 "Operand must be a coprocessor ID");
8641 return Error(Operands[2]->getStartLoc(),
8642 "coprocessor must be configured as GCP");
8670 if (Operands[MnemonicOpsEndInd]->
getReg() !=
8671 Operands[MnemonicOpsEndInd + 1]->
getReg())
8672 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8673 "source and destination registers must be the same");
8685 case ARM::VST1LNdWB_fixed_Asm_8: Spacing = 1;
return ARM::VST1LNd8_UPD;
8686 case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1;
return ARM::VST1LNd16_UPD;
8687 case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1;
return ARM::VST1LNd32_UPD;
8688 case ARM::VST1LNdWB_register_Asm_8: Spacing = 1;
return ARM::VST1LNd8_UPD;
8689 case ARM::VST1LNdWB_register_Asm_16: Spacing = 1;
return ARM::VST1LNd16_UPD;
8690 case ARM::VST1LNdWB_register_Asm_32: Spacing = 1;
return ARM::VST1LNd32_UPD;
8691 case ARM::VST1LNdAsm_8: Spacing = 1;
return ARM::VST1LNd8;
8692 case ARM::VST1LNdAsm_16: Spacing = 1;
return ARM::VST1LNd16;
8693 case ARM::VST1LNdAsm_32: Spacing = 1;
return ARM::VST1LNd32;
8696 case ARM::VST2LNdWB_fixed_Asm_8: Spacing = 1;
return ARM::VST2LNd8_UPD;
8697 case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1;
return ARM::VST2LNd16_UPD;
8698 case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1;
return ARM::VST2LNd32_UPD;
8699 case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2;
return ARM::VST2LNq16_UPD;
8700 case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2;
return ARM::VST2LNq32_UPD;
8702 case ARM::VST2LNdWB_register_Asm_8: Spacing = 1;
return ARM::VST2LNd8_UPD;
8703 case ARM::VST2LNdWB_register_Asm_16: Spacing = 1;
return ARM::VST2LNd16_UPD;
8704 case ARM::VST2LNdWB_register_Asm_32: Spacing = 1;
return ARM::VST2LNd32_UPD;
8705 case ARM::VST2LNqWB_register_Asm_16: Spacing = 2;
return ARM::VST2LNq16_UPD;
8706 case ARM::VST2LNqWB_register_Asm_32: Spacing = 2;
return ARM::VST2LNq32_UPD;
8708 case ARM::VST2LNdAsm_8: Spacing = 1;
return ARM::VST2LNd8;
8709 case ARM::VST2LNdAsm_16: Spacing = 1;
return ARM::VST2LNd16;
8710 case ARM::VST2LNdAsm_32: Spacing = 1;
return ARM::VST2LNd32;
8711 case ARM::VST2LNqAsm_16: Spacing = 2;
return ARM::VST2LNq16;
8712 case ARM::VST2LNqAsm_32: Spacing = 2;
return ARM::VST2LNq32;
8715 case ARM::VST3LNdWB_fixed_Asm_8: Spacing = 1;
return ARM::VST3LNd8_UPD;
8716 case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1;
return ARM::VST3LNd16_UPD;
8717 case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1;
return ARM::VST3LNd32_UPD;
8718 case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 1;
return ARM::VST3LNq16_UPD;
8719 case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2;
return ARM::VST3LNq32_UPD;
8720 case ARM::VST3LNdWB_register_Asm_8: Spacing = 1;
return ARM::VST3LNd8_UPD;
8721 case ARM::VST3LNdWB_register_Asm_16: Spacing = 1;
return ARM::VST3LNd16_UPD;
8722 case ARM::VST3LNdWB_register_Asm_32: Spacing = 1;
return ARM::VST3LNd32_UPD;
8723 case ARM::VST3LNqWB_register_Asm_16: Spacing = 2;
return ARM::VST3LNq16_UPD;
8724 case ARM::VST3LNqWB_register_Asm_32: Spacing = 2;
return ARM::VST3LNq32_UPD;
8725 case ARM::VST3LNdAsm_8: Spacing = 1;
return ARM::VST3LNd8;
8726 case ARM::VST3LNdAsm_16: Spacing = 1;
return ARM::VST3LNd16;
8727 case ARM::VST3LNdAsm_32: Spacing = 1;
return ARM::VST3LNd32;
8728 case ARM::VST3LNqAsm_16: Spacing = 2;
return ARM::VST3LNq16;
8729 case ARM::VST3LNqAsm_32: Spacing = 2;
return ARM::VST3LNq32;
8732 case ARM::VST3dWB_fixed_Asm_8: Spacing = 1;
return ARM::VST3d8_UPD;
8733 case ARM::VST3dWB_fixed_Asm_16: Spacing = 1;
return ARM::VST3d16_UPD;
8734 case ARM::VST3dWB_fixed_Asm_32: Spacing = 1;
return ARM::VST3d32_UPD;
8735 case ARM::VST3qWB_fixed_Asm_8: Spacing = 2;
return ARM::VST3q8_UPD;
8736 case ARM::VST3qWB_fixed_Asm_16: Spacing = 2;
return ARM::VST3q16_UPD;
8737 case ARM::VST3qWB_fixed_Asm_32: Spacing = 2;
return ARM::VST3q32_UPD;
8738 case ARM::VST3dWB_register_Asm_8: Spacing = 1;
return ARM::VST3d8_UPD;
8739 case ARM::VST3dWB_register_Asm_16: Spacing = 1;
return ARM::VST3d16_UPD;
8740 case ARM::VST3dWB_register_Asm_32: Spacing = 1;
return ARM::VST3d32_UPD;
8741 case ARM::VST3qWB_register_Asm_8: Spacing = 2;
return ARM::VST3q8_UPD;
8742 case ARM::VST3qWB_register_Asm_16: Spacing = 2;
return ARM::VST3q16_UPD;
8743 case ARM::VST3qWB_register_Asm_32: Spacing = 2;
return ARM::VST3q32_UPD;
8744 case ARM::VST3dAsm_8: Spacing = 1;
return ARM::VST3d8;
8745 case ARM::VST3dAsm_16: Spacing = 1;
return ARM::VST3d16;
8746 case ARM::VST3dAsm_32: Spacing = 1;
return ARM::VST3d32;
8747 case ARM::VST3qAsm_8: Spacing = 2;
return ARM::VST3q8;
8748 case ARM::VST3qAsm_16: Spacing = 2;
return ARM::VST3q16;
8749 case ARM::VST3qAsm_32: Spacing = 2;
return ARM::VST3q32;
8752 case ARM::VST4LNdWB_fixed_Asm_8: Spacing = 1;
return ARM::VST4LNd8_UPD;
8753 case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1;
return ARM::VST4LNd16_UPD;
8754 case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1;
return ARM::VST4LNd32_UPD;
8755 case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 1;
return ARM::VST4LNq16_UPD;
8756 case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2;
return ARM::VST4LNq32_UPD;
8757 case ARM::VST4LNdWB_register_Asm_8: Spacing = 1;
return ARM::VST4LNd8_UPD;
8758 case ARM::VST4LNdWB_register_Asm_16: Spacing = 1;
return ARM::VST4LNd16_UPD;
8759 case ARM::VST4LNdWB_register_Asm_32: Spacing = 1;
return ARM::VST4LNd32_UPD;
8760 case ARM::VST4LNqWB_register_Asm_16: Spacing = 2;
return ARM::VST4LNq16_UPD;
8761 case ARM::VST4LNqWB_register_Asm_32: Spacing = 2;
return ARM::VST4LNq32_UPD;
8762 case ARM::VST4LNdAsm_8: Spacing = 1;
return ARM::VST4LNd8;
8763 case ARM::VST4LNdAsm_16: Spacing = 1;
return ARM::VST4LNd16;
8764 case ARM::VST4LNdAsm_32: Spacing = 1;
return ARM::VST4LNd32;
8765 case ARM::VST4LNqAsm_16: Spacing = 2;
return ARM::VST4LNq16;
8766 case ARM::VST4LNqAsm_32: Spacing = 2;
return ARM::VST4LNq32;
8769 case ARM::VST4dWB_fixed_Asm_8: Spacing = 1;
return ARM::VST4d8_UPD;
8770 case ARM::VST4dWB_fixed_Asm_16: Spacing = 1;
return ARM::VST4d16_UPD;
8771 case ARM::VST4dWB_fixed_Asm_32: Spacing = 1;
return ARM::VST4d32_UPD;
8772 case ARM::VST4qWB_fixed_Asm_8: Spacing = 2;
return ARM::VST4q8_UPD;
8773 case ARM::VST4qWB_fixed_Asm_16: Spacing = 2;
return ARM::VST4q16_UPD;
8774 case ARM::VST4qWB_fixed_Asm_32: Spacing = 2;
return ARM::VST4q32_UPD;
8775 case ARM::VST4dWB_register_Asm_8: Spacing = 1;
return ARM::VST4d8_UPD;
8776 case ARM::VST4dWB_register_Asm_16: Spacing = 1;
return ARM::VST4d16_UPD;
8777 case ARM::VST4dWB_register_Asm_32: Spacing = 1;
return ARM::VST4d32_UPD;
8778 case ARM::VST4qWB_register_Asm_8: Spacing = 2;
return ARM::VST4q8_UPD;
8779 case ARM::VST4qWB_register_Asm_16: Spacing = 2;
return ARM::VST4q16_UPD;
8780 case ARM::VST4qWB_register_Asm_32: Spacing = 2;
return ARM::VST4q32_UPD;
8781 case ARM::VST4dAsm_8: Spacing = 1;
return ARM::VST4d8;
8782 case ARM::VST4dAsm_16: Spacing = 1;
return ARM::VST4d16;
8783 case ARM::VST4dAsm_32: Spacing = 1;
return ARM::VST4d32;
8784 case ARM::VST4qAsm_8: Spacing = 2;
return ARM::VST4q8;
8785 case ARM::VST4qAsm_16: Spacing = 2;
return ARM::VST4q16;
8786 case ARM::VST4qAsm_32: Spacing = 2;
return ARM::VST4q32;
8794 case ARM::VLD1LNdWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD1LNd8_UPD;
8795 case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD1LNd16_UPD;
8796 case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1;
return ARM::VLD1LNd32_UPD;
8797 case ARM::VLD1LNdWB_register_Asm_8: Spacing = 1;
return ARM::VLD1LNd8_UPD;
8798 case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1;
return ARM::VLD1LNd16_UPD;
8799 case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1;
return ARM::VLD1LNd32_UPD;
8800 case ARM::VLD1LNdAsm_8: Spacing = 1;
return ARM::VLD1LNd8;
8801 case ARM::VLD1LNdAsm_16: Spacing = 1;
return ARM::VLD1LNd16;
8802 case ARM::VLD1LNdAsm_32: Spacing = 1;
return ARM::VLD1LNd32;
8805 case ARM::VLD2LNdWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD2LNd8_UPD;
8806 case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD2LNd16_UPD;
8807 case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1;
return ARM::VLD2LNd32_UPD;
8808 case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD2LNq16_UPD;
8809 case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2;
return ARM::VLD2LNq32_UPD;
8810 case ARM::VLD2LNdWB_register_Asm_8: Spacing = 1;
return ARM::VLD2LNd8_UPD;
8811 case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1;
return ARM::VLD2LNd16_UPD;
8812 case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1;
return ARM::VLD2LNd32_UPD;
8813 case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2;
return ARM::VLD2LNq16_UPD;
8814 case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2;
return ARM::VLD2LNq32_UPD;
8815 case ARM::VLD2LNdAsm_8: Spacing = 1;
return ARM::VLD2LNd8;
8816 case ARM::VLD2LNdAsm_16: Spacing = 1;
return ARM::VLD2LNd16;
8817 case ARM::VLD2LNdAsm_32: Spacing = 1;
return ARM::VLD2LNd32;
8818 case ARM::VLD2LNqAsm_16: Spacing = 2;
return ARM::VLD2LNq16;
8819 case ARM::VLD2LNqAsm_32: Spacing = 2;
return ARM::VLD2LNq32;
8822 case ARM::VLD3DUPdWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD3DUPd8_UPD;
8823 case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD3DUPd16_UPD;
8824 case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1;
return ARM::VLD3DUPd32_UPD;
8825 case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD3DUPq8_UPD;
8826 case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 2;
return ARM::VLD3DUPq16_UPD;
8827 case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2;
return ARM::VLD3DUPq32_UPD;
8828 case ARM::VLD3DUPdWB_register_Asm_8: Spacing = 1;
return ARM::VLD3DUPd8_UPD;
8829 case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1;
return ARM::VLD3DUPd16_UPD;
8830 case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1;
return ARM::VLD3DUPd32_UPD;
8831 case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2;
return ARM::VLD3DUPq8_UPD;
8832 case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2;
return ARM::VLD3DUPq16_UPD;
8833 case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2;
return ARM::VLD3DUPq32_UPD;
8834 case ARM::VLD3DUPdAsm_8: Spacing = 1;
return ARM::VLD3DUPd8;
8835 case ARM::VLD3DUPdAsm_16: Spacing = 1;
return ARM::VLD3DUPd16;
8836 case ARM::VLD3DUPdAsm_32: Spacing = 1;
return ARM::VLD3DUPd32;
8837 case ARM::VLD3DUPqAsm_8: Spacing = 2;
return ARM::VLD3DUPq8;
8838 case ARM::VLD3DUPqAsm_16: Spacing = 2;
return ARM::VLD3DUPq16;
8839 case ARM::VLD3DUPqAsm_32: Spacing = 2;
return ARM::VLD3DUPq32;
8842 case ARM::VLD3LNdWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD3LNd8_UPD;
8843 case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD3LNd16_UPD;
8844 case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1;
return ARM::VLD3LNd32_UPD;
8845 case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD3LNq16_UPD;
8846 case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2;
return ARM::VLD3LNq32_UPD;
8847 case ARM::VLD3LNdWB_register_Asm_8: Spacing = 1;
return ARM::VLD3LNd8_UPD;
8848 case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1;
return ARM::VLD3LNd16_UPD;
8849 case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1;
return ARM::VLD3LNd32_UPD;
8850 case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2;
return ARM::VLD3LNq16_UPD;
8851 case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2;
return ARM::VLD3LNq32_UPD;
8852 case ARM::VLD3LNdAsm_8: Spacing = 1;
return ARM::VLD3LNd8;
8853 case ARM::VLD3LNdAsm_16: Spacing = 1;
return ARM::VLD3LNd16;
8854 case ARM::VLD3LNdAsm_32: Spacing = 1;
return ARM::VLD3LNd32;
8855 case ARM::VLD3LNqAsm_16: Spacing = 2;
return ARM::VLD3LNq16;
8856 case ARM::VLD3LNqAsm_32: Spacing = 2;
return ARM::VLD3LNq32;
8859 case ARM::VLD3dWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD3d8_UPD;
8860 case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD3d16_UPD;
8861 case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1;
return ARM::VLD3d32_UPD;
8862 case ARM::VLD3qWB_fixed_Asm_8: Spacing = 2;
return ARM::VLD3q8_UPD;
8863 case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2;
return ARM::VLD3q16_UPD;
8864 case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2;
return ARM::VLD3q32_UPD;
8865 case ARM::VLD3dWB_register_Asm_8: Spacing = 1;
return ARM::VLD3d8_UPD;
8866 case ARM::VLD3dWB_register_Asm_16: Spacing = 1;
return ARM::VLD3d16_UPD;
8867 case ARM::VLD3dWB_register_Asm_32: Spacing = 1;
return ARM::VLD3d32_UPD;
8868 case ARM::VLD3qWB_register_Asm_8: Spacing = 2;
return ARM::VLD3q8_UPD;
8869 case ARM::VLD3qWB_register_Asm_16: Spacing = 2;
return ARM::VLD3q16_UPD;
8870 case ARM::VLD3qWB_register_Asm_32: Spacing = 2;
return ARM::VLD3q32_UPD;
8871 case ARM::VLD3dAsm_8: Spacing = 1;
return ARM::VLD3d8;
8872 case ARM::VLD3dAsm_16: Spacing = 1;
return ARM::VLD3d16;
8873 case ARM::VLD3dAsm_32: Spacing = 1;
return ARM::VLD3d32;
8874 case ARM::VLD3qAsm_8: Spacing = 2;
return ARM::VLD3q8;
8875 case ARM::VLD3qAsm_16: Spacing = 2;
return ARM::VLD3q16;
8876 case ARM::VLD3qAsm_32: Spacing = 2;
return ARM::VLD3q32;
8879 case ARM::VLD4LNdWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD4LNd8_UPD;
8880 case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD4LNd16_UPD;
8881 case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1;
return ARM::VLD4LNd32_UPD;
8882 case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2;
return ARM::VLD4LNq16_UPD;
8883 case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2;
return ARM::VLD4LNq32_UPD;
8884 case ARM::VLD4LNdWB_register_Asm_8: Spacing = 1;
return ARM::VLD4LNd8_UPD;
8885 case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1;
return ARM::VLD4LNd16_UPD;
8886 case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1;
return ARM::VLD4LNd32_UPD;
8887 case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2;
return ARM::VLD4LNq16_UPD;
8888 case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2;
return ARM::VLD4LNq32_UPD;
8889 case ARM::VLD4LNdAsm_8: Spacing = 1;
return ARM::VLD4LNd8;
8890 case ARM::VLD4LNdAsm_16: Spacing = 1;
return ARM::VLD4LNd16;
8891 case ARM::VLD4LNdAsm_32: Spacing = 1;
return ARM::VLD4LNd32;
8892 case ARM::VLD4LNqAsm_16: Spacing = 2;
return ARM::VLD4LNq16;
8893 case ARM::VLD4LNqAsm_32: Spacing = 2;
return ARM::VLD4LNq32;
8896 case ARM::VLD4DUPdWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD4DUPd8_UPD;
8897 case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD4DUPd16_UPD;
8898 case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1;
return ARM::VLD4DUPd32_UPD;
8899 case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD4DUPq8_UPD;
8900 case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD4DUPq16_UPD;
8901 case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2;
return ARM::VLD4DUPq32_UPD;
8902 case ARM::VLD4DUPdWB_register_Asm_8: Spacing = 1;
return ARM::VLD4DUPd8_UPD;
8903 case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1;
return ARM::VLD4DUPd16_UPD;
8904 case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1;
return ARM::VLD4DUPd32_UPD;
8905 case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2;
return ARM::VLD4DUPq8_UPD;
8906 case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2;
return ARM::VLD4DUPq16_UPD;
8907 case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2;
return ARM::VLD4DUPq32_UPD;
8908 case ARM::VLD4DUPdAsm_8: Spacing = 1;
return ARM::VLD4DUPd8;
8909 case ARM::VLD4DUPdAsm_16: Spacing = 1;
return ARM::VLD4DUPd16;
8910 case ARM::VLD4DUPdAsm_32: Spacing = 1;
return ARM::VLD4DUPd32;
8911 case ARM::VLD4DUPqAsm_8: Spacing = 2;
return ARM::VLD4DUPq8;
8912 case ARM::VLD4DUPqAsm_16: Spacing = 2;
return ARM::VLD4DUPq16;
8913 case ARM::VLD4DUPqAsm_32: Spacing = 2;
return ARM::VLD4DUPq32;
8916 case ARM::VLD4dWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD4d8_UPD;
8917 case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD4d16_UPD;
8918 case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1;
return ARM::VLD4d32_UPD;
8919 case ARM::VLD4qWB_fixed_Asm_8: Spacing = 2;
return ARM::VLD4q8_UPD;
8920 case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2;
return ARM::VLD4q16_UPD;
8921 case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2;
return ARM::VLD4q32_UPD;
8922 case ARM::VLD4dWB_register_Asm_8: Spacing = 1;
return ARM::VLD4d8_UPD;
8923 case ARM::VLD4dWB_register_Asm_16: Spacing = 1;
return ARM::VLD4d16_UPD;
8924 case ARM::VLD4dWB_register_Asm_32: Spacing = 1;
return ARM::VLD4d32_UPD;
8925 case ARM::VLD4qWB_register_Asm_8: Spacing = 2;
return ARM::VLD4q8_UPD;
8926 case ARM::VLD4qWB_register_Asm_16: Spacing = 2;
return ARM::VLD4q16_UPD;
8927 case ARM::VLD4qWB_register_Asm_32: Spacing = 2;
return ARM::VLD4q32_UPD;
8928 case ARM::VLD4dAsm_8: Spacing = 1;
return ARM::VLD4d8;
8929 case ARM::VLD4dAsm_16: Spacing = 1;
return ARM::VLD4d16;
8930 case ARM::VLD4dAsm_32: Spacing = 1;
return ARM::VLD4d32;
8931 case ARM::VLD4qAsm_8: Spacing = 2;
return ARM::VLD4q8;
8932 case ARM::VLD4qAsm_16: Spacing = 2;
return ARM::VLD4q16;
8933 case ARM::VLD4qAsm_32: Spacing = 2;
return ARM::VLD4q32;
8937bool ARMAsmParser::processInstruction(MCInst &Inst,
8939 unsigned MnemonicOpsEndInd,
8943 bool HasWideQualifier =
false;
8944 for (
auto &
Op : Operands) {
8945 ARMOperand &ARMOp =
static_cast<ARMOperand&
>(*Op);
8946 if (ARMOp.isToken() && ARMOp.getToken() ==
".w") {
8947 HasWideQualifier =
true;
8957 if (Operands.size() ==
8958 MnemonicOpsEndInd + 2) {
8959 ARMOperand &
Op =
static_cast<ARMOperand &
>(
8960 *Operands[MnemonicOpsEndInd + 1]);
8962 auto &RegList =
Op.getRegList();
8965 if (RegList.size() == 32) {
8966 const unsigned Opcode =
8967 (Inst.
getOpcode() == ARM::VLLDM) ? ARM::VLLDM_T2 : ARM::VLSTM_T2;
8981 case ARM::LDRT_POST:
8982 case ARM::LDRBT_POST: {
8983 const unsigned Opcode =
8984 (Inst.
getOpcode() == ARM::LDRT_POST) ? ARM::LDRT_POST_IMM
8985 : ARM::LDRBT_POST_IMM;
9001 case ARM::LDRSHTii: {
9006 else if (Inst.
getOpcode() == ARM::LDRHTii)
9008 else if (Inst.
getOpcode() == ARM::LDRSHTii)
9019 case ARM::STRT_POST:
9020 case ARM::STRBT_POST: {
9021 const unsigned Opcode =
9022 (Inst.
getOpcode() == ARM::STRT_POST) ? ARM::STRT_POST_IMM
9023 : ARM::STRBT_POST_IMM;
9072 case ARM::t2LDR_PRE_imm:
9073 case ARM::t2LDR_POST_imm: {
9087 case ARM::t2STR_PRE_imm:
9088 case ARM::t2STR_POST_imm: {
9102 case ARM::t2LDRB_OFFSET_imm: {
9112 case ARM::t2LDRB_PRE_imm:
9113 case ARM::t2LDRB_POST_imm: {
9117 : ARM::t2LDRB_POST);
9128 case ARM::t2STRB_OFFSET_imm: {
9138 case ARM::t2STRB_PRE_imm:
9139 case ARM::t2STRB_POST_imm: {
9143 : ARM::t2STRB_POST);
9154 case ARM::t2LDRH_OFFSET_imm: {
9164 case ARM::t2LDRH_PRE_imm:
9165 case ARM::t2LDRH_POST_imm: {
9169 : ARM::t2LDRH_POST);
9180 case ARM::t2STRH_OFFSET_imm: {
9190 case ARM::t2STRH_PRE_imm:
9191 case ARM::t2STRH_POST_imm: {
9195 : ARM::t2STRH_POST);
9206 case ARM::t2LDRSB_OFFSET_imm: {
9216 case ARM::t2LDRSB_PRE_imm:
9217 case ARM::t2LDRSB_POST_imm: {
9221 : ARM::t2LDRSB_POST);
9232 case ARM::t2LDRSH_OFFSET_imm: {
9242 case ARM::t2LDRSH_PRE_imm:
9243 case ARM::t2LDRSH_POST_imm: {
9247 : ARM::t2LDRSH_POST);
9258 case ARM::t2LDRpcrel:
9267 case ARM::t2LDRBpcrel:
9270 case ARM::t2LDRHpcrel:
9273 case ARM::t2LDRSBpcrel:
9276 case ARM::t2LDRSHpcrel:
9279 case ARM::LDRConstPool:
9280 case ARM::tLDRConstPool:
9281 case ARM::t2LDRConstPool: {
9286 if (Inst.
getOpcode() == ARM::LDRConstPool)
9288 else if (Inst.
getOpcode() == ARM::tLDRConstPool)
9290 else if (Inst.
getOpcode() == ARM::t2LDRConstPool)
9292 const ARMOperand &PoolOperand =
9293 static_cast<ARMOperand &
>(*Operands[MnemonicOpsEndInd + 1]);
9294 const MCExpr *SubExprVal = PoolOperand.getConstantPoolImm();
9302 bool MovHasS =
true;
9303 if (Inst.
getOpcode() == ARM::LDRConstPool) {
9313 else if (hasV6T2Ops() &&
9326 else if (hasThumb2() &&
9331 else if (hasV8MBaseline() &&
9351 const MCExpr *CPLoc =
9352 getTargetStreamer().addConstantPoolEntry(SubExprVal,
9353 PoolOperand.getStartLoc());
9364 case ARM::VST1LNdWB_register_Asm_8:
9365 case ARM::VST1LNdWB_register_Asm_16:
9366 case ARM::VST1LNdWB_register_Asm_32: {
9384 case ARM::VST2LNdWB_register_Asm_8:
9385 case ARM::VST2LNdWB_register_Asm_16:
9386 case ARM::VST2LNdWB_register_Asm_32:
9387 case ARM::VST2LNqWB_register_Asm_16:
9388 case ARM::VST2LNqWB_register_Asm_32: {
9408 case ARM::VST3LNdWB_register_Asm_8:
9409 case ARM::VST3LNdWB_register_Asm_16:
9410 case ARM::VST3LNdWB_register_Asm_32:
9411 case ARM::VST3LNqWB_register_Asm_16:
9412 case ARM::VST3LNqWB_register_Asm_32: {
9434 case ARM::VST4LNdWB_register_Asm_8:
9435 case ARM::VST4LNdWB_register_Asm_16:
9436 case ARM::VST4LNdWB_register_Asm_32:
9437 case ARM::VST4LNqWB_register_Asm_16:
9438 case ARM::VST4LNqWB_register_Asm_32: {
9462 case ARM::VST1LNdWB_fixed_Asm_8:
9463 case ARM::VST1LNdWB_fixed_Asm_16:
9464 case ARM::VST1LNdWB_fixed_Asm_32: {
9482 case ARM::VST2LNdWB_fixed_Asm_8:
9483 case ARM::VST2LNdWB_fixed_Asm_16:
9484 case ARM::VST2LNdWB_fixed_Asm_32:
9485 case ARM::VST2LNqWB_fixed_Asm_16:
9486 case ARM::VST2LNqWB_fixed_Asm_32: {
9506 case ARM::VST3LNdWB_fixed_Asm_8:
9507 case ARM::VST3LNdWB_fixed_Asm_16:
9508 case ARM::VST3LNdWB_fixed_Asm_32:
9509 case ARM::VST3LNqWB_fixed_Asm_16:
9510 case ARM::VST3LNqWB_fixed_Asm_32: {
9532 case ARM::VST4LNdWB_fixed_Asm_8:
9533 case ARM::VST4LNdWB_fixed_Asm_16:
9534 case ARM::VST4LNdWB_fixed_Asm_32:
9535 case ARM::VST4LNqWB_fixed_Asm_16:
9536 case ARM::VST4LNqWB_fixed_Asm_32: {
9560 case ARM::VST1LNdAsm_8:
9561 case ARM::VST1LNdAsm_16:
9562 case ARM::VST1LNdAsm_32: {
9578 case ARM::VST2LNdAsm_8:
9579 case ARM::VST2LNdAsm_16:
9580 case ARM::VST2LNdAsm_32:
9581 case ARM::VST2LNqAsm_16:
9582 case ARM::VST2LNqAsm_32: {
9600 case ARM::VST3LNdAsm_8:
9601 case ARM::VST3LNdAsm_16:
9602 case ARM::VST3LNdAsm_32:
9603 case ARM::VST3LNqAsm_16:
9604 case ARM::VST3LNqAsm_32: {
9624 case ARM::VST4LNdAsm_8:
9625 case ARM::VST4LNdAsm_16:
9626 case ARM::VST4LNdAsm_32:
9627 case ARM::VST4LNqAsm_16:
9628 case ARM::VST4LNqAsm_32: {
9651 case ARM::VLD1LNdWB_register_Asm_8:
9652 case ARM::VLD1LNdWB_register_Asm_16:
9653 case ARM::VLD1LNdWB_register_Asm_32: {
9672 case ARM::VLD2LNdWB_register_Asm_8:
9673 case ARM::VLD2LNdWB_register_Asm_16:
9674 case ARM::VLD2LNdWB_register_Asm_32:
9675 case ARM::VLD2LNqWB_register_Asm_16:
9676 case ARM::VLD2LNqWB_register_Asm_32: {
9699 case ARM::VLD3LNdWB_register_Asm_8:
9700 case ARM::VLD3LNdWB_register_Asm_16:
9701 case ARM::VLD3LNdWB_register_Asm_32:
9702 case ARM::VLD3LNqWB_register_Asm_16:
9703 case ARM::VLD3LNqWB_register_Asm_32: {
9730 case ARM::VLD4LNdWB_register_Asm_8:
9731 case ARM::VLD4LNdWB_register_Asm_16:
9732 case ARM::VLD4LNdWB_register_Asm_32:
9733 case ARM::VLD4LNqWB_register_Asm_16:
9734 case ARM::VLD4LNqWB_register_Asm_32: {
9765 case ARM::VLD1LNdWB_fixed_Asm_8:
9766 case ARM::VLD1LNdWB_fixed_Asm_16:
9767 case ARM::VLD1LNdWB_fixed_Asm_32: {
9786 case ARM::VLD2LNdWB_fixed_Asm_8:
9787 case ARM::VLD2LNdWB_fixed_Asm_16:
9788 case ARM::VLD2LNdWB_fixed_Asm_32:
9789 case ARM::VLD2LNqWB_fixed_Asm_16:
9790 case ARM::VLD2LNqWB_fixed_Asm_32: {
9813 case ARM::VLD3LNdWB_fixed_Asm_8:
9814 case ARM::VLD3LNdWB_fixed_Asm_16:
9815 case ARM::VLD3LNdWB_fixed_Asm_32:
9816 case ARM::VLD3LNqWB_fixed_Asm_16:
9817 case ARM::VLD3LNqWB_fixed_Asm_32: {
9844 case ARM::VLD4LNdWB_fixed_Asm_8:
9845 case ARM::VLD4LNdWB_fixed_Asm_16:
9846 case ARM::VLD4LNdWB_fixed_Asm_32:
9847 case ARM::VLD4LNqWB_fixed_Asm_16:
9848 case ARM::VLD4LNqWB_fixed_Asm_32: {
9879 case ARM::VLD1LNdAsm_8:
9880 case ARM::VLD1LNdAsm_16:
9881 case ARM::VLD1LNdAsm_32: {
9898 case ARM::VLD2LNdAsm_8:
9899 case ARM::VLD2LNdAsm_16:
9900 case ARM::VLD2LNdAsm_32:
9901 case ARM::VLD2LNqAsm_16:
9902 case ARM::VLD2LNqAsm_32: {
9923 case ARM::VLD3LNdAsm_8:
9924 case ARM::VLD3LNdAsm_16:
9925 case ARM::VLD3LNdAsm_32:
9926 case ARM::VLD3LNqAsm_16:
9927 case ARM::VLD3LNqAsm_32: {
9952 case ARM::VLD4LNdAsm_8:
9953 case ARM::VLD4LNdAsm_16:
9954 case ARM::VLD4LNdAsm_32:
9955 case ARM::VLD4LNqAsm_16:
9956 case ARM::VLD4LNqAsm_32: {
9986 case ARM::VLD3DUPdAsm_8:
9987 case ARM::VLD3DUPdAsm_16:
9988 case ARM::VLD3DUPdAsm_32:
9989 case ARM::VLD3DUPqAsm_8:
9990 case ARM::VLD3DUPqAsm_16:
9991 case ARM::VLD3DUPqAsm_32: {
10008 case ARM::VLD3DUPdWB_fixed_Asm_8:
10009 case ARM::VLD3DUPdWB_fixed_Asm_16:
10010 case ARM::VLD3DUPdWB_fixed_Asm_32:
10011 case ARM::VLD3DUPqWB_fixed_Asm_8:
10012 case ARM::VLD3DUPqWB_fixed_Asm_16:
10013 case ARM::VLD3DUPqWB_fixed_Asm_32: {
10032 case ARM::VLD3DUPdWB_register_Asm_8:
10033 case ARM::VLD3DUPdWB_register_Asm_16:
10034 case ARM::VLD3DUPdWB_register_Asm_32:
10035 case ARM::VLD3DUPqWB_register_Asm_8:
10036 case ARM::VLD3DUPqWB_register_Asm_16:
10037 case ARM::VLD3DUPqWB_register_Asm_32: {
10057 case ARM::VLD3dAsm_8:
10058 case ARM::VLD3dAsm_16:
10059 case ARM::VLD3dAsm_32:
10060 case ARM::VLD3qAsm_8:
10061 case ARM::VLD3qAsm_16:
10062 case ARM::VLD3qAsm_32: {
10079 case ARM::VLD3dWB_fixed_Asm_8:
10080 case ARM::VLD3dWB_fixed_Asm_16:
10081 case ARM::VLD3dWB_fixed_Asm_32:
10082 case ARM::VLD3qWB_fixed_Asm_8:
10083 case ARM::VLD3qWB_fixed_Asm_16:
10084 case ARM::VLD3qWB_fixed_Asm_32: {
10103 case ARM::VLD3dWB_register_Asm_8:
10104 case ARM::VLD3dWB_register_Asm_16:
10105 case ARM::VLD3dWB_register_Asm_32:
10106 case ARM::VLD3qWB_register_Asm_8:
10107 case ARM::VLD3qWB_register_Asm_16:
10108 case ARM::VLD3qWB_register_Asm_32: {
10128 case ARM::VLD4DUPdAsm_8:
10129 case ARM::VLD4DUPdAsm_16:
10130 case ARM::VLD4DUPdAsm_32:
10131 case ARM::VLD4DUPqAsm_8:
10132 case ARM::VLD4DUPqAsm_16:
10133 case ARM::VLD4DUPqAsm_32: {
10152 case ARM::VLD4DUPdWB_fixed_Asm_8:
10153 case ARM::VLD4DUPdWB_fixed_Asm_16:
10154 case ARM::VLD4DUPdWB_fixed_Asm_32:
10155 case ARM::VLD4DUPqWB_fixed_Asm_8:
10156 case ARM::VLD4DUPqWB_fixed_Asm_16:
10157 case ARM::VLD4DUPqWB_fixed_Asm_32: {
10178 case ARM::VLD4DUPdWB_register_Asm_8:
10179 case ARM::VLD4DUPdWB_register_Asm_16:
10180 case ARM::VLD4DUPdWB_register_Asm_32:
10181 case ARM::VLD4DUPqWB_register_Asm_8:
10182 case ARM::VLD4DUPqWB_register_Asm_16:
10183 case ARM::VLD4DUPqWB_register_Asm_32: {
10205 case ARM::VLD4dAsm_8:
10206 case ARM::VLD4dAsm_16:
10207 case ARM::VLD4dAsm_32:
10208 case ARM::VLD4qAsm_8:
10209 case ARM::VLD4qAsm_16:
10210 case ARM::VLD4qAsm_32: {
10229 case ARM::VLD4dWB_fixed_Asm_8:
10230 case ARM::VLD4dWB_fixed_Asm_16:
10231 case ARM::VLD4dWB_fixed_Asm_32:
10232 case ARM::VLD4qWB_fixed_Asm_8:
10233 case ARM::VLD4qWB_fixed_Asm_16:
10234 case ARM::VLD4qWB_fixed_Asm_32: {
10255 case ARM::VLD4dWB_register_Asm_8:
10256 case ARM::VLD4dWB_register_Asm_16:
10257 case ARM::VLD4dWB_register_Asm_32:
10258 case ARM::VLD4qWB_register_Asm_8:
10259 case ARM::VLD4qWB_register_Asm_16:
10260 case ARM::VLD4qWB_register_Asm_32: {
10282 case ARM::VST3dAsm_8:
10283 case ARM::VST3dAsm_16:
10284 case ARM::VST3dAsm_32:
10285 case ARM::VST3qAsm_8:
10286 case ARM::VST3qAsm_16:
10287 case ARM::VST3qAsm_32: {
10304 case ARM::VST3dWB_fixed_Asm_8:
10305 case ARM::VST3dWB_fixed_Asm_16:
10306 case ARM::VST3dWB_fixed_Asm_32:
10307 case ARM::VST3qWB_fixed_Asm_8:
10308 case ARM::VST3qWB_fixed_Asm_16:
10309 case ARM::VST3qWB_fixed_Asm_32: {
10328 case ARM::VST3dWB_register_Asm_8:
10329 case ARM::VST3dWB_register_Asm_16:
10330 case ARM::VST3dWB_register_Asm_32:
10331 case ARM::VST3qWB_register_Asm_8:
10332 case ARM::VST3qWB_register_Asm_16:
10333 case ARM::VST3qWB_register_Asm_32: {
10353 case ARM::VST4dAsm_8:
10354 case ARM::VST4dAsm_16:
10355 case ARM::VST4dAsm_32:
10356 case ARM::VST4qAsm_8:
10357 case ARM::VST4qAsm_16:
10358 case ARM::VST4qAsm_32: {
10377 case ARM::VST4dWB_fixed_Asm_8:
10378 case ARM::VST4dWB_fixed_Asm_16:
10379 case ARM::VST4dWB_fixed_Asm_32:
10380 case ARM::VST4qWB_fixed_Asm_8:
10381 case ARM::VST4qWB_fixed_Asm_16:
10382 case ARM::VST4qWB_fixed_Asm_32: {
10403 case ARM::VST4dWB_register_Asm_8:
10404 case ARM::VST4dWB_register_Asm_16:
10405 case ARM::VST4dWB_register_Asm_32:
10406 case ARM::VST4qWB_register_Asm_8:
10407 case ARM::VST4qWB_register_Asm_16:
10408 case ARM::VST4qWB_register_Asm_32: {
10436 (inITBlock() ? ARM::NoRegister : ARM::CPSR) &&
10437 !HasWideQualifier) {
10441 case ARM::t2LSLri: NewOpc = ARM::tLSLri;
break;
10442 case ARM::t2LSRri: NewOpc = ARM::tLSRri;
break;
10443 case ARM::t2ASRri: NewOpc = ARM::tASRri;
break;
10461 case ARM::t2MOVSsr: {
10465 bool isNarrow =
false;
10470 inITBlock() == (Inst.
getOpcode() == ARM::t2MOVsr) &&
10477 case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr;
break;
10478 case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr;
break;
10479 case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr;
break;
10480 case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR : ARM::t2RORrr;
break;
10486 Inst.
getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : ARM::NoRegister));
10493 Inst.
getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : ARM::NoRegister));
10498 case ARM::t2MOVSsi: {
10502 bool isNarrow =
false;
10505 inITBlock() == (Inst.
getOpcode() == ARM::t2MOVsi) &&
10512 bool isMov =
false;
10523 newOpc = isNarrow ? ARM::tMOVSr : ARM::t2MOVr;
10527 case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri;
break;
10528 case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri;
break;
10529 case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri;
break;
10530 case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow =
false;
break;
10531 case ARM_AM::rrx: isNarrow =
false; newOpc = ARM::t2RRX;
break;
10534 if (Amount == 32) Amount = 0;
10537 if (isNarrow && !isMov)
10539 Inst.
getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : ARM::NoRegister));
10541 if (newOpc != ARM::t2RRX && !isMov)
10547 Inst.
getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : ARM::NoRegister));
10591 unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
10600 if (
Opc == ARM::MOVsi)
10621 case ARM::t2LDMIA_UPD: {
10637 case ARM::t2STMDB_UPD: {
10653 case ARM::LDMIA_UPD:
10656 if (
static_cast<ARMOperand &
>(*Operands[0]).
getToken() ==
"pop" &&
10671 case ARM::STMDB_UPD:
10674 if (
static_cast<ARMOperand &
>(*Operands[0]).
getToken() ==
"push" &&
10687 case ARM::t2ADDri12:
10688 case ARM::t2SUBri12:
10689 case ARM::t2ADDspImm12:
10690 case ARM::t2SUBspImm12: {
10693 const StringRef Token =
static_cast<ARMOperand &
>(*Operands[0]).
getToken();
10694 if ((Token !=
"add" && Token !=
"sub") ||
10698 case ARM::t2ADDri12:
10701 case ARM::t2SUBri12:
10704 case ARM::t2ADDspImm12:
10707 case ARM::t2SUBspImm12:
10722 Operands.size() == MnemonicOpsEndInd + 3) {
10733 Operands.size() == MnemonicOpsEndInd + 3) {
10739 case ARM::t2SUBri: {
10744 if (HasWideQualifier)
10751 (inITBlock() ? ARM::NoRegister : ARM::CPSR))
10757 int i = (Operands[MnemonicOpsEndInd + 1]->isImm())
10758 ? MnemonicOpsEndInd + 1
10759 : MnemonicOpsEndInd + 2;
10760 MCParsedAsmOperand &
Op = *Operands[i];
10766 ARM::tADDi8 : ARM::tSUBi8);
10776 case ARM::t2ADDspImm:
10777 case ARM::t2SUBspImm: {
10782 if (V & 3 || V > ((1 << 7) - 1) << 2)
10795 case ARM::t2ADDrr: {
10857 case ARM::tLDMIA: {
10863 bool hasWritebackToken =
10864 (
static_cast<ARMOperand &
>(*Operands[MnemonicOpsEndInd + 1])
10866 static_cast<ARMOperand &
>(*Operands[MnemonicOpsEndInd + 1])
10868 bool listContainsBase;
10870 (!listContainsBase && !hasWritebackToken) ||
10871 (listContainsBase && hasWritebackToken)) {
10874 Inst.
setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
10877 if (hasWritebackToken)
10884 case ARM::tSTMIA_UPD: {
10889 bool listContainsBase;
10899 bool listContainsBase;
10913 bool listContainsBase;
10930 (inITBlock() ? ARM::NoRegister : ARM::CPSR) &&
10931 !HasWideQualifier) {
10952 !HasWideQualifier) {
10959 if (
Op == ARM::tMOVr) {
10977 !HasWideQualifier) {
10981 case ARM::t2SXTH: NewOpc = ARM::tSXTH;
break;
10982 case ARM::t2SXTB: NewOpc = ARM::tSXTB;
break;
10983 case ARM::t2UXTH: NewOpc = ARM::tUXTH;
break;
10984 case ARM::t2UXTB: NewOpc = ARM::tUXTB;
break;
11022 case ARM::ADDrsi: {
11028 case ARM::ANDrsi: newOpc = ARM::ANDrr;
break;
11029 case ARM::ORRrsi: newOpc = ARM::ORRrr;
break;
11030 case ARM::EORrsi: newOpc = ARM::EORrr;
break;
11031 case ARM::BICrsi: newOpc = ARM::BICrr;
break;
11032 case ARM::SUBrsi: newOpc = ARM::SUBrr;
break;
11033 case ARM::ADDrsi: newOpc = ARM::ADDrr;
break;
11056 assert(!inITBlock() &&
"nested IT blocks?!");
11072 (inITBlock() ? ARM::NoRegister : ARM::CPSR) &&
11073 !HasWideQualifier) {
11077 case ARM::t2LSLrr: NewOpc = ARM::tLSLrr;
break;
11078 case ARM::t2LSRrr: NewOpc = ARM::tLSRrr;
break;
11079 case ARM::t2ASRrr: NewOpc = ARM::tASRrr;
break;
11080 case ARM::t2SBCrr: NewOpc = ARM::tSBC;
break;
11081 case ARM::t2RORrr: NewOpc = ARM::tROR;
break;
11082 case ARM::t2BICrr: NewOpc = ARM::tBIC;
break;
11109 (inITBlock() ? ARM::NoRegister : ARM::CPSR) &&
11110 !HasWideQualifier) {
11114 case ARM::t2ADCrr: NewOpc = ARM::tADC;
break;
11115 case ARM::t2ANDrr: NewOpc = ARM::tAND;
break;
11116 case ARM::t2EORrr: NewOpc = ARM::tEOR;
break;
11117 case ARM::t2ORRrr: NewOpc = ARM::tORR;
break;
11136 case ARM::MVE_VPST:
11137 case ARM::MVE_VPTv16i8:
11138 case ARM::MVE_VPTv8i16:
11139 case ARM::MVE_VPTv4i32:
11140 case ARM::MVE_VPTv16u8:
11141 case ARM::MVE_VPTv8u16:
11142 case ARM::MVE_VPTv4u32:
11143 case ARM::MVE_VPTv16s8:
11144 case ARM::MVE_VPTv8s16:
11145 case ARM::MVE_VPTv4s32:
11146 case ARM::MVE_VPTv4f32:
11147 case ARM::MVE_VPTv8f16:
11148 case ARM::MVE_VPTv16i8r:
11149 case ARM::MVE_VPTv8i16r:
11150 case ARM::MVE_VPTv4i32r:
11151 case ARM::MVE_VPTv16u8r:
11152 case ARM::MVE_VPTv8u16r:
11153 case ARM::MVE_VPTv4u32r:
11154 case ARM::MVE_VPTv16s8r:
11155 case ARM::MVE_VPTv8s16r:
11156 case ARM::MVE_VPTv4s32r:
11157 case ARM::MVE_VPTv4f32r:
11158 case ARM::MVE_VPTv8f16r: {
11159 assert(!inVPTBlock() &&
"Nested VPT blocks are not allowed");
11161 VPTState.Mask = MO.
getImm();
11162 VPTState.CurPosition = 0;
11170ARMAsmParser::checkEarlyTargetMatchPredicate(MCInst &Inst,
11177 if (Operands[0]->isToken() &&
11178 static_cast<ARMOperand &
>(*Operands[0]).
getToken() ==
"nop" &&
11179 ((
isThumb() && !isThumbOne()) || hasV6MOps())) {
11180 return Match_MnemonicFail;
11185 return Match_Success;
11189unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
11193 const MCInstrDesc &MCID = MII.get(
Opc);
11196 "optionally flag setting instruction missing optional def operand");
11198 "operand count mismatch!");
11199 bool IsCPSR =
false;
11201 for (
unsigned OpNo = 0; OpNo < MCID.
NumOperands; ++OpNo) {
11202 if (MCID.
operands()[OpNo].isOptionalDef() &&
11209 if (isThumbOne() && !IsCPSR)
11210 return Match_RequiresFlagSetting;
11213 if (isThumbTwo() && !IsCPSR && !inITBlock())
11214 return Match_RequiresITBlock;
11215 if (isThumbTwo() && IsCPSR && inITBlock())
11216 return Match_RequiresNotITBlock;
11219 return Match_RequiresNotITBlock;
11220 }
else if (isThumbOne()) {
11223 if (
Opc == ARM::tADDhirr && !hasV6MOps() &&
11226 return Match_RequiresThumb2;
11228 else if (
Opc == ARM::tMOVr && !hasV6Ops() &&
11231 return Match_RequiresV6;
11237 if (
Opc == ARM::t2MOVr && !hasV8Ops())
11242 return Match_RequiresV8;
11247 return Match_RequiresV8;
11253 case ARM::VMRS_FPCXTS:
11254 case ARM::VMRS_FPCXTNS:
11255 case ARM::VMSR_FPCXTS:
11256 case ARM::VMSR_FPCXTNS:
11257 case ARM::VMRS_FPSCR_NZCVQC:
11258 case ARM::VMSR_FPSCR_NZCVQC:
11260 case ARM::VMRS_VPR:
11262 case ARM::VMSR_VPR:
11268 return Match_InvalidOperand;
11274 return Match_RequiresV8;
11282 return Match_InvalidTiedOperand;
11289 if (MCID.
operands()[
I].RegClass == ARM::rGPRRegClassID) {
11304 MCRegister
Reg =
Op.getReg();
11305 if ((
Reg == ARM::SP) && !hasV8Ops())
11306 return Match_RequiresV8;
11307 else if (
Reg == ARM::PC)
11308 return Match_InvalidOperand;
11311 return Match_Success;
11324bool ARMAsmParser::isITBlockTerminator(MCInst &Inst)
const {
11325 const MCInstrDesc &MCID = MII.get(Inst.
getOpcode());
11341unsigned ARMAsmParser::MatchInstruction(
OperandVector &Operands, MCInst &Inst,
11342 SmallVectorImpl<NearMissInfo> &NearMisses,
11343 bool MatchingInlineAsm,
11344 bool &EmitInITBlock,
11347 if (inExplicitITBlock() || !isThumbTwo() || !useImplicitITThumb())
11348 return MatchInstructionImpl(Operands, Inst, &NearMisses, MatchingInlineAsm);
11352 if (inImplicitITBlock()) {
11353 extendImplicitITBlock(ITState.Cond);
11354 if (MatchInstructionImpl(Operands, Inst,
nullptr, MatchingInlineAsm) ==
11358 const MCInstrDesc &MCID = MII.get(Inst.
getOpcode());
11364 if (InstCond == ITCond) {
11365 EmitInITBlock =
true;
11366 return Match_Success;
11368 invertCurrentITCondition();
11369 EmitInITBlock =
true;
11370 return Match_Success;
11374 rewindImplicitITPosition();
11378 flushPendingInstructions(Out);
11379 unsigned PlainMatchResult =
11380 MatchInstructionImpl(Operands, Inst, &NearMisses, MatchingInlineAsm);
11381 if (PlainMatchResult == Match_Success) {
11382 const MCInstrDesc &MCID = MII.get(Inst.
getOpcode());
11390 EmitInITBlock =
false;
11391 return Match_Success;
11394 EmitInITBlock =
false;
11395 return Match_Success;
11398 EmitInITBlock =
false;
11399 return Match_Success;
11406 startImplicitITBlock();
11407 if (MatchInstructionImpl(Operands, Inst,
nullptr, MatchingInlineAsm) ==
11409 const MCInstrDesc &MCID = MII.get(Inst.
getOpcode());
11414 EmitInITBlock =
true;
11415 return Match_Success;
11418 discardImplicitITBlock();
11422 EmitInITBlock =
false;
11423 return PlainMatchResult;
11427 unsigned VariantID = 0);
// Top-level match-and-emit entry point: matches the parsed operands, runs
// post-match validation and instruction processing, then either emits the
// instruction or queues it as a pending conditional instruction.
// NOTE(review): garbled extract — interior lines (error paths, emission call,
// some case bodies) are elided between the fused original-line numbers.
11430bool ARMAsmParser::matchAndEmitInstruction(
SMLoc IDLoc,
unsigned &Opcode,
11433 bool MatchingInlineAsm) {
11435 unsigned MatchResult;
11436 bool PendConditionalInstruction =
false;
11439 MatchResult = MatchInstruction(Operands, Inst, NearMisses, MatchingInlineAsm,
11440 PendConditionalInstruction, Out);
11445 switch (MatchResult) {
11446 case Match_Success:
// Context-sensitive validation that the generated matcher cannot express.
11453 if (validateInstruction(Inst, Operands, MnemonicOpsEndInd)) {
// Still advance the IT/VPT block state so later instructions stay in sync.
11456 forwardITPosition();
11457 forwardVPTPosition();
// Some instructions need post-processing (and may expand to others); loop
// until processInstruction reports no further changes.
11466 while (processInstruction(Inst, Operands, MnemonicOpsEndInd, Out))
11475 forwardITPosition();
11476 forwardVPTPosition();
// Conditional instructions in an implicit IT block are buffered and flushed
// once the block is full or terminated.
11484 if (PendConditionalInstruction) {
11485 PendingConditionalInsts.
push_back(Inst);
11486 if (isITBlockFull() || isITBlockTerminator(Inst))
11487 flushPendingInstructions(Out);
11492 case Match_NearMisses:
11493 ReportNearMisses(NearMisses, IDLoc, Operands);
11495 case Match_MnemonicFail: {
// Unknown mnemonic: suggest a near spelling given the available features.
11496 FeatureBitset FBS = ComputeAvailableFeatures(getSTI().getFeatureBits());
11498 ((ARMOperand &)*Operands[0]).
getToken(), FBS);
11499 return Error(IDLoc,
"invalid instruction" + Suggestion,
11500 ((ARMOperand &)*Operands[0]).getLocRange());
// Dispatches target-specific assembler directives (.word, .thumb, ARM EHABI
// unwinding directives, Windows SEH directives, etc.) to their parsers.
// NOTE(review): garbled extract — the return statements and some surrounding
// lines are elided between the fused original-line numbers.
11508bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
11514 if (IDVal ==
".word")
11515 parseLiteralValues(4, DirectiveID.
getLoc());
11516 else if (IDVal ==
".short" || IDVal ==
".hword")
11517 parseLiteralValues(2, DirectiveID.
getLoc());
11518 else if (IDVal ==
".thumb")
11519 parseDirectiveThumb(DirectiveID.
getLoc());
11520 else if (IDVal ==
".arm")
11521 parseDirectiveARM(DirectiveID.
getLoc());
11522 else if (IDVal ==
".thumb_func")
11523 parseDirectiveThumbFunc(DirectiveID.
getLoc());
11524 else if (IDVal ==
".code")
11525 parseDirectiveCode(DirectiveID.
getLoc());
11526 else if (IDVal ==
".syntax")
11527 parseDirectiveSyntax(DirectiveID.
getLoc());
11528 else if (IDVal ==
".unreq")
11529 parseDirectiveUnreq(DirectiveID.
getLoc());
11530 else if (IDVal ==
".fnend")
11531 parseDirectiveFnEnd(DirectiveID.
getLoc());
11532 else if (IDVal ==
".cantunwind")
11533 parseDirectiveCantUnwind(DirectiveID.
getLoc());
11534 else if (IDVal ==
".personality")
11535 parseDirectivePersonality(DirectiveID.
getLoc());
11536 else if (IDVal ==
".handlerdata")
11537 parseDirectiveHandlerData(DirectiveID.
getLoc());
11538 else if (IDVal ==
".setfp")
11539 parseDirectiveSetFP(DirectiveID.
getLoc());
11540 else if (IDVal ==
".pad")
11541 parseDirectivePad(DirectiveID.
getLoc());
11542 else if (IDVal ==
".save")
11543 parseDirectiveRegSave(DirectiveID.
getLoc(),
false);
11544 else if (IDVal ==
".vsave")
11545 parseDirectiveRegSave(DirectiveID.
getLoc(),
true);
11546 else if (IDVal ==
".ltorg" || IDVal ==
".pool")
11547 parseDirectiveLtorg(DirectiveID.
getLoc());
11548 else if (IDVal ==
".even")
11549 parseDirectiveEven(DirectiveID.
getLoc());
11550 else if (IDVal ==
".personalityindex")
11551 parseDirectivePersonalityIndex(DirectiveID.
getLoc());
11552 else if (IDVal ==
".unwind_raw")
11553 parseDirectiveUnwindRaw(DirectiveID.
getLoc());
11554 else if (IDVal ==
".movsp")
11555 parseDirectiveMovSP(DirectiveID.
getLoc());
11556 else if (IDVal ==
".arch_extension")
11557 parseDirectiveArchExtension(DirectiveID.
getLoc());
11558 else if (IDVal ==
".align")
11559 return parseDirectiveAlign(DirectiveID.
getLoc());
11560 else if (IDVal ==
".thumb_set")
11561 parseDirectiveThumbSet(DirectiveID.
getLoc());
11562 else if (IDVal ==
".inst")
11563 parseDirectiveInst(DirectiveID.
getLoc());
11564 else if (IDVal ==
".inst.n")
11565 parseDirectiveInst(DirectiveID.
getLoc(),
'n');
11566 else if (IDVal ==
".inst.w")
11567 parseDirectiveInst(DirectiveID.
getLoc(),
'w');
// Directives only accepted when targeting neither Mach-O nor COFF
// (i.e. the ELF/EABI path, per the IsMachO/IsCOFF checks).
11568 else if (!IsMachO && !IsCOFF) {
11569 if (IDVal ==
".arch")
11570 parseDirectiveArch(DirectiveID.
getLoc());
11571 else if (IDVal ==
".cpu")
11572 parseDirectiveCPU(DirectiveID.
getLoc());
11573 else if (IDVal ==
".eabi_attribute")
11574 parseDirectiveEabiAttr(DirectiveID.
getLoc());
11575 else if (IDVal ==
".fpu")
11576 parseDirectiveFPU(DirectiveID.
getLoc());
11577 else if (IDVal ==
".fnstart")
11578 parseDirectiveFnStart(DirectiveID.
getLoc());
11579 else if (IDVal ==
".object_arch")
11580 parseDirectiveObjectArch(DirectiveID.
getLoc());
11581 else if (IDVal ==
".tlsdescseq")
11582 parseDirectiveTLSDescSeq(DirectiveID.
getLoc());
11585 }
// Windows-on-ARM structured exception handling directives (COFF only).
else if (IsCOFF) {
11586 if (IDVal ==
".seh_stackalloc")
11587 parseDirectiveSEHAllocStack(DirectiveID.
getLoc(),
false);
11588 else if (IDVal ==
".seh_stackalloc_w")
11589 parseDirectiveSEHAllocStack(DirectiveID.
getLoc(),
true);
11590 else if (IDVal ==
".seh_save_regs")
11591 parseDirectiveSEHSaveRegs(DirectiveID.
getLoc(),
false);
11592 else if (IDVal ==
".seh_save_regs_w")
11593 parseDirectiveSEHSaveRegs(DirectiveID.
getLoc(),
true);
11594 else if (IDVal ==
".seh_save_sp")
11595 parseDirectiveSEHSaveSP(DirectiveID.
getLoc());
11596 else if (IDVal ==
".seh_save_fregs")
11597 parseDirectiveSEHSaveFRegs(DirectiveID.
getLoc());
11598 else if (IDVal ==
".seh_save_lr")
11599 parseDirectiveSEHSaveLR(DirectiveID.
getLoc());
11600 else if (IDVal ==
".seh_endprologue")
11601 parseDirectiveSEHPrologEnd(DirectiveID.
getLoc(),
false);
11602 else if (IDVal ==
".seh_endprologue_fragment")
11603 parseDirectiveSEHPrologEnd(DirectiveID.
getLoc(),
true);
11604 else if (IDVal ==
".seh_nop")
11605 parseDirectiveSEHNop(DirectiveID.
getLoc(),
false);
11606 else if (IDVal ==
".seh_nop_w")
11607 parseDirectiveSEHNop(DirectiveID.
getLoc(),
true);
11608 else if (IDVal ==
".seh_startepilogue")
11609 parseDirectiveSEHEpilogStart(DirectiveID.
getLoc(),
false);
11610 else if (IDVal ==
".seh_startepilogue_cond")
11611 parseDirectiveSEHEpilogStart(DirectiveID.
getLoc(),
true);
11612 else if (IDVal ==
".seh_endepilogue")
11613 parseDirectiveSEHEpilogEnd(DirectiveID.
getLoc());
11614 else if (IDVal ==
".seh_custom")
11615 parseDirectiveSEHCustom(DirectiveID.
getLoc());
// Parses a comma-separated list of expressions (for .word/.short/.hword) and
// emits each as a literal of the given Size in bytes at location L.
11627bool ARMAsmParser::parseLiteralValues(
unsigned Size, SMLoc L) {
11628 auto parseOne = [&]() ->
bool {
11629 const MCExpr *
Value;
11630 if (getParser().parseExpression(
Value))
11632 getParser().getStreamer().emitValue(
Value,
Size, L);
// parseMany handles the comma-separated repetition and end-of-statement.
11635 return (parseMany(parseOne));
// Parses the .thumb directive: switches the streamer to Thumb (code16) and
// aligns to 2 bytes. Errors if the target lacks Thumb support.
11640bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
11641 if (parseEOL() || check(!hasThumb(), L,
"target does not support Thumb mode"))
11647 getTargetStreamer().emitCode16();
11648 getParser().getStreamer().emitCodeAlignment(
Align(2), &getSTI(), 0);
// Parses the .arm directive: switches the streamer to ARM (code32) and aligns
// to 4 bytes. Errors if the target lacks ARM support.
11654bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
11655 if (parseEOL() || check(!hasARM(), L,
"target does not support ARM mode"))
11660 getTargetStreamer().emitCode32();
11661 getParser().getStreamer().emitCodeAlignment(
Align(4), &getSTI(), 0);
// Hook called before a label is emitted: flush any buffered conditional
// instructions so they land before the label.
11665void ARMAsmParser::doBeforeLabelEmit(MCSymbol *Symbol, SMLoc IDLoc) {
11668 flushPendingInstructions(getStreamer());
// Hook called after a label is parsed: if a preceding .thumb_func flagged the
// next symbol, mark this one as a Thumb function and clear the flag.
11671void ARMAsmParser::onLabelParsed(MCSymbol *Symbol) {
11672 if (NextSymbolIsThumb) {
11673 getTargetStreamer().emitThumbFunc(Symbol);
11674 NextSymbolIsThumb =
false;
// Parses .thumb_func: either marks a named symbol as a Thumb function
// immediately, or arranges for the next parsed label to be so marked.
// NOTE(review): elided extract — the branch between the named and unnamed
// forms is missing between the fused original-line numbers.
11680bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
11681 MCAsmParser &Parser = getParser();
11691 MCSymbol *
Func = getParser().getContext().getOrCreateSymbol(
11693 getTargetStreamer().emitThumbFunc(Func);
11708 getTargetStreamer().emitCode16();
11710 NextSymbolIsThumb =
true;
// Parses .syntax: only "unified"/"UNIFIED" is accepted; "divided" syntax is
// explicitly rejected as unsupported.
11716bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
11717 MCAsmParser &Parser = getParser();
11718 const AsmToken &Tok = Parser.
getTok();
11720 Error(L,
"unexpected token in .syntax directive");
11726 if (check(
Mode ==
"divided" ||
Mode ==
"DIVIDED", L,
11727 "'.syntax divided' arm assembly not supported") ||
11728 check(
Mode !=
"unified" &&
Mode !=
"UNIFIED", L,
11729 "unrecognized syntax mode in .syntax directive") ||
// Parses .code 16|32: switches to Thumb (16) or ARM (32) mode, validating the
// operand and that the target supports the requested mode.
11740bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
11741 MCAsmParser &Parser = getParser();
11742 const AsmToken &Tok = Parser.
getTok();
11744 return Error(L,
"unexpected token in .code directive");
11746 if (Val != 16 && Val != 32) {
11747 Error(L,
"invalid operand to .code directive");
11757 return Error(L,
"target does not support Thumb mode");
11761 getTargetStreamer().emitCode16();
11764 return Error(L,
"target does not support ARM mode");
11768 getTargetStreamer().emitCode32();
// Parses "Name .req register": records a register alias in RegisterReqs.
// Re-registering the same name is only allowed with the identical register.
11776bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
11777 MCAsmParser &Parser = getParser();
11780 SMLoc SRegLoc, ERegLoc;
11781 const bool parseResult = parseRegister(
Reg, SRegLoc, ERegLoc);
11782 if (check(parseResult, SRegLoc,
"register name expected") || parseEOL())
// insert() returns the existing entry if Name was already mapped; a mismatch
// with the requested register is a redefinition error.
11785 if (RegisterReqs.
insert(std::make_pair(Name,
Reg)).first->second !=
Reg)
11786 return Error(SRegLoc,
11787 "redefinition of '" + Name +
"' does not match original.");
// Parses .unreq: removes a previously defined register alias.
// NOTE(review): elided extract — the erase of the alias is not visible here.
11794bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
11795 MCAsmParser &Parser = getParser();
11797 return Error(L,
"unexpected input in .unreq directive.");
// After an .arch/.cpu change, keep the current ARM/Thumb mode if the new
// target still supports it; otherwise switch to the supported mode and warn.
11806void ARMAsmParser::FixModeAfterArchChange(
bool WasThumb, SMLoc Loc) {
11808 if (WasThumb && hasThumb()) {
11811 }
else if (!WasThumb && hasARM()) {
// Forced mode switch: emit the mode marker for whichever ISA survives.
11817 getTargetStreamer().emitCode16();
11819 getTargetStreamer().emitCode32();
11823 Warning(Loc, Twine(
"new target does not support ") +
11824 (WasThumb ?
"thumb" :
"arm") +
" mode, switching to " +
11825 (!WasThumb ?
"thumb" :
"arm") +
" mode");
// Parses .arch: switches the subtarget to the named architecture, recomputes
// available features, fixes up the ARM/Thumb mode, and records the arch
// attribute via the target streamer.
11832bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
11833 StringRef Arch = getParser().parseStringToEndOfStatement().trim();
11836 if (
ID == ARM::ArchKind::INVALID)
11837 return Error(L,
"Unknown arch name");
11840 MCSubtargetInfo &STI = copySTI();
11843 setAvailableFeatures(ComputeAvailableFeatures(STI.
getFeatureBits()));
11844 FixModeAfterArchChange(WasThumb, L);
11846 getTargetStreamer().emitArch(
ID);
// Parses .eabi_attribute: an attribute tag (name or numeric constant)
// followed by an integer and/or string value, emitted via the target
// streamer as a build attribute.
// NOTE(review): elided extract — the tag-specific cases deciding which value
// kinds apply are partially missing between the fused original-line numbers.
11853bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
11854 MCAsmParser &Parser = getParser();
11863 Error(TagLoc,
"attribute name not recognised: " + Name);
11869 const MCExpr *AttrExpr;
11876 if (check(!CE, TagLoc,
"expected numeric constant"))
11879 Tag =
CE->getValue();
11885 StringRef StringValue =
"";
11886 bool IsStringValue =
false;
11888 int64_t IntegerValue = 0;
11889 bool IsIntegerValue =
false;
// Decide whether this tag takes a string, an integer, or both.
11892 IsStringValue =
true;
11894 IsStringValue =
true;
11895 IsIntegerValue =
true;
11896 }
else if (
Tag < 32 ||
Tag % 2 == 0)
11897 IsIntegerValue =
true;
11898 else if (
Tag % 2 == 1)
11899 IsStringValue =
true;
11903 if (IsIntegerValue) {
11904 const MCExpr *ValueExpr;
11911 return Error(ValueExprLoc,
"expected numeric constant");
11912 IntegerValue =
CE->getValue();
11920 std::string EscapedValue;
11921 if (IsStringValue) {
11929 StringValue = EscapedValue;
// Emit using whichever combination of value kinds was parsed.
11939 if (IsIntegerValue && IsStringValue) {
11941 getTargetStreamer().emitIntTextAttribute(
Tag, IntegerValue, StringValue);
11942 }
else if (IsIntegerValue)
11943 getTargetStreamer().emitAttribute(
Tag, IntegerValue);
11944 else if (IsStringValue)
11945 getTargetStreamer().emitTextAttribute(
Tag, StringValue);
// Parses .cpu: switches the subtarget to the named CPU, recomputes available
// features, and fixes up the ARM/Thumb mode afterwards.
11951bool ARMAsmParser::parseDirectiveCPU(SMLoc L) {
11952 StringRef CPU = getParser().parseStringToEndOfStatement().trim();
11957 if (!getSTI().isCPUStringValid(CPU))
11958 return Error(L,
"Unknown CPU name");
11961 MCSubtargetInfo &STI = copySTI();
11963 setAvailableFeatures(ComputeAvailableFeatures(STI.
getFeatureBits()));
11964 FixModeAfterArchChange(WasThumb, L);
// Parses .fpu: resolves the FPU name to its feature list, applies each
// feature to a copied subtarget, and records the FPU via the target streamer.
11971bool ARMAsmParser::parseDirectiveFPU(SMLoc L) {
11972 SMLoc FPUNameLoc = getTok().getLoc();
11973 StringRef FPU = getParser().parseStringToEndOfStatement().trim();
11976 std::vector<StringRef> Features;
11978 return Error(FPUNameLoc,
"Unknown FPU name");
11980 MCSubtargetInfo &STI = copySTI();
11981 for (
auto Feature : Features)
11983 setAvailableFeatures(ComputeAvailableFeatures(STI.
getFeatureBits()));
11985 getTargetStreamer().emitFPU(
ID);
// Parses .fnstart (EHABI): starts a new unwinding region. Nested .fnstart
// without an intervening .fnend is an error, with a note at the earlier one.
11991bool ARMAsmParser::parseDirectiveFnStart(SMLoc L) {
11995 if (UC.hasFnStart()) {
11996 Error(L,
".fnstart starts before the end of previous one");
11997 UC.emitFnStartLocNotes();
12004 getTargetStreamer().emitFnStart();
12006 UC.recordFnStart(L);
// Parses .fnend (EHABI): closes the current unwinding region; requires a
// matching preceding .fnstart.
12012bool ARMAsmParser::parseDirectiveFnEnd(SMLoc L) {
12016 if (!UC.hasFnStart())
12017 return Error(L,
".fnstart must precede .fnend directive");
12020 getTargetStreamer().emitFnEnd();
// Parses .cantunwind (EHABI): marks the function as not unwindable. Mutually
// exclusive with .handlerdata and .personality, with notes at the conflicting
// directives.
12028bool ARMAsmParser::parseDirectiveCantUnwind(SMLoc L) {
12032 UC.recordCantUnwind(L);
12034 if (check(!UC.hasFnStart(), L,
".fnstart must precede .cantunwind directive"))
12037 if (UC.hasHandlerData()) {
12038 Error(L,
".cantunwind can't be used with .handlerdata directive");
12039 UC.emitHandlerDataLocNotes();
12042 if (UC.hasPersonality()) {
12043 Error(L,
".cantunwind can't be used with .personality directive");
12044 UC.emitPersonalityLocNotes();
12048 getTargetStreamer().emitCantUnwind();
// Parses .personality (EHABI): records the named personality routine symbol.
// Must follow .fnstart, cannot coexist with .cantunwind, must precede
// .handlerdata, and may appear at most once per region.
12054bool ARMAsmParser::parseDirectivePersonality(SMLoc L) {
12055 MCAsmParser &Parser = getParser();
12056 bool HasExistingPersonality = UC.hasPersonality();
12060 return Error(L,
"unexpected input in .personality directive.");
12067 UC.recordPersonality(L);
12070 if (!UC.hasFnStart())
12071 return Error(L,
".fnstart must precede .personality directive");
12072 if (UC.cantUnwind()) {
12073 Error(L,
".personality can't be used with .cantunwind directive");
12074 UC.emitCantUnwindLocNotes();
12077 if (UC.hasHandlerData()) {
12078 Error(L,
".personality must precede .handlerdata directive");
12079 UC.emitHandlerDataLocNotes();
12082 if (HasExistingPersonality) {
12083 Error(L,
"multiple personality directives");
12084 UC.emitPersonalityLocNotes();
12088 MCSymbol *PR = getParser().getContext().getOrCreateSymbol(Name);
12089 getTargetStreamer().emitPersonality(PR);
// Parses .handlerdata (EHABI): marks the start of exception handler data.
// Must follow .fnstart and cannot coexist with .cantunwind.
// NOTE(review): the error text at original line 12102 says ".personality" but
// this is the .handlerdata parser — looks like an upstream copy/paste message;
// left byte-identical here. Confirm against upstream before changing.
12095bool ARMAsmParser::parseDirectiveHandlerData(SMLoc L) {
12099 UC.recordHandlerData(L);
12101 if (!UC.hasFnStart())
12102 return Error(L,
".fnstart must precede .personality directive");
12103 if (UC.cantUnwind()) {
12104 Error(L,
".handlerdata can't be used with .cantunwind directive");
12105 UC.emitCantUnwindLocNotes();
12109 getTargetStreamer().emitHandlerData();
// Parses .setfp fpreg, spreg[, #offset] (EHABI): records the frame pointer
// setup for unwinding. The source register must be SP or the current FP.
// NOTE(review): elided extract — the final emitSetFP call and parts of the
// offset handling are missing between the fused original-line numbers.
12115bool ARMAsmParser::parseDirectiveSetFP(SMLoc L) {
12116 MCAsmParser &Parser = getParser();
12118 if (check(!UC.hasFnStart(), L,
".fnstart must precede .setfp directive") ||
12119 check(UC.hasHandlerData(), L,
12120 ".setfp must precede .handlerdata directive"))
12125 MCRegister
FPReg = tryParseRegister();
12127 if (check(!
FPReg, FPRegLoc,
"frame pointer register expected") ||
12133 MCRegister
SPReg = tryParseRegister();
12134 if (check(!
SPReg, SPRegLoc,
"stack pointer register expected") ||
12135 check(
SPReg != ARM::SP &&
SPReg != UC.getFPReg(), SPRegLoc,
12136 "register should be either $sp or the latest fp register"))
// Remember the FP register so later .movsp/.setfp checks can validate it.
12140 UC.saveFPReg(
FPReg);
12150 const MCExpr *OffsetExpr;
12153 if (getParser().parseExpression(OffsetExpr, EndLoc))
12154 return Error(ExLoc,
"malformed setfp offset");
12156 if (check(!CE, ExLoc,
"setfp offset must be an immediate"))
// Parses .pad #offset (EHABI): records stack adjustment for unwinding.
// Requires .fnstart before it and must precede .handlerdata.
12170bool ARMAsmParser::parseDirectivePad(SMLoc L) {
12171 MCAsmParser &Parser = getParser();
12173 if (!UC.hasFnStart())
12174 return Error(L,
".fnstart must precede .pad directive");
12175 if (UC.hasHandlerData())
12176 return Error(L,
".pad must precede .handlerdata directive");
12184 const MCExpr *OffsetExpr;
12187 if (getParser().parseExpression(OffsetExpr, EndLoc))
12188 return Error(ExLoc,
"malformed pad offset");
12191 return Error(ExLoc,
"pad offset must be an immediate");
12196 getTargetStreamer().emitPad(
CE->getValue());
// Parses .save (GPR list) or .vsave (DPR list) for EHABI unwinding; IsVector
// selects which form. The register list kind must match the directive.
12203bool ARMAsmParser::parseDirectiveRegSave(SMLoc L,
bool IsVector) {
12205 if (!UC.hasFnStart())
12206 return Error(L,
".fnstart must precede .save or .vsave directives");
12207 if (UC.hasHandlerData())
12208 return Error(L,
".save or .vsave must precede .handlerdata directive");
12214 if (parseRegisterList(Operands,
true,
true) || parseEOL())
12216 ARMOperand &
Op = (ARMOperand &)*Operands[0];
12217 if (!IsVector && !
Op.isRegList())
12218 return Error(L,
".save expects GPR registers");
12219 if (IsVector && !
Op.isDPRRegList())
12220 return Error(L,
".vsave expects DPR registers");
12222 getTargetStreamer().emitRegSave(
Op.getRegList(), IsVector);
// Parses .inst / .inst.n / .inst.w: emits raw encoded instruction values.
// Suffix selects narrow ('n', 16-bit) or wide ('w', 32-bit) Thumb encodings;
// width suffixes are rejected in ARM mode. Without a suffix in Thumb mode the
// size is inferred from the value's encoding range.
// NOTE(review): elided extract — some guards and the suffix-assignment lines
// are missing between the fused original-line numbers.
12230bool ARMAsmParser::parseDirectiveInst(SMLoc Loc,
char Suffix) {
12246 return Error(Loc,
"width suffixes are invalid in ARM mode");
12249 auto parseOne = [&]() ->
bool {
12250 const MCExpr *Expr;
12251 if (getParser().parseExpression(Expr))
12255 return Error(Loc,
"expected constant expression");
12258 char CurSuffix = Suffix;
// .inst.n values must fit in a 16-bit halfword.
12261 if (
Value->getValue() > 0xffff)
12262 return Error(Loc,
"inst.n operand is too big, use inst.w instead");
12265 if (
Value->getValue() > 0xffffffff)
12266 return Error(Loc, StringRef(Suffix ?
"inst.w" :
"inst") +
12267 " operand is too big");
// No suffix in Thumb: classify by the Thumb-encoding value ranges; reject
// ambiguous values.
12271 if (
Value->getValue() < 0xe800)
12273 else if (
Value->getValue() >= 0xe8000000)
12276 return Error(Loc,
"cannot determine Thumb instruction size, "
12277 "use inst.n/inst.w instead");
12283 getTargetStreamer().emitInst(
Value->getValue(), CurSuffix);
// Raw inst bytes still consume IT/VPT block slots.
12284 forwardITPosition();
12285 forwardVPTPosition();
12290 return Error(Loc,
"expected expression following directive");
12291 if (parseMany(parseOne))
// Parses .ltorg / .pool: dumps the current literal (constant) pool here.
12298bool ARMAsmParser::parseDirectiveLtorg(SMLoc L) {
12301 getTargetStreamer().emitCurrentConstantPool();
// Parses .even: aligns to 2 bytes, using code alignment (nop-padding) in code
// sections and value alignment otherwise. Initializes sections if none yet.
12305bool ARMAsmParser::parseDirectiveEven(SMLoc L) {
12306 const MCSection *
Section = getStreamer().getCurrentSectionOnly();
12312 getStreamer().initSections(getSTI());
12313 Section = getStreamer().getCurrentSectionOnly();
12316 assert(Section &&
"must have section to emit alignment");
12317 if (
getContext().getAsmInfo()->useCodeAlign(*Section))
12318 getStreamer().emitCodeAlignment(
Align(2), &getSTI());
12320 getStreamer().emitValueToAlignment(
Align(2));
// Parses .personalityindex N (EHABI): selects one of the predefined
// personality routines (index 0-3). Same ordering/exclusivity rules as
// .personality: after .fnstart, before .handlerdata, not with .cantunwind,
// and at most one personality per region.
12327bool ARMAsmParser::parseDirectivePersonalityIndex(SMLoc L) {
12328 MCAsmParser &Parser = getParser();
12329 bool HasExistingPersonality = UC.hasPersonality();
12331 const MCExpr *IndexExpression;
12337 UC.recordPersonalityIndex(L);
12339 if (!UC.hasFnStart()) {
12340 return Error(L,
".fnstart must precede .personalityindex directive");
12342 if (UC.cantUnwind()) {
12343 Error(L,
".personalityindex cannot be used with .cantunwind");
12344 UC.emitCantUnwindLocNotes();
12347 if (UC.hasHandlerData()) {
12348 Error(L,
".personalityindex must precede .handlerdata directive");
12349 UC.emitHandlerDataLocNotes();
12352 if (HasExistingPersonality) {
12353 Error(L,
"multiple personality directives");
12354 UC.emitPersonalityLocNotes();
12360 return Error(IndexLoc,
"index must be a constant number");
12362 return Error(IndexLoc,
12363 "personality routine index should be in range [0-3]");
12365 getTargetStreamer().emitPersonalityIndex(
CE->getValue());
// Parses .unwind_raw offset, opcode[, opcode...]: a stack offset followed by
// raw unwind opcode bytes (each must fit in 8 bits), emitted verbatim.
12371bool ARMAsmParser::parseDirectiveUnwindRaw(SMLoc L) {
12372 MCAsmParser &Parser = getParser();
12373 int64_t StackOffset;
12374 const MCExpr *OffsetExpr;
12375 SMLoc OffsetLoc = getLexer().
getLoc();
12377 if (!UC.hasFnStart())
12378 return Error(L,
".fnstart must precede .unwind_raw directives");
12379 if (getParser().parseExpression(OffsetExpr))
12380 return Error(OffsetLoc,
"expected expression");
12384 return Error(OffsetLoc,
"offset must be a constant");
12386 StackOffset =
CE->getValue();
// Each subsequent operand is one raw unwind opcode byte.
12393 auto parseOne = [&]() ->
bool {
12394 const MCExpr *OE =
nullptr;
12395 SMLoc OpcodeLoc = getLexer().getLoc();
12398 OpcodeLoc,
"expected opcode expression"))
12402 return Error(OpcodeLoc,
"opcode value must be a constant");
12403 const int64_t Opcode = OC->
getValue();
12404 if (Opcode & ~0xff)
12405 return Error(OpcodeLoc,
"invalid opcode");
12411 SMLoc OpcodeLoc = getLexer().getLoc();
12413 return Error(OpcodeLoc,
"expected opcode expression");
12414 if (parseMany(parseOne))
12417 getTargetStreamer().emitUnwindRaw(StackOffset, Opcodes);
// Parses .tlsdescseq variable: annotates a TLS descriptor sequence for the
// named variable via the target streamer.
12423bool ARMAsmParser::parseDirectiveTLSDescSeq(SMLoc L) {
12424 MCAsmParser &Parser = getParser();
12427 return TokError(
"expected variable after '.tlsdescseq' directive");
12437 getTargetStreamer().annotateTLSDescriptorSequence(SRE);
// Parses .movsp reg[, #offset] (EHABI): declares that reg now holds the stack
// pointer value for unwinding. Only valid while FP is still SP; sp/pc are not
// allowed as the destination.
// NOTE(review): elided extract — the emitMovSP call is missing between the
// fused original-line numbers.
12443bool ARMAsmParser::parseDirectiveMovSP(SMLoc L) {
12444 MCAsmParser &Parser = getParser();
12445 if (!UC.hasFnStart())
12446 return Error(L,
".fnstart must precede .movsp directives");
12447 if (UC.getFPReg() != ARM::SP)
12448 return Error(L,
"unexpected .movsp directive");
12451 MCRegister
SPReg = tryParseRegister();
12453 return Error(SPRegLoc,
"register expected");
12455 return Error(SPRegLoc,
"sp and pc are not permitted in .movsp directive");
12462 const MCExpr *OffsetExpr;
12466 return Error(OffsetLoc,
"malformed offset expression");
12470 return Error(OffsetLoc,
"offset must be an immediate constant");
// The named register becomes the new FP for later unwinding checks.
12479 UC.saveFPReg(
SPReg);
// Parses .object_arch: records an architecture attribute for the object file
// without changing the current subtarget.
12486bool ARMAsmParser::parseDirectiveObjectArch(SMLoc L) {
12487 MCAsmParser &Parser = getParser();
12489 return Error(getLexer().getLoc(),
"unexpected token");
12497 if (
ID == ARM::ArchKind::INVALID)
12498 return Error(ArchLoc,
"unknown architecture '" + Arch +
"'");
12502 getTargetStreamer().emitObjectArch(
ID);
// Handles operand-less .align: aligns to 4 bytes, with code alignment
// (nop-padding) in code sections and zero-fill value alignment otherwise.
12508bool ARMAsmParser::parseDirectiveAlign(SMLoc L) {
12513 const MCSection *
Section = getStreamer().getCurrentSectionOnly();
12514 assert(Section &&
"must have section to emit alignment");
12515 if (
getContext().getAsmInfo()->useCodeAlign(*Section))
12516 getStreamer().emitCodeAlignment(
Align(4), &getSTI(), 0);
12518 getStreamer().emitValueToAlignment(
Align(4), 0, 1, 0);
// Parses .thumb_set symbol, value: like .set but also marks the symbol as a
// Thumb function via the target streamer.
12526bool ARMAsmParser::parseDirectiveThumbSet(SMLoc L) {
12527 MCAsmParser &Parser = getParser();
12531 "expected identifier after '.thumb_set'") ||
12536 const MCExpr *
Value;
12538 Parser, Sym,
Value))
12541 getTargetStreamer().emitThumbSet(Sym,
Value);
// Parses .seh_stackalloc / .seh_stackalloc_w (Windows SEH): records a stack
// allocation of the given size; Wide selects the wide encoding.
12548bool ARMAsmParser::parseDirectiveSEHAllocStack(SMLoc L,
bool Wide) {
12550 if (parseImmExpr(
Size))
12552 getTargetStreamer().emitARMWinCFIAllocStack(
Size, Wide);
// Parses .seh_save_regs / .seh_save_regs_w (Windows SEH): a GPR register
// list encoded as a bitmask. SP is not allowed; R8-R12 require the wide form.
12559bool ARMAsmParser::parseDirectiveSEHSaveRegs(SMLoc L,
bool Wide) {
12562 if (parseRegisterList(Operands) || parseEOL())
12564 ARMOperand &
Op = (ARMOperand &)*Operands[0];
12565 if (!
Op.isRegList())
12566 return Error(L,
".seh_save_regs{_w} expects GPR registers");
12567 const SmallVectorImpl<MCRegister> &RegList =
Op.getRegList();
// Translate the register list into a save mask, one bit per register.
12569 for (
size_t i = 0; i < RegList.
size(); ++i) {
12574 return Error(L,
".seh_save_regs{_w} can't include SP");
12575 assert(
Reg < 16U &&
"Register out of range");
12576 unsigned Bit = (1u <<
Reg);
// Bits 8-12 (R8-R12) are representable only in the wide encoding.
12579 if (!Wide && (Mask & 0x1f00) != 0)
12581 ".seh_save_regs cannot save R8-R12, needs .seh_save_regs_w");
12582 getTargetStreamer().emitARMWinCFISaveRegMask(Mask, Wide);
// Parses .seh_save_sp (Windows SEH): records which GPR holds the saved SP.
// SP itself (13) and PC (15) are rejected.
12588bool ARMAsmParser::parseDirectiveSEHSaveSP(SMLoc L) {
12589 MCRegister
Reg = tryParseRegister();
12591 return Error(L,
"expected GPR");
12593 if (Index > 14 || Index == 13)
12594 return Error(L,
"invalid register for .seh_save_sp");
12595 getTargetStreamer().emitARMWinCFISaveSP(Index);
// Parses .seh_save_fregs (Windows SEH): a DPR register list that must form a
// contiguous range entirely within d0-d15 or d16-d31, encoded as First/Last.
// NOTE(review): elided extract — parts of the First/Last computation are
// missing between the fused original-line numbers.
12601bool ARMAsmParser::parseDirectiveSEHSaveFRegs(SMLoc L) {
12604 if (parseRegisterList(Operands) || parseEOL())
12606 ARMOperand &
Op = (ARMOperand &)*Operands[0];
12607 if (!
Op.isDPRRegList())
12608 return Error(L,
".seh_save_fregs expects DPR registers");
12609 const SmallVectorImpl<MCRegister> &RegList =
Op.getRegList();
// Build a bitmask with one bit per D register in the list.
12611 for (
size_t i = 0; i < RegList.
size(); ++i) {
12613 assert(
Reg < 32U &&
"Register out of range");
12614 unsigned Bit = (1u <<
Reg);
12619 return Error(L,
".seh_save_fregs missing registers");
// Find the first set bit, then verify the set bits are contiguous.
12621 unsigned First = 0;
12622 while ((Mask & 1) == 0) {
12626 if (((Mask + 1) & Mask) != 0)
12628 ".seh_save_fregs must take a contiguous range of registers");
12630 while ((Mask & 2) != 0) {
12635 return Error(L,
".seh_save_fregs must be all d0-d15 or d16-d31");
12636 getTargetStreamer().emitARMWinCFISaveFRegs(
First,
Last);
// Parses .seh_save_lr (Windows SEH): records the stack offset at which LR is
// saved.
12642bool ARMAsmParser::parseDirectiveSEHSaveLR(SMLoc L) {
12644 if (parseImmExpr(
Offset))
12646 getTargetStreamer().emitARMWinCFISaveLR(
Offset);
// Parses .seh_endprologue / .seh_endprologue_fragment (Windows SEH); Fragment
// distinguishes the _fragment form.
12653bool ARMAsmParser::parseDirectiveSEHPrologEnd(SMLoc L,
bool Fragment) {
12654 getTargetStreamer().emitARMWinCFIPrologEnd(Fragment);
// Parses .seh_nop / .seh_nop_w (Windows SEH); Wide selects the wide encoding.
12661bool ARMAsmParser::parseDirectiveSEHNop(SMLoc L,
bool Wide) {
12662 getTargetStreamer().emitARMWinCFINop(Wide);
// Parses .seh_startepilogue / .seh_startepilogue_cond (Windows SEH); the
// conditional form additionally takes a condition-code operand.
12669bool ARMAsmParser::parseDirectiveSEHEpilogStart(SMLoc L,
bool Condition) {
12672 MCAsmParser &Parser = getParser();
12674 const AsmToken &Tok = Parser.
getTok();
12676 return Error(S,
".seh_startepilogue_cond missing condition");
12679 return Error(S,
"invalid condition");
12683 getTargetStreamer().emitARMWinCFIEpilogStart(CC);
// Parses .seh_endepilogue (Windows SEH).
12689bool ARMAsmParser::parseDirectiveSEHEpilogEnd(SMLoc L) {
12690 getTargetStreamer().emitARMWinCFIEpilogEnd();
// Parses .seh_custom (Windows SEH): up to three comma-separated byte values
// packed big-endian-style into a single 24-bit opcode word.
12696bool ARMAsmParser::parseDirectiveSEHCustom(SMLoc L) {
12697 unsigned Opcode = 0;
12700 if (parseImmExpr(Byte))
12702 if (Byte > 0xff || Byte < 0)
12703 return Error(L,
"Invalid byte value in .seh_custom");
// More than three bytes would overflow the 24-bit accumulator.
12704 if (Opcode > 0x00ffffff)
12705 return Error(L,
"Too many bytes in .seh_custom");
12708 Opcode = (Opcode << 8) | Byte;
12710 getTargetStreamer().emitARMWinCFICustom(Opcode);
12722#define GET_REGISTER_MATCHER
12723#define GET_SUBTARGET_FEATURE_NAME
12724#define GET_MATCHER_IMPLEMENTATION
12725#define GET_MNEMONIC_SPELL_CHECKER
12726#include "ARMGenAsmMatcher.inc"
// Maps a custom operand-match error to a diagnostic string, choosing
// feature-dependent wording (V8 register ranges, D32 register counts);
// falls back to the generated getMatchKindDiag table.
12732ARMAsmParser::getCustomOperandDiag(ARMMatchResultTy MatchError) {
12733 switch (MatchError) {
// rGPR excludes PC always; on pre-V8 it also excludes SP (r13).
12736 return hasV8Ops() ?
"operand must be a register in range [r0, r14]"
12737 :
"operand must be a register in range [r0, r12] or r14";
12740 return hasD32() ?
"operand must be a register in range [d0, d31]"
12741 :
"operand must be a register in range [d0, d15]";
12742 case Match_DPR_RegList:
12743 return hasD32() ?
"operand must be a list of registers in range [d0, d31]"
12744 :
"operand must be a list of registers in range [d0, d15]";
12748 return getMatchKindDiag(MatchError);
// Converts raw near-miss records from the matcher into user-facing messages,
// deduplicating repeated operand/feature reports and suppressing unhelpful
// ones (e.g. ARM-only features while assembling Thumb).
// NOTE(review): heavily elided extract — many case labels, breaks and braces
// are missing between the fused original-line numbers.
12756ARMAsmParser::FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
12757 SmallVectorImpl<NearMissMessage> &NearMissesOut,
// Dedup state: operand reports keyed by operand index, feature reports by
// feature set; "too few operands" is reported at most once.
12771 std::multimap<unsigned, unsigned> OperandMissesSeen;
12772 SmallSet<FeatureBitset, 4> FeatureMissesSeen;
12773 bool ReportedTooFewOperands =
false;
12779 for (NearMissInfo &
I :
reverse(NearMissesIn)) {
12780 switch (
I.getKind()) {
// --- Operand near-misses ---
12783 ((ARMOperand &)*Operands[
I.getOperandIndex()]).getStartLoc();
12784 const char *OperandDiag =
12785 getCustomOperandDiag((ARMMatchResultTy)
I.getOperandError());
// Skip a report if we already emitted one for the same operand index with
// the same (or a subclass of the same) operand class.
12792 unsigned DupCheckMatchClass = OperandDiag ?
I.getOperandClass() : ~0
U;
12793 auto PrevReports = OperandMissesSeen.equal_range(
I.getOperandIndex());
12794 if (std::any_of(PrevReports.first, PrevReports.second,
12795 [DupCheckMatchClass](
12796 const std::pair<unsigned, unsigned> Pair) {
12797 if (DupCheckMatchClass == ~0U || Pair.second == ~0U)
12798 return Pair.second == DupCheckMatchClass;
12800 return isSubclass((MatchClassKind)DupCheckMatchClass,
12801 (MatchClassKind)Pair.second);
12804 OperandMissesSeen.insert(
12805 std::make_pair(
I.getOperandIndex(), DupCheckMatchClass));
12807 NearMissMessage Message;
12808 Message.Loc = OperandLoc;
12810 Message.Message = OperandDiag;
12811 }
else if (
I.getOperandClass() == InvalidMatchClass) {
12812 Message.Message =
"too many operands for instruction";
12814 Message.Message =
"invalid operand for instruction";
// Debug-only: flag operand classes lacking a diagnostic string.
12816 dbgs() <<
"Missing diagnostic string for operand class "
12817 << getMatchClassName((MatchClassKind)
I.getOperandClass())
12818 <<
I.getOperandClass() <<
", error " <<
I.getOperandError()
12819 <<
", opcode " << MII.getName(
I.getOpcode()) <<
"\n");
// --- Missing-feature near-misses ---
12825 const FeatureBitset &MissingFeatures =
I.getFeatures();
12827 if (FeatureMissesSeen.
count(MissingFeatures))
12829 FeatureMissesSeen.
insert(MissingFeatures);
// Suppress feature reports that can't help: ARM-mode features on a
// Thumb-only target, and mixed ISA/feature sets for the other ISA.
12833 if (MissingFeatures.
test(Feature_IsARMBit) && !hasARM())
12837 if (
isThumb() && MissingFeatures.
test(Feature_IsARMBit) &&
12838 MissingFeatures.
count() > 1)
12840 if (!
isThumb() && MissingFeatures.
test(Feature_IsThumbBit) &&
12841 MissingFeatures.
count() > 1)
12843 if (!
isThumb() && MissingFeatures.
test(Feature_IsThumb2Bit) &&
12844 (MissingFeatures & ~FeatureBitset({Feature_IsThumb2Bit,
12845 Feature_IsThumbBit})).any())
12847 if (isMClass() && MissingFeatures.
test(Feature_HasNEONBit))
12850 NearMissMessage Message;
12851 Message.Loc = IDLoc;
12852 raw_svector_ostream OS(Message.Message);
12854 OS <<
"instruction requires:";
12855 for (
unsigned i = 0, e = MissingFeatures.
size(); i != e; ++i)
12856 if (MissingFeatures.
test(i))
// --- Target-predicate near-misses ---
12864 NearMissMessage Message;
12865 Message.Loc = IDLoc;
12866 switch (
I.getPredicateError()) {
12867 case Match_RequiresNotITBlock:
12868 Message.Message =
"flag setting instruction only valid outside IT block";
12870 case Match_RequiresITBlock:
12871 Message.Message =
"instruction only valid inside IT block";
12873 case Match_RequiresV6:
12874 Message.Message =
"instruction variant requires ARMv6 or later";
12876 case Match_RequiresThumb2:
12877 Message.Message =
"instruction variant requires Thumb2";
12879 case Match_RequiresV8:
12880 Message.Message =
"instruction variant requires ARMv8 or later";
12882 case Match_RequiresFlagSetting:
12883 Message.Message =
"no flag-preserving variant of this instruction available";
12885 case Match_InvalidTiedOperand: {
// Special-case "mul": its tied-operand failure has a clearer message.
12886 ARMOperand &
Op =
static_cast<ARMOperand &
>(*Operands[0]);
12887 if (
Op.isToken() &&
Op.getToken() ==
"mul") {
12888 Message.Message =
"destination register must match a source register";
12889 Message.Loc = Operands[MnemonicOpsEndInd]->getStartLoc();
12895 case Match_InvalidOperand:
12896 Message.Message =
"invalid operand for instruction";
// --- Too-few-operands near-misses (reported once, at the last operand) ---
12906 if (!ReportedTooFewOperands) {
12907 SMLoc EndLoc = ((ARMOperand &)*Operands.
back()).getEndLoc();
12909 EndLoc, StringRef(
"too few operands for instruction")});
12910 ReportedTooFewOperands =
true;
// Emits the final diagnostics for a failed match: a generic error when no
// messages survive filtering, a single direct error for one message, or a
// summary plus one note per alternative when there are several.
12922void ARMAsmParser::ReportNearMisses(SmallVectorImpl<NearMissInfo> &NearMisses,
12925 FilterNearMisses(NearMisses, Messages, IDLoc, Operands);
12930 Error(IDLoc,
"invalid instruction");
12931 }
else if (
Messages.size() == 1) {
12933 Error(Messages[0].Loc, Messages[0].Message);
12937 Error(IDLoc,
"invalid instruction, any one of the following would fix this:");
12938 for (
auto &M : Messages) {
12944bool ARMAsmParser::enableArchExtFeature(StringRef Name, SMLoc &ExtLoc) {
12948 static const struct {
12949 const uint64_t
Kind;
12950 const FeatureBitset ArchCheck;
12951 const FeatureBitset Features;
12953 {
ARM::AEK_CRC, {Feature_HasV8Bit}, {ARM::FeatureCRC}},
12955 {Feature_HasV8Bit},
12956 {ARM::FeatureAES, ARM::FeatureNEON, ARM::FeatureFPARMv8}},
12958 {Feature_HasV8Bit},
12959 {ARM::FeatureSHA2, ARM::FeatureNEON, ARM::FeatureFPARMv8}},
12961 {Feature_HasV8Bit},
12962 {ARM::FeatureCrypto, ARM::FeatureNEON, ARM::FeatureFPARMv8}},
12964 {Feature_HasV8_1MMainlineBit},
12965 {ARM::HasMVEFloatOps}},
12967 {Feature_HasV8Bit},
12968 {ARM::FeatureVFP2_SP, ARM::FeatureFPARMv8}},
12970 {Feature_HasV7Bit, Feature_IsNotMClassBit},
12971 {ARM::FeatureHWDivThumb, ARM::FeatureHWDivARM}},
12973 {Feature_HasV7Bit, Feature_IsNotMClassBit},
12976 {Feature_HasV8Bit},
12977 {ARM::FeatureNEON, ARM::FeatureVFP2_SP, ARM::FeatureFPARMv8}},
12978 {
ARM::AEK_SEC, {Feature_HasV6KBit}, {ARM::FeatureTrustZone}},
12980 {
ARM::AEK_VIRT, {Feature_HasV7Bit}, {ARM::FeatureVirtualization}},
12982 {Feature_HasV8_2aBit},
12983 {ARM::FeatureFPARMv8, ARM::FeatureFullFP16}},
12984 {
ARM::AEK_RAS, {Feature_HasV8Bit}, {ARM::FeatureRAS}},
12985 {
ARM::AEK_LOB, {Feature_HasV8_1MMainlineBit}, {ARM::FeatureLOB}},
12986 {
ARM::AEK_PACBTI, {Feature_HasV8_1MMainlineBit}, {ARM::FeaturePACBTI}},
12994 bool EnableFeature = !
Name.consume_front_insensitive(
"no");
12997 return Error(ExtLoc,
"unknown architectural extension: " + Name);
13004 return Error(ExtLoc,
"unsupported architectural extension: " + Name);
13007 return Error(ExtLoc,
"architectural extension '" + Name +
13009 "allowed for the current base architecture");
13011 MCSubtargetInfo &STI = copySTI();
13012 if (EnableFeature) {
13017 FeatureBitset Features = ComputeAvailableFeatures(STI.
getFeatureBits());
13018 setAvailableFeatures(Features);
13026bool ARMAsmParser::parseDirectiveArchExtension(SMLoc L) {
13028 MCAsmParser &Parser = getParser();
13031 return Error(getLexer().getLoc(),
"expected architecture extension name");
13040 if (Name ==
"nocrypto") {
13041 enableArchExtFeature(
"nosha2", ExtLoc);
13042 enableArchExtFeature(
"noaes", ExtLoc);
13045 if (enableArchExtFeature(Name, ExtLoc))
13048 return Error(ExtLoc,
"unknown architectural extension: " + Name);
13053unsigned ARMAsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
13055 ARMOperand &
Op =
static_cast<ARMOperand &
>(AsmOp);
13064 if (
CE->getValue() == 0)
13065 return Match_Success;
13070 if (
CE->getValue() == 8)
13071 return Match_Success;
13076 if (
CE->getValue() == 16)
13077 return Match_Success;
13081 const MCExpr *SOExpr =
Op.getImm();
13083 if (!SOExpr->evaluateAsAbsolute(
Value))
13084 return Match_Success;
13085 assert((
Value >= std::numeric_limits<int32_t>::min() &&
13086 Value <= std::numeric_limits<uint32_t>::max()) &&
13087 "expression value must be representable in 32 bits");
13091 if (hasV8Ops() &&
Op.isReg() &&
Op.getReg() == ARM::SP)
13092 return Match_Success;
13095 return Match_InvalidOperand;
13098bool ARMAsmParser::isMnemonicVPTPredicable(StringRef Mnemonic,
13099 StringRef ExtraToken) {
13103 if (MS.isVPTPredicableCDEInstr(Mnemonic) ||
13104 (Mnemonic.
starts_with(
"vldrh") && Mnemonic !=
"vldrhi") ||
13106 !(ExtraToken ==
".f16" || ExtraToken ==
".32" || ExtraToken ==
".16" ||
13107 ExtraToken ==
".8")) ||
13108 (Mnemonic.
starts_with(
"vrint") && Mnemonic !=
"vrintr") ||
13109 (Mnemonic.
starts_with(
"vstrh") && Mnemonic !=
"vstrhi"))
13112 const char *predicable_prefixes[] = {
13113 "vabav",
"vabd",
"vabs",
"vadc",
"vadd",
13114 "vaddlv",
"vaddv",
"vand",
"vbic",
"vbrsr",
13115 "vcadd",
"vcls",
"vclz",
"vcmla",
"vcmp",
13116 "vcmul",
"vctp",
"vcvt",
"vddup",
"vdup",
13117 "vdwdup",
"veor",
"vfma",
"vfmas",
"vfms",
13118 "vhadd",
"vhcadd",
"vhsub",
"vidup",
"viwdup",
13119 "vldrb",
"vldrd",
"vldrw",
"vmax",
"vmaxa",
13120 "vmaxav",
"vmaxnm",
"vmaxnma",
"vmaxnmav",
"vmaxnmv",
13121 "vmaxv",
"vmin",
"vminav",
"vminnm",
"vminnmav",
13122 "vminnmv",
"vminv",
"vmla",
"vmladav",
"vmlaldav",
13123 "vmlalv",
"vmlas",
"vmlav",
"vmlsdav",
"vmlsldav",
13124 "vmovlb",
"vmovlt",
"vmovnb",
"vmovnt",
"vmul",
13125 "vmvn",
"vneg",
"vorn",
"vorr",
"vpnot",
13126 "vpsel",
"vqabs",
"vqadd",
"vqdmladh",
"vqdmlah",
13127 "vqdmlash",
"vqdmlsdh",
"vqdmulh",
"vqdmull",
"vqmovn",
13128 "vqmovun",
"vqneg",
"vqrdmladh",
"vqrdmlah",
"vqrdmlash",
13129 "vqrdmlsdh",
"vqrdmulh",
"vqrshl",
"vqrshrn",
"vqrshrun",
13130 "vqshl",
"vqshrn",
"vqshrun",
"vqsub",
"vrev16",
13131 "vrev32",
"vrev64",
"vrhadd",
"vrmlaldavh",
"vrmlalvh",
13132 "vrmlsldavh",
"vrmulh",
"vrshl",
"vrshr",
"vrshrn",
13133 "vsbc",
"vshl",
"vshlc",
"vshll",
"vshr",
13134 "vshrn",
"vsli",
"vsri",
"vstrb",
"vstrd",
13137 return any_of(predicable_prefixes, [&Mnemonic](
const char *prefix) {
13142std::unique_ptr<ARMOperand> ARMAsmParser::defaultCondCodeOp() {
13143 return ARMOperand::CreateCondCode(
ARMCC::AL, SMLoc(), *
this);
13146std::unique_ptr<ARMOperand> ARMAsmParser::defaultCCOutOp() {
13147 return ARMOperand::CreateCCOut(0, SMLoc(), *
this);
13150std::unique_ptr<ARMOperand> ARMAsmParser::defaultVPTPredOp() {
13151 return ARMOperand::CreateVPTPred(
ARMVCC::None, SMLoc(), *
this);
static MCRegister MatchRegisterName(StringRef Name)
static const char * getSubtargetFeatureName(uint64_t Val)
for(const MachineOperand &MO :llvm::drop_begin(OldMI.operands(), Desc.getNumOperands()))
static SDValue getCondCode(SelectionDAG &DAG, AArch64CC::CondCode CC)
Like SelectionDAG::getCondCode(), but for AArch64 condition codes.
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file defines the StringMap class.
static void applyMnemonicAliases(StringRef &Mnemonic, const FeatureBitset &Features, unsigned VariantID)
static bool isNot(const MachineRegisterInfo &MRI, const MachineInstr &MI)
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
static std::string ARMMnemonicSpellCheck(StringRef S, const FeatureBitset &FBS, unsigned VariantID=0)
static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing)
static bool instIsBreakpoint(const MCInst &Inst)
unsigned findCCOutInd(const OperandVector &Operands, unsigned MnemonicOpsEndInd)
static bool isDataTypeToken(StringRef Tok)
}
static MCRegister getNextRegister(MCRegister Reg)
static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing)
unsigned getRegListInd(const OperandVector &Operands, unsigned MnemonicOpsEndInd)
static bool isVectorPredicable(const MCInstrDesc &MCID)
static bool listContainsReg(const MCInst &Inst, unsigned OpNo, MCRegister Reg)
static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp)
MatchCoprocessorOperandName - Try to parse an coprocessor related instruction with a symbolic operand...
void removeCCOut(OperandVector &Operands, unsigned &MnemonicOpsEndInd)
static bool checkLowRegisterList(const MCInst &Inst, unsigned OpNo, MCRegister Reg, MCRegister HiReg, bool &containsReg)
static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT)
LLVM_ABI LLVM_EXTERNAL_VISIBILITY void LLVMInitializeARMAsmParser()
Force static initialization.
static int findFirstVectorPredOperandIdx(const MCInstrDesc &MCID)
static bool isThumbI8Relocation(MCParsedAsmOperand &MCOp)
bool operandsContainWide(OperandVector &Operands, unsigned MnemonicOpsEndInd)
void removeCondCode(OperandVector &Operands, unsigned &MnemonicOpsEndInd)
static bool insertNoDuplicates(SmallVectorImpl< std::pair< unsigned, MCRegister > > &Regs, unsigned Enc, MCRegister Reg)
static unsigned getMnemonicOpsEndInd(const OperandVector &Operands)
static bool isARMMCExpr(MCParsedAsmOperand &MCOp)
unsigned findCondCodeInd(const OperandVector &Operands, unsigned MnemonicOpsEndInd)
void removeVPTCondCode(OperandVector &Operands, unsigned &MnemonicOpsEndInd)
static bool isThumb(const MCSubtargetInfo &STI)
static void print(raw_ostream &Out, object::Archive::Kind Kind, T Val)
static uint64_t scale(uint64_t Num, uint32_t N, uint32_t D)
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static Register getFPReg(const CSKYSubtarget &STI)
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
#define LLVM_EXTERNAL_VISIBILITY
static cl::opt< bool > AddBuildAttributes("hexagon-add-build-attributes")
Value * getPointer(Value *Ptr)
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static bool containsReg(SmallSetVector< Register, 32 > LocalDefsV, const BitVector &LocalDefsP, Register Reg, const TargetRegisterInfo *TRI)
Check if target reg is contained in given lists, which are: LocalDefsV as given list for virtual regs...
static MCRegister getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
static bool isReg(const MCInst &MI, unsigned OpNo)
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
static constexpr MCPhysReg FPReg
static constexpr MCPhysReg SPReg
const SmallVectorImpl< MachineOperand > & Cond
static cl::opt< RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode > Mode("regalloc-enable-advisor", cl::Hidden, cl::init(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values(clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default, "default", "Default"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Release, "release", "precompiled"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Development, "development", "for training")))
SI Pre allocate WWM Registers
static cl::opt< ExtensionSet, false, SPIRVExtensionsParser > Extensions("spirv-ext", cl::desc("Specify list of enabled SPIR-V extensions"))
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
This file implements the SmallBitVector class.
This file defines the SmallSet class.
This file defines the SmallVector class.
StringSet - A set-like wrapper for the StringMap.
This file implements the StringSwitch template, which mimics a switch() statement whose cases are str...
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static const char * getRegisterName(MCRegister Reg, unsigned AltIdx=ARM::NoRegAltName)
const AsmToken peekTok(bool ShouldSkipSpace=true)
Look ahead at the next token to be lexed.
LLVM_ABI SMLoc getLoc() const
int64_t getIntVal() const
bool isNot(TokenKind K) const
StringRef getString() const
Get the string for the current token, this includes all characters (for example, the quotes on string...
StringRef getStringContents() const
Get the contents of a string token (without quotes).
bool is(TokenKind K) const
LLVM_ABI SMLoc getEndLoc() const
StringRef getIdentifier() const
Get the identifier string for the current token, which should be an identifier or a string.
Implements a dense probed hash-table based set.
Base class for user error types.
Container class for subtarget features.
constexpr bool test(unsigned I) const
constexpr size_t size() const
void printExpr(raw_ostream &, const MCExpr &) const
virtual void Initialize(MCAsmParser &Parser)
Initialize the extension for parsing using the given Parser.
Generic assembler parser interface, for use by target specific assembly parsers.
bool parseToken(AsmToken::TokenKind T, const Twine &Msg="unexpected token")
virtual bool parseEscapedString(std::string &Data)=0
Parse the current token as a string which may include escaped characters and return the string conten...
virtual bool parseExpression(const MCExpr *&Res, SMLoc &EndLoc)=0
Parse an arbitrary expression.
const AsmToken & getTok() const
Get the current AsmToken from the stream.
virtual bool parseIdentifier(StringRef &Res)=0
Parse an identifier or string (as a quoted identifier) and set Res to the identifier contents.
bool parseOptionalToken(AsmToken::TokenKind T)
Attempt to parse and consume token, returning true on success.
virtual void Note(SMLoc L, const Twine &Msg, SMRange Range={})=0
Emit a note at the location L, with the message Msg.
virtual const AsmToken & Lex()=0
Get the next AsmToken in the stream, possibly handling file inclusion first.
static const MCBinaryExpr * createAdd(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx, SMLoc Loc=SMLoc())
static LLVM_ABI const MCConstantExpr * create(int64_t Value, MCContext &Ctx, bool PrintInHex=false, unsigned SizeInBytes=0)
@ Constant
Constant expressions.
Instances of this class represent a single low-level machine instruction.
unsigned getNumOperands() const
unsigned getOpcode() const
LLVM_ABI void dump_pretty(raw_ostream &OS, const MCInstPrinter *Printer=nullptr, StringRef Separator=" ", const MCContext *Ctx=nullptr) const
Dump the MCInst as prettily as possible using the additional MC structures, if given.
iterator insert(iterator I, const MCOperand &Op)
void addOperand(const MCOperand Op)
void setOpcode(unsigned Op)
const MCOperand & getOperand(unsigned i) const
Describe properties that are true of each instruction in the target description file.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
ArrayRef< MCOperandInfo > operands() const
bool isIndirectBranch() const
Return true if this is an indirect branch, such as a branch through a register.
int findFirstPredOperandIdx() const
Find the index of the first operand in the operand list that is used to represent the predicate.
bool hasOptionalDef() const
Set if this instruction has an optional definition, e.g.
LLVM_ABI bool hasDefOfPhysReg(const MCInst &MI, MCRegister Reg, const MCRegisterInfo &RI) const
Return true if this instruction defines the specified physical register, either explicitly or implici...
bool isBranch() const
Returns true if this is a conditional, unconditional, or indirect branch.
bool isPredicable() const
Return true if this instruction has a predicate operand that controls execution.
bool isCall() const
Return true if the instruction is a call.
bool isTerminator() const
Returns true if this instruction part of the terminator for a basic block.
bool isReturn() const
Return true if the instruction is a return.
static MCOperand createExpr(const MCExpr *Val)
static MCOperand createReg(MCRegister Reg)
static MCOperand createImm(int64_t Val)
MCRegister getReg() const
Returns the register number.
const MCExpr * getExpr() const
MCParsedAsmOperand - This abstract class represents a source-level assembly instruction operand.
virtual SMLoc getStartLoc() const =0
getStartLoc - Get the location of the first token of this operand.
virtual bool isReg() const =0
isReg - Is this a register operand?
virtual MCRegister getReg() const =0
virtual SMLoc getEndLoc() const =0
getEndLoc - Get the location of the last token of this operand.
MCRegisterClass - Base class of TargetRegisterClass.
unsigned getID() const
getID() - Return the register class ID number.
MCRegister getRegister(unsigned i) const
getRegister - Return the specified register in the class.
unsigned getNumRegs() const
getNumRegs - Return the number of registers in this class.
bool contains(MCRegister Reg) const
contains - Return true if the specified register is included in this register class.
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
MCRegister getMatchingSuperReg(MCRegister Reg, unsigned SubIdx, const MCRegisterClass *RC) const
Return a super-register of the specified register Reg so its sub-register of index SubIdx is Reg.
uint16_t getEncodingValue(MCRegister Reg) const
Returns the encoding for Reg.
const MCRegisterClass & getRegClass(unsigned i) const
Returns the register class associated with the enumeration value.
MCRegister getSubReg(MCRegister Reg, unsigned Idx) const
Returns the physical register number of sub-register "Index" for physical register RegNo.
Wrapper class representing physical registers. Should be passed by value.
constexpr unsigned id() const
static const MCSpecifierExpr * create(const MCExpr *Expr, Spec S, MCContext &Ctx, SMLoc Loc=SMLoc())
Streaming machine code generation interface.
virtual void emitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI)
Emit the given Instruction into the current section.
virtual void emitLabel(MCSymbol *Symbol, SMLoc Loc=SMLoc())
Emit a label for Symbol into the current section.
MCTargetStreamer * getTargetStreamer()
Generic base class for all target subtargets.
const FeatureBitset & getFeatureBits() const
const FeatureBitset & ToggleFeature(uint64_t FB)
Toggle a feature and return the re-computed feature bits.
const FeatureBitset & ApplyFeatureFlag(StringRef FS)
Apply a feature flag and return the re-computed feature bits, including all feature bits implied by t...
void setDefaultFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS)
Set the features to the default for the given CPU and TuneCPU, with ano appended feature string.
const FeatureBitset & ClearFeatureBitsTransitively(const FeatureBitset &FB)
const FeatureBitset & SetFeatureBitsTransitively(const FeatureBitset &FB)
Set/clear additional feature bits, including all other bits they imply.
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx, SMLoc Loc=SMLoc())
MCTargetAsmParser - Generic interface to target specific assembly parsers.
Target specific streamer interface.
MCStreamer & getStreamer()
Ternary parse status returned by various parse* methods.
constexpr bool isFailure() const
static constexpr StatusTy Failure
constexpr bool isSuccess() const
static constexpr StatusTy Success
static constexpr StatusTy NoMatch
Represents a location in source code.
static SMLoc getFromPointer(const char *Ptr)
constexpr const char * getPointer() const
Represents a range in source code.
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
iterator erase(const_iterator CI)
iterator insert(iterator I, T &&Elt)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringMap - This is an unconventional map that is specialized for handling keys that are "strings",...
iterator find(StringRef Key)
size_type count(StringRef Key) const
count - Return 1 if the element is in the map, 0 otherwise.
bool insert(MapEntryTy *KeyValue)
insert - Insert the specified key/value pair into the map.
StringRef - Represent a constant reference to a string, i.e.
static constexpr size_t npos
constexpr StringRef substr(size_t Start, size_t N=npos) const
Return a reference to the substring from [Start, Start + N).
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
StringRef slice(size_t Start, size_t End) const
Return a reference to the substring from [Start, End).
constexpr size_t size() const
size - Get the string size.
LLVM_ABI std::string lower() const
bool ends_with(StringRef Suffix) const
Check if this string ends with the given Suffix.
bool equals_insensitive(StringRef RHS) const
Check for string equality, ignoring case.
StringSet - A wrapper for StringMap that provides set-like functionality.
std::pair< typename Base::iterator, bool > insert(StringRef key)
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
LLVM_ABI const TagNameMap & getARMAttributeTags()
static CondCodes getOppositeCondition(CondCodes CC)
unsigned getSORegOffset(unsigned Op)
int getSOImmVal(unsigned Arg)
getSOImmVal - Given a 32-bit immediate, if it is something that can fit into an shifter_operand immed...
int getFP32Imm(const APInt &Imm)
getFP32Imm - Return an 8-bit floating-point version of the 32-bit floating-point value.
unsigned encodeNEONi16splat(unsigned Value)
float getFPImmFloat(unsigned Imm)
int getT2SOImmVal(unsigned Arg)
getT2SOImmVal - Given a 32-bit immediate, if it is something that can fit into a Thumb-2 shifter_oper...
unsigned getAM2Opc(AddrOpc Opc, unsigned Imm12, ShiftOpc SO, unsigned IdxMode=0)
unsigned getAM5Opc(AddrOpc Opc, unsigned char Offset)
getAM5Opc - This function encodes the addrmode5 opc field.
ShiftOpc getSORegShOp(unsigned Op)
bool isNEONi16splat(unsigned Value)
Checks if Value is a correct immediate for instructions like VBIC/VORR.
unsigned getAM5FP16Opc(AddrOpc Opc, unsigned char Offset)
getAM5FP16Opc - This function encodes the addrmode5fp16 opc field.
unsigned getAM3Opc(AddrOpc Opc, unsigned char Offset, unsigned IdxMode=0)
getAM3Opc - This function encodes the addrmode3 opc field.
bool isNEONi32splat(unsigned Value)
Checks if Value is a correct immediate for instructions like VBIC/VORR.
unsigned getSORegOpc(ShiftOpc ShOp, unsigned Imm)
StringRef getShiftOpcStr(ShiftOpc Op)
unsigned encodeNEONi32splat(unsigned Value)
Encode NEON 32 bits Splat immediate for instructions like VBIC/VORR.
static const char * IFlagsToString(unsigned val)
LLVM_ABI bool getFPUFeatures(FPUKind FPUKind, std::vector< StringRef > &Features)
LLVM_ABI StringRef getArchName(ArchKind AK)
LLVM_ABI uint64_t parseArchExt(StringRef ArchExt)
LLVM_ABI ArchKind parseArch(StringRef Arch)
bool isVpred(OperandType op)
LLVM_ABI FPUKind parseFPU(StringRef FPU)
bool isCDECoproc(size_t Coproc, const MCSubtargetInfo &STI)
@ D16
Only 16 D registers.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
LLVM_ABI std::optional< unsigned > attrTypeFromString(StringRef tag, TagNameMap tagNameMap)
Flag
These should be considered private to the implementation of the MCInstrDesc class.
bool parseAssignmentExpression(StringRef Name, bool allow_redef, MCAsmParser &Parser, MCSymbol *&Symbol, const MCExpr *&Value)
Parse a value expression and return whether it can be assigned to a symbol with the given name.
@ CE
Windows NT (Windows on ARM)
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
NodeAddr< FuncNode * > Func
Context & getContext() const
BaseReg
Stack frame base register. Bit 0 of FREInfo.Info.
This is an optimization pass for GlobalISel generic memory operations.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
static const char * ARMVPTPredToString(ARMVCC::VPTCodes CC)
constexpr T rotr(T V, int R)
FunctionAddr VTableAddr Value
static bool isMem(const MachineInstr &MI, unsigned Op)
LLVM_ABI std::pair< StringRef, StringRef > getToken(StringRef Source, StringRef Delimiters=" \t\n\v\f\r")
getToken - This function extracts one token from source, ignoring any leading characters that appear ...
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
static bool isARMLowRegister(MCRegister Reg)
isARMLowRegister - Returns true if the register is a low register (r0-r7).
Target & getTheThumbBETarget()
static unsigned ARMCondCodeFromString(StringRef CC)
constexpr int popcount(T Value) noexcept
Count the number of set bits in a value.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
auto dyn_cast_or_null(const Y &Val)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
SmallVectorImpl< std::unique_ptr< MCParsedAsmOperand > > OperandVector
auto reverse(ContainerTy &&C)
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
@ Never
Never set the bit.
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
FunctionAddr VTableAddr Count
bool is_sorted(R &&Range, Compare C)
Wrapper function around std::is_sorted to check if elements in a range R are sorted with respect to a...
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
bool IsCPSRDead< MCInst >(const MCInst *Instr)
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
static bool isValidCoprocessorNumber(unsigned Num, const FeatureBitset &featureBits)
isValidCoprocessorNumber - decide whether an explicit coprocessor number is legal in generic instruct...
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
FunctionAddr VTableAddr uintptr_t uintptr_t Data
FunctionAddr VTableAddr Next
DWARFExpression::Operation Op
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
static unsigned ARMVectorCondCodeFromString(StringRef CC)
static const char * ARMCondCodeToString(ARMCC::CondCodes CC)
@ Always
Always emit .debug_str_offsets talbes as DWARF64 for testing.
Target & getTheARMLETarget()
Target & getTheARMBETarget()
Target & getTheThumbLETarget()
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
RegisterMCAsmParser - Helper template for registering a target specific assembly parser,...