LLVM 23.0.0git
AArch64AsmParser.cpp
Go to the documentation of this file.
1//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "AArch64InstrInfo.h"
17#include "llvm/ADT/APFloat.h"
18#include "llvm/ADT/APInt.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallSet.h"
24#include "llvm/ADT/StringMap.h"
25#include "llvm/ADT/StringRef.h"
27#include "llvm/ADT/Twine.h"
28#include "llvm/MC/MCAsmInfo.h"
29#include "llvm/MC/MCContext.h"
30#include "llvm/MC/MCExpr.h"
31#include "llvm/MC/MCInst.h"
40#include "llvm/MC/MCStreamer.h"
42#include "llvm/MC/MCSymbol.h"
44#include "llvm/MC/MCValue.h"
50#include "llvm/Support/SMLoc.h"
54#include <cassert>
55#include <cctype>
56#include <cstdint>
57#include <cstdio>
58#include <optional>
59#include <string>
60#include <tuple>
61#include <utility>
62#include <vector>
63
64using namespace llvm;
65
66namespace {
67
/// The kind of register an operand names; selects which register class a
/// parsed register name is matched against.
enum class RegKind {
  Scalar,                // GPR or FP/SIMD scalar register
  NeonVector,            // NEON vector register
  SVEDataVector,         // SVE data vector register (z-registers)
  SVEPredicateAsCounter, // SVE predicate register used as a counter
  SVEPredicateVector,    // SVE predicate register
  Matrix,                // SME matrix (ZA) register
  LookupTable            // SME lookup-table register (see tryParseZTOperand)
};
77
78enum class MatrixKind { Array, Tile, Row, Col };
79
/// How a parsed register may relate to the register the instruction expects:
/// the same register, its super-register, or its sub-register (e.g. for the
/// GPR64as32 / GPR32as64 register classes).
enum RegConstraintEqualityTy {
  EqualsReg,
  EqualsSuperReg,
  EqualsSubReg
};
85
86class AArch64AsmParser : public MCTargetAsmParser {
87private:
88 StringRef Mnemonic; ///< Instruction mnemonic.
89
90 // Map of register aliases registers via the .req directive.
91 StringMap<std::pair<RegKind, MCRegister>> RegisterReqs;
92
93 class PrefixInfo {
94 public:
95 static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
96 PrefixInfo Prefix;
97 switch (Inst.getOpcode()) {
98 case AArch64::MOVPRFX_ZZ:
99 Prefix.Active = true;
100 Prefix.Dst = Inst.getOperand(0).getReg();
101 break;
102 case AArch64::MOVPRFX_ZPmZ_B:
103 case AArch64::MOVPRFX_ZPmZ_H:
104 case AArch64::MOVPRFX_ZPmZ_S:
105 case AArch64::MOVPRFX_ZPmZ_D:
106 Prefix.Active = true;
107 Prefix.Predicated = true;
108 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
109 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
110 "No destructive element size set for movprfx");
111 Prefix.Dst = Inst.getOperand(0).getReg();
112 Prefix.Pg = Inst.getOperand(2).getReg();
113 break;
114 case AArch64::MOVPRFX_ZPzZ_B:
115 case AArch64::MOVPRFX_ZPzZ_H:
116 case AArch64::MOVPRFX_ZPzZ_S:
117 case AArch64::MOVPRFX_ZPzZ_D:
118 Prefix.Active = true;
119 Prefix.Predicated = true;
120 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
121 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
122 "No destructive element size set for movprfx");
123 Prefix.Dst = Inst.getOperand(0).getReg();
124 Prefix.Pg = Inst.getOperand(1).getReg();
125 break;
126 default:
127 break;
128 }
129
130 return Prefix;
131 }
132
133 PrefixInfo() = default;
134 bool isActive() const { return Active; }
135 bool isPredicated() const { return Predicated; }
136 unsigned getElementSize() const {
137 assert(Predicated);
138 return ElementSize;
139 }
140 MCRegister getDstReg() const { return Dst; }
141 MCRegister getPgReg() const {
142 assert(Predicated);
143 return Pg;
144 }
145
146 private:
147 bool Active = false;
148 bool Predicated = false;
149 unsigned ElementSize;
150 MCRegister Dst;
151 MCRegister Pg;
152 } NextPrefix;
153
154 AArch64TargetStreamer &getTargetStreamer() {
155 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
156 return static_cast<AArch64TargetStreamer &>(TS);
157 }
158
159 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
160
161 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
162 bool parseSyslAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
163 bool parseSyspAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
164 void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
165 AArch64CC::CondCode parseCondCodeString(StringRef Cond,
166 std::string &Suggestion);
167 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
168 MCRegister matchRegisterNameAlias(StringRef Name, RegKind Kind);
169 bool parseRegister(OperandVector &Operands);
170 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
171 bool parseNeonVectorList(OperandVector &Operands);
172 bool parseOptionalMulOperand(OperandVector &Operands);
173 bool parseOptionalVGOperand(OperandVector &Operands, StringRef &VecGroup);
174 bool parseKeywordOperand(OperandVector &Operands);
175 bool parseOperand(OperandVector &Operands, bool isCondCode,
176 bool invertCondCode);
177 bool parseImmExpr(int64_t &Out);
178 bool parseComma();
179 bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
180 unsigned Last);
181
182 bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
183 OperandVector &Operands);
184
185 bool parseExprWithSpecifier(const MCExpr *&Res, SMLoc &E);
186 bool parseDataExpr(const MCExpr *&Res) override;
187 bool parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc);
188
189 bool parseDirectiveArch(SMLoc L);
190 bool parseDirectiveArchExtension(SMLoc L);
191 bool parseDirectiveCPU(SMLoc L);
192 bool parseDirectiveInst(SMLoc L);
193
194 bool parseDirectiveTLSDescCall(SMLoc L);
195
196 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
197 bool parseDirectiveLtorg(SMLoc L);
198
199 bool parseDirectiveReq(StringRef Name, SMLoc L);
200 bool parseDirectiveUnreq(SMLoc L);
201 bool parseDirectiveCFINegateRAState();
202 bool parseDirectiveCFINegateRAStateWithPC();
203 bool parseDirectiveCFIBKeyFrame();
204 bool parseDirectiveCFIMTETaggedFrame();
205
206 bool parseDirectiveVariantPCS(SMLoc L);
207
208 bool parseDirectiveSEHAllocStack(SMLoc L);
209 bool parseDirectiveSEHPrologEnd(SMLoc L);
210 bool parseDirectiveSEHSaveR19R20X(SMLoc L);
211 bool parseDirectiveSEHSaveFPLR(SMLoc L);
212 bool parseDirectiveSEHSaveFPLRX(SMLoc L);
213 bool parseDirectiveSEHSaveReg(SMLoc L);
214 bool parseDirectiveSEHSaveRegX(SMLoc L);
215 bool parseDirectiveSEHSaveRegP(SMLoc L);
216 bool parseDirectiveSEHSaveRegPX(SMLoc L);
217 bool parseDirectiveSEHSaveLRPair(SMLoc L);
218 bool parseDirectiveSEHSaveFReg(SMLoc L);
219 bool parseDirectiveSEHSaveFRegX(SMLoc L);
220 bool parseDirectiveSEHSaveFRegP(SMLoc L);
221 bool parseDirectiveSEHSaveFRegPX(SMLoc L);
222 bool parseDirectiveSEHSetFP(SMLoc L);
223 bool parseDirectiveSEHAddFP(SMLoc L);
224 bool parseDirectiveSEHNop(SMLoc L);
225 bool parseDirectiveSEHSaveNext(SMLoc L);
226 bool parseDirectiveSEHEpilogStart(SMLoc L);
227 bool parseDirectiveSEHEpilogEnd(SMLoc L);
228 bool parseDirectiveSEHTrapFrame(SMLoc L);
229 bool parseDirectiveSEHMachineFrame(SMLoc L);
230 bool parseDirectiveSEHContext(SMLoc L);
231 bool parseDirectiveSEHECContext(SMLoc L);
232 bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);
233 bool parseDirectiveSEHPACSignLR(SMLoc L);
234 bool parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired, bool Writeback);
235 bool parseDirectiveSEHAllocZ(SMLoc L);
236 bool parseDirectiveSEHSaveZReg(SMLoc L);
237 bool parseDirectiveSEHSavePReg(SMLoc L);
238 bool parseDirectiveAeabiSubSectionHeader(SMLoc L);
239 bool parseDirectiveAeabiAArch64Attr(SMLoc L);
240
241 bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
242 SmallVectorImpl<SMLoc> &Loc);
243 unsigned getNumRegsForRegKind(RegKind K);
244 bool matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
245 OperandVector &Operands, MCStreamer &Out,
246 uint64_t &ErrorInfo,
247 bool MatchingInlineAsm) override;
248 /// @name Auto-generated Match Functions
249 /// {
250
251#define GET_ASSEMBLER_HEADER
252#include "AArch64GenAsmMatcher.inc"
253
254 /// }
255
256 ParseStatus tryParseScalarRegister(MCRegister &Reg);
257 ParseStatus tryParseVectorRegister(MCRegister &Reg, StringRef &Kind,
258 RegKind MatchKind);
259 ParseStatus tryParseMatrixRegister(OperandVector &Operands);
260 ParseStatus tryParseSVCR(OperandVector &Operands);
261 ParseStatus tryParseOptionalShiftExtend(OperandVector &Operands);
262 ParseStatus tryParseBarrierOperand(OperandVector &Operands);
263 ParseStatus tryParseBarriernXSOperand(OperandVector &Operands);
264 ParseStatus tryParseSysReg(OperandVector &Operands);
265 ParseStatus tryParseSysCROperand(OperandVector &Operands);
266 template <bool IsSVEPrefetch = false>
267 ParseStatus tryParsePrefetch(OperandVector &Operands);
268 ParseStatus tryParseRPRFMOperand(OperandVector &Operands);
269 ParseStatus tryParsePSBHint(OperandVector &Operands);
270 ParseStatus tryParseBTIHint(OperandVector &Operands);
271 ParseStatus tryParseCMHPriorityHint(OperandVector &Operands);
272 ParseStatus tryParseTIndexHint(OperandVector &Operands);
273 ParseStatus tryParseAdrpLabel(OperandVector &Operands);
274 ParseStatus tryParseAdrLabel(OperandVector &Operands);
275 template <bool AddFPZeroAsLiteral>
276 ParseStatus tryParseFPImm(OperandVector &Operands);
277 ParseStatus tryParseImmWithOptionalShift(OperandVector &Operands);
278 ParseStatus tryParseGPR64sp0Operand(OperandVector &Operands);
279 bool tryParseNeonVectorRegister(OperandVector &Operands);
280 ParseStatus tryParseVectorIndex(OperandVector &Operands);
281 ParseStatus tryParseGPRSeqPair(OperandVector &Operands);
282 ParseStatus tryParseSyspXzrPair(OperandVector &Operands);
283 template <bool ParseShiftExtend,
284 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
285 ParseStatus tryParseGPROperand(OperandVector &Operands);
286 ParseStatus tryParseZTOperand(OperandVector &Operands);
287 template <bool ParseShiftExtend, bool ParseSuffix>
288 ParseStatus tryParseSVEDataVector(OperandVector &Operands);
289 template <RegKind RK>
290 ParseStatus tryParseSVEPredicateVector(OperandVector &Operands);
292 tryParseSVEPredicateOrPredicateAsCounterVector(OperandVector &Operands);
293 template <RegKind VectorKind>
294 ParseStatus tryParseVectorList(OperandVector &Operands,
295 bool ExpectMatch = false);
296 ParseStatus tryParseMatrixTileList(OperandVector &Operands);
297 ParseStatus tryParseSVEPattern(OperandVector &Operands);
298 ParseStatus tryParseSVEVecLenSpecifier(OperandVector &Operands);
299 ParseStatus tryParseGPR64x8(OperandVector &Operands);
300 ParseStatus tryParseImmRange(OperandVector &Operands);
301 template <int> ParseStatus tryParseAdjImm0_63(OperandVector &Operands);
302 ParseStatus tryParsePHintInstOperand(OperandVector &Operands);
303
304public:
305 enum AArch64MatchResultTy {
306 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
307#define GET_OPERAND_DIAGNOSTIC_TYPES
308#include "AArch64GenAsmMatcher.inc"
309 };
310 bool IsILP32;
311 bool IsWindowsArm64EC;
312
313 AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
314 const MCInstrInfo &MII, const MCTargetOptions &Options)
315 : MCTargetAsmParser(Options, STI, MII) {
316 IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
317 IsWindowsArm64EC = STI.getTargetTriple().isWindowsArm64EC();
319 MCStreamer &S = getParser().getStreamer();
320 if (S.getTargetStreamer() == nullptr)
321 new AArch64TargetStreamer(S);
322
323 // Alias .hword/.word/.[dx]word to the target-independent
324 // .2byte/.4byte/.8byte directives as they have the same form and
325 // semantics:
326 /// ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
327 Parser.addAliasForDirective(".hword", ".2byte");
328 Parser.addAliasForDirective(".word", ".4byte");
329 Parser.addAliasForDirective(".dword", ".8byte");
330 Parser.addAliasForDirective(".xword", ".8byte");
331
332 // Initialize the set of available features.
333 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
334 }
335
336 bool areEqualRegs(const MCParsedAsmOperand &Op1,
337 const MCParsedAsmOperand &Op2) const override;
338 bool parseInstruction(ParseInstructionInfo &Info, StringRef Name,
339 SMLoc NameLoc, OperandVector &Operands) override;
340 bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc) override;
341 ParseStatus tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
342 SMLoc &EndLoc) override;
343 bool ParseDirective(AsmToken DirectiveID) override;
344 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
345 unsigned Kind) override;
346
347 static bool classifySymbolRef(const MCExpr *Expr, AArch64::Specifier &ELFSpec,
348 AArch64::Specifier &DarwinSpec,
349 int64_t &Addend);
350};
351
352/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
353/// instruction.
354class AArch64Operand : public MCParsedAsmOperand {
355private:
356 enum KindTy {
357 k_Immediate,
358 k_ShiftedImm,
359 k_ImmRange,
360 k_CondCode,
361 k_Register,
362 k_MatrixRegister,
363 k_MatrixTileList,
364 k_SVCR,
365 k_VectorList,
366 k_VectorIndex,
367 k_Token,
368 k_SysReg,
369 k_SysCR,
370 k_Prefetch,
371 k_ShiftExtend,
372 k_FPImm,
373 k_Barrier,
374 k_PSBHint,
375 k_PHint,
376 k_BTIHint,
377 k_CMHPriorityHint,
378 k_TIndexHint,
379 } Kind;
380
381 SMLoc StartLoc, EndLoc;
382
383 struct TokOp {
384 const char *Data;
385 unsigned Length;
386 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
387 };
388
389 // Separate shift/extend operand.
390 struct ShiftExtendOp {
392 unsigned Amount;
393 bool HasExplicitAmount;
394 };
395
396 struct RegOp {
397 MCRegister Reg;
398 RegKind Kind;
399 int ElementWidth;
400
401 // The register may be allowed as a different register class,
402 // e.g. for GPR64as32 or GPR32as64.
403 RegConstraintEqualityTy EqualityTy;
404
405 // In some cases the shift/extend needs to be explicitly parsed together
406 // with the register, rather than as a separate operand. This is needed
407 // for addressing modes where the instruction as a whole dictates the
408 // scaling/extend, rather than specific bits in the instruction.
409 // By parsing them as a single operand, we avoid the need to pass an
410 // extra operand in all CodeGen patterns (because all operands need to
411 // have an associated value), and we avoid the need to update TableGen to
412 // accept operands that have no associated bits in the instruction.
413 //
414 // An added benefit of parsing them together is that the assembler
415 // can give a sensible diagnostic if the scaling is not correct.
416 //
417 // The default is 'lsl #0' (HasExplicitAmount = false) if no
418 // ShiftExtend is specified.
419 ShiftExtendOp ShiftExtend;
420 };
421
422 struct MatrixRegOp {
423 MCRegister Reg;
424 unsigned ElementWidth;
425 MatrixKind Kind;
426 };
427
428 struct MatrixTileListOp {
429 unsigned RegMask = 0;
430 };
431
432 struct VectorListOp {
433 MCRegister Reg;
434 unsigned Count;
435 unsigned Stride;
436 unsigned NumElements;
437 unsigned ElementWidth;
438 RegKind RegisterKind;
439 };
440
441 struct VectorIndexOp {
442 int Val;
443 };
444
445 struct ImmOp {
446 const MCExpr *Val;
447 };
448
449 struct ShiftedImmOp {
450 const MCExpr *Val;
451 unsigned ShiftAmount;
452 };
453
454 struct ImmRangeOp {
455 unsigned First;
456 unsigned Last;
457 };
458
459 struct CondCodeOp {
461 };
462
463 struct FPImmOp {
464 uint64_t Val; // APFloat value bitcasted to uint64_t.
465 bool IsExact; // describes whether parsed value was exact.
466 };
467
468 struct BarrierOp {
469 const char *Data;
470 unsigned Length;
471 unsigned Val; // Not the enum since not all values have names.
472 bool HasnXSModifier;
473 };
474
475 struct SysRegOp {
476 const char *Data;
477 unsigned Length;
478 uint32_t MRSReg;
479 uint32_t MSRReg;
480 uint32_t PStateField;
481 };
482
483 struct SysCRImmOp {
484 unsigned Val;
485 };
486
487 struct PrefetchOp {
488 const char *Data;
489 unsigned Length;
490 unsigned Val;
491 };
492
493 struct PSBHintOp {
494 const char *Data;
495 unsigned Length;
496 unsigned Val;
497 };
498 struct PHintOp {
499 const char *Data;
500 unsigned Length;
501 unsigned Val;
502 };
503 struct BTIHintOp {
504 const char *Data;
505 unsigned Length;
506 unsigned Val;
507 };
508 struct CMHPriorityHintOp {
509 const char *Data;
510 unsigned Length;
511 unsigned Val;
512 };
513 struct TIndexHintOp {
514 const char *Data;
515 unsigned Length;
516 unsigned Val;
517 };
518
519 struct SVCROp {
520 const char *Data;
521 unsigned Length;
522 unsigned PStateField;
523 };
524
525 union {
526 struct TokOp Tok;
527 struct RegOp Reg;
528 struct MatrixRegOp MatrixReg;
529 struct MatrixTileListOp MatrixTileList;
530 struct VectorListOp VectorList;
531 struct VectorIndexOp VectorIndex;
532 struct ImmOp Imm;
533 struct ShiftedImmOp ShiftedImm;
534 struct ImmRangeOp ImmRange;
535 struct CondCodeOp CondCode;
536 struct FPImmOp FPImm;
537 struct BarrierOp Barrier;
538 struct SysRegOp SysReg;
539 struct SysCRImmOp SysCRImm;
540 struct PrefetchOp Prefetch;
541 struct PSBHintOp PSBHint;
542 struct PHintOp PHint;
543 struct BTIHintOp BTIHint;
544 struct CMHPriorityHintOp CMHPriorityHint;
545 struct TIndexHintOp TIndexHint;
546 struct ShiftExtendOp ShiftExtend;
547 struct SVCROp SVCR;
548 };
549
550 // Keep the MCContext around as the MCExprs may need manipulated during
551 // the add<>Operands() calls.
552 MCContext &Ctx;
553
554public:
555 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
556
557 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
558 Kind = o.Kind;
559 StartLoc = o.StartLoc;
560 EndLoc = o.EndLoc;
561 switch (Kind) {
562 case k_Token:
563 Tok = o.Tok;
564 break;
565 case k_Immediate:
566 Imm = o.Imm;
567 break;
568 case k_ShiftedImm:
569 ShiftedImm = o.ShiftedImm;
570 break;
571 case k_ImmRange:
572 ImmRange = o.ImmRange;
573 break;
574 case k_CondCode:
575 CondCode = o.CondCode;
576 break;
577 case k_FPImm:
578 FPImm = o.FPImm;
579 break;
580 case k_Barrier:
581 Barrier = o.Barrier;
582 break;
583 case k_Register:
584 Reg = o.Reg;
585 break;
586 case k_MatrixRegister:
587 MatrixReg = o.MatrixReg;
588 break;
589 case k_MatrixTileList:
590 MatrixTileList = o.MatrixTileList;
591 break;
592 case k_VectorList:
593 VectorList = o.VectorList;
594 break;
595 case k_VectorIndex:
596 VectorIndex = o.VectorIndex;
597 break;
598 case k_SysReg:
599 SysReg = o.SysReg;
600 break;
601 case k_SysCR:
602 SysCRImm = o.SysCRImm;
603 break;
604 case k_Prefetch:
605 Prefetch = o.Prefetch;
606 break;
607 case k_PSBHint:
608 PSBHint = o.PSBHint;
609 break;
610 case k_PHint:
611 PHint = o.PHint;
612 break;
613 case k_BTIHint:
614 BTIHint = o.BTIHint;
615 break;
616 case k_CMHPriorityHint:
617 CMHPriorityHint = o.CMHPriorityHint;
618 break;
619 case k_TIndexHint:
620 TIndexHint = o.TIndexHint;
621 break;
622 case k_ShiftExtend:
623 ShiftExtend = o.ShiftExtend;
624 break;
625 case k_SVCR:
626 SVCR = o.SVCR;
627 break;
628 }
629 }
630
631 /// getStartLoc - Get the location of the first token of this operand.
632 SMLoc getStartLoc() const override { return StartLoc; }
633 /// getEndLoc - Get the location of the last token of this operand.
634 SMLoc getEndLoc() const override { return EndLoc; }
635
636 StringRef getToken() const {
637 assert(Kind == k_Token && "Invalid access!");
638 return StringRef(Tok.Data, Tok.Length);
639 }
640
641 bool isTokenSuffix() const {
642 assert(Kind == k_Token && "Invalid access!");
643 return Tok.IsSuffix;
644 }
645
646 const MCExpr *getImm() const {
647 assert(Kind == k_Immediate && "Invalid access!");
648 return Imm.Val;
649 }
650
651 const MCExpr *getShiftedImmVal() const {
652 assert(Kind == k_ShiftedImm && "Invalid access!");
653 return ShiftedImm.Val;
654 }
655
656 unsigned getShiftedImmShift() const {
657 assert(Kind == k_ShiftedImm && "Invalid access!");
658 return ShiftedImm.ShiftAmount;
659 }
660
661 unsigned getFirstImmVal() const {
662 assert(Kind == k_ImmRange && "Invalid access!");
663 return ImmRange.First;
664 }
665
666 unsigned getLastImmVal() const {
667 assert(Kind == k_ImmRange && "Invalid access!");
668 return ImmRange.Last;
669 }
670
672 assert(Kind == k_CondCode && "Invalid access!");
673 return CondCode.Code;
674 }
675
676 APFloat getFPImm() const {
677 assert (Kind == k_FPImm && "Invalid access!");
678 return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
679 }
680
681 bool getFPImmIsExact() const {
682 assert (Kind == k_FPImm && "Invalid access!");
683 return FPImm.IsExact;
684 }
685
686 unsigned getBarrier() const {
687 assert(Kind == k_Barrier && "Invalid access!");
688 return Barrier.Val;
689 }
690
691 StringRef getBarrierName() const {
692 assert(Kind == k_Barrier && "Invalid access!");
693 return StringRef(Barrier.Data, Barrier.Length);
694 }
695
696 bool getBarriernXSModifier() const {
697 assert(Kind == k_Barrier && "Invalid access!");
698 return Barrier.HasnXSModifier;
699 }
700
701 MCRegister getReg() const override {
702 assert(Kind == k_Register && "Invalid access!");
703 return Reg.Reg;
704 }
705
706 MCRegister getMatrixReg() const {
707 assert(Kind == k_MatrixRegister && "Invalid access!");
708 return MatrixReg.Reg;
709 }
710
711 unsigned getMatrixElementWidth() const {
712 assert(Kind == k_MatrixRegister && "Invalid access!");
713 return MatrixReg.ElementWidth;
714 }
715
716 MatrixKind getMatrixKind() const {
717 assert(Kind == k_MatrixRegister && "Invalid access!");
718 return MatrixReg.Kind;
719 }
720
721 unsigned getMatrixTileListRegMask() const {
722 assert(isMatrixTileList() && "Invalid access!");
723 return MatrixTileList.RegMask;
724 }
725
726 RegConstraintEqualityTy getRegEqualityTy() const {
727 assert(Kind == k_Register && "Invalid access!");
728 return Reg.EqualityTy;
729 }
730
731 MCRegister getVectorListStart() const {
732 assert(Kind == k_VectorList && "Invalid access!");
733 return VectorList.Reg;
734 }
735
736 unsigned getVectorListCount() const {
737 assert(Kind == k_VectorList && "Invalid access!");
738 return VectorList.Count;
739 }
740
741 unsigned getVectorListStride() const {
742 assert(Kind == k_VectorList && "Invalid access!");
743 return VectorList.Stride;
744 }
745
746 int getVectorIndex() const {
747 assert(Kind == k_VectorIndex && "Invalid access!");
748 return VectorIndex.Val;
749 }
750
751 StringRef getSysReg() const {
752 assert(Kind == k_SysReg && "Invalid access!");
753 return StringRef(SysReg.Data, SysReg.Length);
754 }
755
756 unsigned getSysCR() const {
757 assert(Kind == k_SysCR && "Invalid access!");
758 return SysCRImm.Val;
759 }
760
761 unsigned getPrefetch() const {
762 assert(Kind == k_Prefetch && "Invalid access!");
763 return Prefetch.Val;
764 }
765
766 unsigned getPSBHint() const {
767 assert(Kind == k_PSBHint && "Invalid access!");
768 return PSBHint.Val;
769 }
770
771 unsigned getPHint() const {
772 assert(Kind == k_PHint && "Invalid access!");
773 return PHint.Val;
774 }
775
776 StringRef getPSBHintName() const {
777 assert(Kind == k_PSBHint && "Invalid access!");
778 return StringRef(PSBHint.Data, PSBHint.Length);
779 }
780
781 StringRef getPHintName() const {
782 assert(Kind == k_PHint && "Invalid access!");
783 return StringRef(PHint.Data, PHint.Length);
784 }
785
786 unsigned getBTIHint() const {
787 assert(Kind == k_BTIHint && "Invalid access!");
788 return BTIHint.Val;
789 }
790
791 StringRef getBTIHintName() const {
792 assert(Kind == k_BTIHint && "Invalid access!");
793 return StringRef(BTIHint.Data, BTIHint.Length);
794 }
795
796 unsigned getCMHPriorityHint() const {
797 assert(Kind == k_CMHPriorityHint && "Invalid access!");
798 return CMHPriorityHint.Val;
799 }
800
801 StringRef getCMHPriorityHintName() const {
802 assert(Kind == k_CMHPriorityHint && "Invalid access!");
803 return StringRef(CMHPriorityHint.Data, CMHPriorityHint.Length);
804 }
805
806 unsigned getTIndexHint() const {
807 assert(Kind == k_TIndexHint && "Invalid access!");
808 return TIndexHint.Val;
809 }
810
811 StringRef getTIndexHintName() const {
812 assert(Kind == k_TIndexHint && "Invalid access!");
813 return StringRef(TIndexHint.Data, TIndexHint.Length);
814 }
815
816 StringRef getSVCR() const {
817 assert(Kind == k_SVCR && "Invalid access!");
818 return StringRef(SVCR.Data, SVCR.Length);
819 }
820
821 StringRef getPrefetchName() const {
822 assert(Kind == k_Prefetch && "Invalid access!");
823 return StringRef(Prefetch.Data, Prefetch.Length);
824 }
825
826 AArch64_AM::ShiftExtendType getShiftExtendType() const {
827 if (Kind == k_ShiftExtend)
828 return ShiftExtend.Type;
829 if (Kind == k_Register)
830 return Reg.ShiftExtend.Type;
831 llvm_unreachable("Invalid access!");
832 }
833
834 unsigned getShiftExtendAmount() const {
835 if (Kind == k_ShiftExtend)
836 return ShiftExtend.Amount;
837 if (Kind == k_Register)
838 return Reg.ShiftExtend.Amount;
839 llvm_unreachable("Invalid access!");
840 }
841
842 bool hasShiftExtendAmount() const {
843 if (Kind == k_ShiftExtend)
844 return ShiftExtend.HasExplicitAmount;
845 if (Kind == k_Register)
846 return Reg.ShiftExtend.HasExplicitAmount;
847 llvm_unreachable("Invalid access!");
848 }
849
850 bool isImm() const override { return Kind == k_Immediate; }
851 bool isMem() const override { return false; }
852
853 bool isUImm6() const {
854 if (!isImm())
855 return false;
856 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
857 if (!MCE)
858 return false;
859 int64_t Val = MCE->getValue();
860 return (Val >= 0 && Val < 64);
861 }
862
863 template <int Width> bool isSImm() const {
864 return bool(isSImmScaled<Width, 1>());
865 }
866
867 template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
868 return isImmScaled<Bits, Scale>(true);
869 }
870
871 template <int Bits, int Scale, int Offset = 0, bool IsRange = false>
872 DiagnosticPredicate isUImmScaled() const {
873 if (IsRange && isImmRange() &&
874 (getLastImmVal() != getFirstImmVal() + Offset))
876
877 return isImmScaled<Bits, Scale, IsRange>(false);
878 }
879
880 template <int Bits, int Scale, bool IsRange = false>
881 DiagnosticPredicate isImmScaled(bool Signed) const {
882 if ((!isImm() && !isImmRange()) || (isImm() && IsRange) ||
883 (isImmRange() && !IsRange))
885
886 int64_t Val;
887 if (isImmRange())
888 Val = getFirstImmVal();
889 else {
890 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
891 if (!MCE)
893 Val = MCE->getValue();
894 }
895
896 int64_t MinVal, MaxVal;
897 if (Signed) {
898 int64_t Shift = Bits - 1;
899 MinVal = (int64_t(1) << Shift) * -Scale;
900 MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
901 } else {
902 MinVal = 0;
903 MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
904 }
905
906 if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
908
910 }
911
912 DiagnosticPredicate isSVEPattern() const {
913 if (!isImm())
915 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
916 if (!MCE)
918 int64_t Val = MCE->getValue();
919 if (Val >= 0 && Val < 32)
922 }
923
924 DiagnosticPredicate isSVEVecLenSpecifier() const {
925 if (!isImm())
927 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
928 if (!MCE)
930 int64_t Val = MCE->getValue();
931 if (Val >= 0 && Val <= 1)
934 }
935
936 bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
937 AArch64::Specifier ELFSpec;
938 AArch64::Specifier DarwinSpec;
939 int64_t Addend;
940 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFSpec, DarwinSpec,
941 Addend)) {
942 // If we don't understand the expression, assume the best and
943 // let the fixup and relocation code deal with it.
944 return true;
945 }
946
947 if (DarwinSpec == AArch64::S_MACHO_PAGEOFF ||
955 ELFSpec)) {
956 // Note that we don't range-check the addend. It's adjusted modulo page
957 // size when converted, so there is no "out of range" condition when using
958 // @pageoff.
959 return true;
960 } else if (DarwinSpec == AArch64::S_MACHO_GOTPAGEOFF ||
961 DarwinSpec == AArch64::S_MACHO_TLVPPAGEOFF) {
962 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
963 return Addend == 0;
964 }
965
966 return false;
967 }
968
969 template <int Scale> bool isUImm12Offset() const {
970 if (!isImm())
971 return false;
972
973 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
974 if (!MCE)
975 return isSymbolicUImm12Offset(getImm());
976
977 int64_t Val = MCE->getValue();
978 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
979 }
980
981 template <int N, int M>
982 bool isImmInRange() const {
983 if (!isImm())
984 return false;
985 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
986 if (!MCE)
987 return false;
988 int64_t Val = MCE->getValue();
989 return (Val >= N && Val <= M);
990 }
991
992 // NOTE: Also used for isLogicalImmNot as anything that can be represented as
993 // a logical immediate can always be represented when inverted.
994 template <typename T>
995 bool isLogicalImm() const {
996 if (!isImm())
997 return false;
998 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
999 if (!MCE)
1000 return false;
1001
1002 int64_t Val = MCE->getValue();
1003 // Avoid left shift by 64 directly.
1004 uint64_t Upper = UINT64_C(-1) << (sizeof(T) * 4) << (sizeof(T) * 4);
1005 // Allow all-0 or all-1 in top bits to permit bitwise NOT.
1006 if ((Val & Upper) && (Val & Upper) != Upper)
1007 return false;
1008
1009 return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8);
1010 }
1011
1012 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
1013
1014 bool isImmRange() const { return Kind == k_ImmRange; }
1015
1016 /// Returns the immediate value as a pair of (imm, shift) if the immediate is
1017 /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
1018 /// immediate that can be shifted by 'Shift'.
1019 template <unsigned Width>
1020 std::optional<std::pair<int64_t, unsigned>> getShiftedVal() const {
1021 if (isShiftedImm() && Width == getShiftedImmShift())
1022 if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
1023 return std::make_pair(CE->getValue(), Width);
1024
1025 if (isImm())
1026 if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
1027 int64_t Val = CE->getValue();
1028 if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
1029 return std::make_pair(Val >> Width, Width);
1030 else
1031 return std::make_pair(Val, 0u);
1032 }
1033
1034 return {};
1035 }
1036
1037 bool isAddSubImm() const {
1038 if (!isShiftedImm() && !isImm())
1039 return false;
1040
1041 const MCExpr *Expr;
1042
1043 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
1044 if (isShiftedImm()) {
1045 unsigned Shift = ShiftedImm.ShiftAmount;
1046 Expr = ShiftedImm.Val;
1047 if (Shift != 0 && Shift != 12)
1048 return false;
1049 } else {
1050 Expr = getImm();
1051 }
1052
1053 AArch64::Specifier ELFSpec;
1054 AArch64::Specifier DarwinSpec;
1055 int64_t Addend;
1056 if (AArch64AsmParser::classifySymbolRef(Expr, ELFSpec, DarwinSpec,
1057 Addend)) {
1058 return DarwinSpec == AArch64::S_MACHO_PAGEOFF ||
1059 DarwinSpec == AArch64::S_MACHO_TLVPPAGEOFF ||
1060 (DarwinSpec == AArch64::S_MACHO_GOTPAGEOFF && Addend == 0) ||
1068 ELFSpec);
1069 }
1070
1071 // If it's a constant, it should be a real immediate in range.
1072 if (auto ShiftedVal = getShiftedVal<12>())
1073 return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;
1074
1075 // If it's an expression, we hope for the best and let the fixup/relocation
1076 // code deal with it.
1077 return true;
1078 }
1079
1080 bool isAddSubImmNeg() const {
1081 if (!isShiftedImm() && !isImm())
1082 return false;
1083
1084 // Otherwise it should be a real negative immediate in range.
1085 if (auto ShiftedVal = getShiftedVal<12>())
1086 return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
1087
1088 return false;
1089 }
1090
  // Signed value in the range -128 to +127. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range -32768 to +32512.
  // For element-width of 8 bits a range of -128 to 255 is accepted,
  // since a copy of a byte can be either signed/unsigned.
  template <typename T>
  DiagnosticPredicate isSVECpyImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
    // NOTE(review): the guard's return statement is elided in this copy of
    // the source.

    // Byte elements accept no shift: a shifted value no longer fits 8 bits.
    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
                                     << ShiftedImm->second))
    // NOTE(review): the match/no-match return statements are elided in this
    // copy of the source.
  }

  // Unsigned value in the range 0 to 255. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range 0 to 65280.
  template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
    // NOTE(review): the guard's return statement is elided in this copy of
    // the source.

    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
                                        << ShiftedImm->second))
    // NOTE(review): the match/no-match return statements are elided in this
    // copy of the source.
  }

  // Logical immediate that is NOT also an SVE copy immediate; used to pick
  // the preferred alias when both encodings could match.
  template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
    if (isLogicalImm<T>() && !isSVECpyImm<T>())
    // NOTE(review): the match/no-match return statements are elided in this
    // copy of the source.
  }
1135
  bool isCondCode() const { return Kind == k_CondCode; }

  // Constant expressible as an AdvSIMD "modified immediate" type 10
  // (the 64-bit abcdefgh byte-mask form).
  bool isSIMDImmType10() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    // NOTE(review): the final return statement is elided in this copy of the
    // source.
  }
1146
1147 template<int N>
1148 bool isBranchTarget() const {
1149 if (!isImm())
1150 return false;
1151 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1152 if (!MCE)
1153 return true;
1154 int64_t Val = MCE->getValue();
1155 if (Val & 0x3)
1156 return false;
1157 assert(N > 0 && "Branch target immediate cannot be 0 bits!");
1158 return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
1159 }
1160
1161 bool isMovWSymbol(ArrayRef<AArch64::Specifier> AllowedModifiers) const {
1162 if (!isImm())
1163 return false;
1164
1165 AArch64::Specifier ELFSpec;
1166 AArch64::Specifier DarwinSpec;
1167 int64_t Addend;
1168 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFSpec, DarwinSpec,
1169 Addend)) {
1170 return false;
1171 }
1172 if (DarwinSpec != AArch64::S_None)
1173 return false;
1174
1175 return llvm::is_contained(AllowedModifiers, ELFSpec);
1176 }
1177
  // Symbol with a :abs_g3:/:prel_g3: style modifier (bits 48-63).
  bool isMovWSymbolG3() const {
    return isMovWSymbol({AArch64::S_ABS_G3, AArch64::S_PREL_G3});
  }

  // Symbol modifiers selecting bits 32-47.
  bool isMovWSymbolG2() const {
    // NOTE(review): the remainder of the specifier list is elided in this
    // copy of the source.
    return isMovWSymbol({AArch64::S_ABS_G2, AArch64::S_ABS_G2_S,
  }

  // Symbol modifiers selecting bits 16-31.
  bool isMovWSymbolG1() const {
    // NOTE(review): the remainder of the specifier list is elided in this
    // copy of the source.
    return isMovWSymbol({AArch64::S_ABS_G1, AArch64::S_ABS_G1_S,
  }

  // Symbol modifiers selecting bits 0-15.
  bool isMovWSymbolG0() const {
    // NOTE(review): the remainder of the specifier list is elided in this
    // copy of the source.
    return isMovWSymbol({AArch64::S_ABS_G0, AArch64::S_ABS_G0_S,
  }
1204
1205 template<int RegWidth, int Shift>
1206 bool isMOVZMovAlias() const {
1207 if (!isImm()) return false;
1208
1209 const MCExpr *E = getImm();
1210 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
1211 uint64_t Value = CE->getValue();
1212
1213 return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
1214 }
1215 // Only supports the case of Shift being 0 if an expression is used as an
1216 // operand
1217 return !Shift && E;
1218 }
1219
1220 template<int RegWidth, int Shift>
1221 bool isMOVNMovAlias() const {
1222 if (!isImm()) return false;
1223
1224 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1225 if (!CE) return false;
1226 uint64_t Value = CE->getValue();
1227
1228 return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
1229 }
1230
  // FP immediate representable in the 8-bit FMOV immediate encoding.
  bool isFPImm() const {
    return Kind == k_FPImm &&
           AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
  }

  // Barrier operand without the nXS qualifier.
  bool isBarrier() const {
    return Kind == k_Barrier && !getBarriernXSModifier();
  }
  // Barrier operand carrying the nXS qualifier.
  bool isBarriernXS() const {
    return Kind == k_Barrier && getBarriernXSModifier();
  }
  bool isSysReg() const { return Kind == k_SysReg; }

  // System register with a valid MRS (read) encoding.
  bool isMRSSystemRegister() const {
    if (!isSysReg()) return false;

    return SysReg.MRSReg != -1U;
  }

  // System register with a valid MSR (write) encoding.
  bool isMSRSystemRegister() const {
    if (!isSysReg()) return false;
    return SysReg.MSRReg != -1U;
  }

  // MSR pstate field taking a 0/1 immediate.
  bool isSystemPStateFieldWithImm0_1() const {
    if (!isSysReg()) return false;
    return AArch64PState::lookupPStateImm0_1ByEncoding(SysReg.PStateField);
  }

  // MSR pstate field taking a 0-15 immediate.
  bool isSystemPStateFieldWithImm0_15() const {
    if (!isSysReg())
      return false;
    return AArch64PState::lookupPStateImm0_15ByEncoding(SysReg.PStateField);
  }

  // SME SVCR operand with a recognised pstate field.
  bool isSVCR() const {
    if (Kind != k_SVCR)
      return false;
    return SVCR.PStateField != -1U;
  }

  bool isReg() const override {
    return Kind == k_Register;
  }

  bool isVectorList() const { return Kind == k_VectorList; }

  // Scalar (non-vector) register operand.
  bool isScalarReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar;
  }

  bool isNeonVectorReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
  }

  // NEON register restricted to the lower half of the register file
  // (the FPR128_lo / FPR64_lo classes).
  bool isNeonVectorRegLo() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
                Reg.Reg) ||
            AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
                Reg.Reg));
  }

  // NEON register restricted to V0-V7 (the FPR128_0to7 class).
  bool isNeonVectorReg0to7() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           (AArch64MCRegisterClasses[AArch64::FPR128_0to7RegClassID].contains(
               Reg.Reg));
  }

  bool isMatrix() const { return Kind == k_MatrixRegister; }
  bool isMatrixTileList() const { return Kind == k_MatrixTileList; }
1302
1303 template <unsigned Class> bool isSVEPredicateAsCounterReg() const {
1304 RegKind RK;
1305 switch (Class) {
1306 case AArch64::PPRRegClassID:
1307 case AArch64::PPR_3bRegClassID:
1308 case AArch64::PPR_p8to15RegClassID:
1309 case AArch64::PNRRegClassID:
1310 case AArch64::PNR_p8to15RegClassID:
1311 case AArch64::PPRorPNRRegClassID:
1312 RK = RegKind::SVEPredicateAsCounter;
1313 break;
1314 default:
1315 llvm_unreachable("Unsupported register class");
1316 }
1317
1318 return (Kind == k_Register && Reg.Kind == RK) &&
1319 AArch64MCRegisterClasses[Class].contains(getReg());
1320 }
1321
1322 template <unsigned Class> bool isSVEVectorReg() const {
1323 RegKind RK;
1324 switch (Class) {
1325 case AArch64::ZPRRegClassID:
1326 case AArch64::ZPR_3bRegClassID:
1327 case AArch64::ZPR_4bRegClassID:
1328 case AArch64::ZPRMul2_LoRegClassID:
1329 case AArch64::ZPRMul2_HiRegClassID:
1330 case AArch64::ZPR_KRegClassID:
1331 RK = RegKind::SVEDataVector;
1332 break;
1333 case AArch64::PPRRegClassID:
1334 case AArch64::PPR_3bRegClassID:
1335 case AArch64::PPR_p8to15RegClassID:
1336 case AArch64::PNRRegClassID:
1337 case AArch64::PNR_p8to15RegClassID:
1338 case AArch64::PPRorPNRRegClassID:
1339 RK = RegKind::SVEPredicateVector;
1340 break;
1341 default:
1342 llvm_unreachable("Unsupported register class");
1343 }
1344
1345 return (Kind == k_Register && Reg.Kind == RK) &&
1346 AArch64MCRegisterClasses[Class].contains(getReg());
1347 }
1348
  // Scalar FP register (B/H/S/D/Q) standing in for a Z register.
  template <unsigned Class> bool isFPRasZPR() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }

  // SVE predicate register with the expected element width.
  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
    // NOTE(review): the return statement is elided in this copy of the
    // source.

    if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
    // NOTE(review): the match/near-match return statements are elided in
    // this copy of the source.
  }

  // Accepts either a predicate vector or a predicate-as-counter register of
  // the expected element width.
  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateOrPredicateAsCounterRegOfWidth() const {
    if (Kind != k_Register || (Reg.Kind != RegKind::SVEPredicateAsCounter &&
                               Reg.Kind != RegKind::SVEPredicateVector))
    // NOTE(review): the return statement is elided in this copy of the
    // source.

    if ((isSVEPredicateAsCounterReg<Class>() ||
         isSVEPredicateVectorRegOfWidth<ElementWidth, Class>()) &&
        Reg.ElementWidth == ElementWidth)
    // NOTE(review): the match/near-match return statements are elided in
    // this copy of the source.
  }

  // Predicate-as-counter register of the expected element width.
  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateAsCounterRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateAsCounter)
    // NOTE(review): the return statement is elided in this copy of the
    // source.

    if (isSVEPredicateAsCounterReg<Class>() && (Reg.ElementWidth == ElementWidth))
    // NOTE(review): the match/near-match return statements are elided in
    // this copy of the source.
  }

  // SVE data (Z) register of the expected element width.
  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
    // NOTE(review): the return statement is elided in this copy of the
    // source.

    if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
    // NOTE(review): the match/near-match return statements are elided in
    // this copy of the source.
  }

  // SVE data register plus a shift/extend whose amount must match the
  // element size (with a special diagnostic for sxtw/uxtw on byte accesses).
  template <int ElementWidth, unsigned Class,
            AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
            bool ShiftWidthAlwaysSame>
  DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
    auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
    if (!VectorMatch.isMatch())
    // NOTE(review): the return statement is elided in this copy of the
    // source.

    // Give a more specific diagnostic when the user has explicitly typed in
    // a shift-amount that does not match what is expected, but for which
    // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
    bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
    if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
                        ShiftExtendTy == AArch64_AM::SXTW) &&
        !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
    // NOTE(review): the near-match return statement is elided in this copy
    // of the source.

    if (MatchShift && ShiftExtendTy == getShiftExtendType())
    // NOTE(review): the match/no-match return statements are elided in this
    // copy of the source.
  }
1423
  // 64-bit GPR name used where the instruction encodes a 32-bit register.
  bool isGPR32as64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.Reg);
  }

  // 32-bit GPR name used where the instruction encodes a 64-bit register.
  bool isGPR64as32() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.Reg);
  }

  // Member of the consecutive eight-X-register class (GPR64x8Class).
  bool isGPR64x8() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
               Reg.Reg);
  }

  // Member of the sequential W-register-pair class.
  bool isWSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
               Reg.Reg);
  }

  // Member of the sequential X-register-pair class.
  bool isXSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
               Reg.Reg);
  }

  // XZR used where a SYSP-style pair operand is expected.
  bool isSyspXzrPair() const {
    return isGPR64<AArch64::GPR64RegClassID>() && Reg.Reg == AArch64::XZR;
  }
1455
  // Complex-rotation immediate: a constant that is congruent to Remainder
  // modulo Angle and at most 270 (e.g. 0/90/180/270 for FCADD/FCMLA).
  template<int64_t Angle, int64_t Remainder>
  DiagnosticPredicate isComplexRotation() const {
    if (!isImm())
    // NOTE(review): the return statement is elided in this copy of the
    // source.

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
    // NOTE(review): the return statement is elided in this copy of the
    // source.
    uint64_t Value = CE->getValue();

    if (Value % Angle == Remainder && Value <= 270)
    // NOTE(review): the match/near-match return statements are elided in
    // this copy of the source.
  }

  // Scalar 64-bit GPR contained in the given register class.
  template <unsigned RegClassID> bool isGPR64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[RegClassID].contains(getReg());
  }

  // 64-bit GPR with an LSL whose amount matches the access size.
  template <unsigned RegClassID, int ExtWidth>
  DiagnosticPredicate isGPR64WithShiftExtend() const {
    if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
    // NOTE(review): the return statement is elided in this copy of the
    // source.

    if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
        getShiftExtendAmount() == Log2_32(ExtWidth / 8))
    // NOTE(review): the match/near-match return statements are elided in
    // this copy of the source.
  }
1486
1487 /// Is this a vector list with the type implicit (presumably attached to the
1488 /// instruction itself)?
1489 template <RegKind VectorKind, unsigned NumRegs, bool IsConsecutive = false>
1490 bool isImplicitlyTypedVectorList() const {
1491 return Kind == k_VectorList && VectorList.Count == NumRegs &&
1492 VectorList.NumElements == 0 &&
1493 VectorList.RegisterKind == VectorKind &&
1494 (!IsConsecutive || (VectorList.Stride == 1));
1495 }
1496
1497 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1498 unsigned ElementWidth, unsigned Stride = 1>
1499 bool isTypedVectorList() const {
1500 if (Kind != k_VectorList)
1501 return false;
1502 if (VectorList.Count != NumRegs)
1503 return false;
1504 if (VectorList.RegisterKind != VectorKind)
1505 return false;
1506 if (VectorList.ElementWidth != ElementWidth)
1507 return false;
1508 if (VectorList.Stride != Stride)
1509 return false;
1510 return VectorList.NumElements == NumElements;
1511 }
1512
  // Typed vector list whose first register must additionally lie in the
  // given register class (used for the multi-vector "multiple" forms).
  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
            unsigned ElementWidth, unsigned RegClass>
  DiagnosticPredicate isTypedVectorListMultiple() const {
    bool Res =
        isTypedVectorList<VectorKind, NumRegs, NumElements, ElementWidth>();
    if (!Res)
    // NOTE(review): the return statement is elided in this copy of the
    // source.
    if (!AArch64MCRegisterClasses[RegClass].contains(VectorList.Reg))
    // NOTE(review): the return statements are elided in this copy of the
    // source.
  }

  // Strided vector list; also constrains where the first register may lie
  // relative to the stride within each half of the Z register file.
  template <RegKind VectorKind, unsigned NumRegs, unsigned Stride,
            unsigned ElementWidth>
  DiagnosticPredicate isTypedVectorListStrided() const {
    bool Res = isTypedVectorList<VectorKind, NumRegs, /*NumElements*/ 0,
                                 ElementWidth, Stride>();
    if (!Res)
    // NOTE(review): the return statement is elided in this copy of the
    // source.
    if ((VectorList.Reg < (AArch64::Z0 + Stride)) ||
        ((VectorList.Reg >= AArch64::Z16) &&
         (VectorList.Reg < (AArch64::Z16 + Stride))))
    // NOTE(review): the return statements are elided in this copy of the
    // source.
  }

  // Vector lane index within [Min, Max].
  template <int Min, int Max>
  DiagnosticPredicate isVectorIndex() const {
    if (Kind != k_VectorIndex)
    // NOTE(review): the return statement is elided in this copy of the
    // source.
    if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
    // NOTE(review): the match/near-match return statements are elided in
    // this copy of the source.
  }
1547
  bool isToken() const override { return Kind == k_Token; }

  // Token operand with the exact given spelling.
  bool isTokenEqual(StringRef Str) const {
    return Kind == k_Token && getToken() == Str;
  }
  bool isSysCR() const { return Kind == k_SysCR; }
  bool isPrefetch() const { return Kind == k_Prefetch; }
  bool isPSBHint() const { return Kind == k_PSBHint; }
  bool isPHint() const { return Kind == k_PHint; }
  bool isBTIHint() const { return Kind == k_BTIHint; }
  bool isCMHPriorityHint() const { return Kind == k_CMHPriorityHint; }
  bool isTIndexHint() const { return Kind == k_TIndexHint; }
  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
  // Plain shift operand (no register extend): LSL/LSR/ASR/ROR/MSL.
  bool isShifter() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
            ST == AArch64_AM::MSL);
  }
1570
  // Match this FP immediate bit-exactly against one entry of the
  // exact-FP-immediate table identified by ImmEnum.
  template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
    if (Kind != k_FPImm)
    // NOTE(review): the return statement is elided in this copy of the
    // source.

    if (getFPImmIsExact()) {
      // Lookup the immediate from table of supported immediates.
      auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
      assert(Desc && "Unknown enum value");

      // Calculate its FP value.
      APFloat RealVal(APFloat::IEEEdouble());
      auto StatusOrErr =
          RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
      if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
        llvm_unreachable("FP immediate is not exact");

      if (getFPImm().bitwiseIsEqual(RealVal))
    // NOTE(review): the match return statement is elided in this copy of
    // the source.
    }

    // NOTE(review): the final return statement is elided in this copy of
    // the source.
  }

  // Match against either of two table entries; returns the first match.
  template <unsigned ImmA, unsigned ImmB>
  DiagnosticPredicate isExactFPImm() const {
    DiagnosticPredicate Res = DiagnosticPredicate::NoMatch;
    if ((Res = isExactFPImm<ImmA>()))
    // NOTE(review): the return statement is elided in this copy of the
    // source.
    if ((Res = isExactFPImm<ImmB>()))
    // NOTE(review): the return statement is elided in this copy of the
    // source.
    return Res;
  }
1603
  // Any extend operand (UXTB..SXTX or LSL) with an amount of at most 4.
  bool isExtend() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
            ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
            ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
            ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  bool isExtend64() const {
    if (!isExtend())
      return false;
    // Make sure the extend expects a 32-bit source register.
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
           ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
           ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
  }

  // Extend form taking a 64-bit source register (UXTX/SXTX/LSL).
  bool isExtendLSL64() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  // 'lsl #0' .. 'lsl #7'.
  bool isLSLImm3Shift() const {
    if (!isShiftExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET == AArch64_AM::LSL && getShiftExtendAmount() <= 7;
  }

  // X-register offset extend for a Width-bit access: LSL or SXTX, shifted
  // either by 0 or by the access size.
  template<int Width> bool isMemXExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  // W-register offset extend for a Width-bit access: UXTW or SXTW, shifted
  // either by 0 or by the access size.
  template<int Width> bool isMemWExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }
1660
  // Shifter usable on arithmetic (ADD/SUB/CMP) operands; amount < width.
  template <unsigned width>
  bool isArithmeticShifter() const {
    if (!isShifter())
      return false;

    // An arithmetic shifter is LSL, LSR, or ASR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
  }

  // Shifter usable on logical (AND/ORR/EOR) operands; amount < width.
  template <unsigned width>
  bool isLogicalShifter() const {
    if (!isShifter())
      return false;

    // A logical shifter is LSL, LSR, ASR or ROR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
           getShiftExtendAmount() < width;
  }
1683
  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is LSL of 0 or 16.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }

  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }
1707
  bool isLogicalVecShifter() const {
    if (!isShifter())
      return false;

    // A logical vector shifter is a left shift by 0, 8, 16, or 24.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
  }

  bool isLogicalVecHalfWordShifter() const {
    if (!isLogicalVecShifter())
      return false;

    // A logical vector half-word shifter is a left shift by 0 or 8.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8);
  }

  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter (MSL) is a left shift by 8 or 16.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }
1737
  // Fallback unscaled operands are for aliases of LDR/STR that fall back
  // to LDUR/STUR when the offset is not legal for the former but is for
  // the latter. As such, in addition to checking for being a legal unscaled
  // address, also check that it is not a legal scaled address. This avoids
  // ambiguity in the matcher.
  // Width is the access size in bits.
  template<int Width>
  bool isSImm9OffsetFB() const {
    return isSImm<9>() && !isUImm12Offset<Width / 8>();
  }
1747
1748 bool isAdrpLabel() const {
1749 // Validation was handled during parsing, so we just verify that
1750 // something didn't go haywire.
1751 if (!isImm())
1752 return false;
1753
1754 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1755 int64_t Val = CE->getValue();
1756 int64_t Min = - (4096 * (1LL << (21 - 1)));
1757 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1758 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1759 }
1760
1761 return true;
1762 }
1763
1764 bool isAdrLabel() const {
1765 // Validation was handled during parsing, so we just verify that
1766 // something didn't go haywire.
1767 if (!isImm())
1768 return false;
1769
1770 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1771 int64_t Val = CE->getValue();
1772 int64_t Min = - (1LL << (21 - 1));
1773 int64_t Max = ((1LL << (21 - 1)) - 1);
1774 return Val >= Min && Val <= Max;
1775 }
1776
1777 return true;
1778 }
1779
  // SME matrix (ZA) operand of the given kind, register class and element
  // size.
  template <MatrixKind Kind, unsigned EltSize, unsigned RegClass>
  DiagnosticPredicate isMatrixRegOperand() const {
    if (!isMatrix())
    // NOTE(review): the return statement is elided in this copy of the
    // source.
    if (getMatrixKind() != Kind ||
        !AArch64MCRegisterClasses[RegClass].contains(getMatrixReg()) ||
        EltSize != getMatrixElementWidth())
    // NOTE(review): the return statements are elided in this copy of the
    // source.
  }
1790
1791 bool isPAuthPCRelLabel16Operand() const {
1792 // PAuth PCRel16 operands are similar to regular branch targets, but only
1793 // negative values are allowed for concrete immediates as signing instr
1794 // should be in a lower address.
1795 if (!isImm())
1796 return false;
1797 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1798 if (!MCE)
1799 return true;
1800 int64_t Val = MCE->getValue();
1801 if (Val & 0b11)
1802 return false;
1803 return (Val <= 0) && (Val > -(1 << 18));
1804 }
1805
  // Append Expr to Inst, folding constant expressions into plain immediate
  // operands. A null Expr stands for the immediate 0.
  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible. Null MCExpr = 0.
    if (!Expr)
    // NOTE(review): the operand-append statement is elided in this copy of
    // the source.
    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
    // NOTE(review): the operand-append statement is elided in this copy of
    // the source.
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // NOTE(review): the register-append statement is elided in this copy of
    // the source.
  }

  void addMatrixOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getMatrixReg()));
  }

  // Emit the W register with the same index as the parsed X register.
  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    MCRegister Reg = RI->getRegClass(AArch64::GPR32RegClassID)
    // NOTE(review): the continuation and operand-append statements are
    // elided in this copy of the source.
  }

  // Emit the X register with the same index as the parsed W register.
  void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    MCRegister Reg = RI->getRegClass(AArch64::GPR64RegClassID)
    // NOTE(review): the continuation and operand-append statements are
    // elided in this copy of the source.
  }
1849
1850 template <int Width>
1851 void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
1852 unsigned Base;
1853 switch (Width) {
1854 case 8: Base = AArch64::B0; break;
1855 case 16: Base = AArch64::H0; break;
1856 case 32: Base = AArch64::S0; break;
1857 case 64: Base = AArch64::D0; break;
1858 case 128: Base = AArch64::Q0; break;
1859 default:
1860 llvm_unreachable("Unsupported width");
1861 }
1862 Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
1863 }
1864
  void addPPRorPNRRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    MCRegister Reg = getReg();
    // Normalise to PPR
    if (Reg >= AArch64::PN0 && Reg <= AArch64::PN15)
      Reg = Reg - AArch64::PN0 + AArch64::P0;
    // NOTE(review): the operand-append statement is elided in this copy of
    // the source.
  }

  // Emit a PN register as its P-register alias.
  void addPNRasPPRRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(
        MCOperand::createReg((getReg() - AArch64::PN0) + AArch64::P0));
  }

  // Emit the D register aliasing the parsed Q register.
  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
  }

  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    // NOTE(review): the operand-append statement is elided in this copy of
    // the source.
  }

  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // NOTE(review): the operand-append statement is elided in this copy of
    // the source.
  }

  void addVectorReg0to7Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // NOTE(review): the operand-append statement is elided in this copy of
    // the source.
  }
1903
  // Row selector into the register-tuple base table used when emitting
  // vector-list operands.
  enum VecListIndexType {
    VecListIdx_DReg = 0,
    VecListIdx_QReg = 1,
    VecListIdx_ZReg = 2,
    VecListIdx_PReg = 3,
  };
1910
1911 template <VecListIndexType RegTy, unsigned NumRegs,
1912 bool IsConsecutive = false>
1913 void addVectorListOperands(MCInst &Inst, unsigned N) const {
1914 assert(N == 1 && "Invalid number of operands!");
1915 assert((!IsConsecutive || (getVectorListStride() == 1)) &&
1916 "Expected consecutive registers");
1917 static const unsigned FirstRegs[][5] = {
1918 /* DReg */ { AArch64::Q0,
1919 AArch64::D0, AArch64::D0_D1,
1920 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
1921 /* QReg */ { AArch64::Q0,
1922 AArch64::Q0, AArch64::Q0_Q1,
1923 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
1924 /* ZReg */ { AArch64::Z0,
1925 AArch64::Z0, AArch64::Z0_Z1,
1926 AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 },
1927 /* PReg */ { AArch64::P0,
1928 AArch64::P0, AArch64::P0_P1 }
1929 };
1930
1931 assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
1932 " NumRegs must be <= 4 for ZRegs");
1933
1934 assert((RegTy != VecListIdx_PReg || NumRegs <= 2) &&
1935 " NumRegs must be <= 2 for PRegs");
1936
1937 unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
1938 Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
1939 FirstRegs[(unsigned)RegTy][0]));
1940 }
1941
  // Emit the strided register-tuple operand (stride 8 for pairs, stride 4
  // for quads) corresponding to the parsed list start.
  template <unsigned NumRegs>
  void addStridedVectorListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert((NumRegs == 2 || NumRegs == 4) && " NumRegs must be 2 or 4");

    switch (NumRegs) {
    case 2:
      if (getVectorListStart() < AArch64::Z16) {
        assert((getVectorListStart() < AArch64::Z8) &&
               (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
        // NOTE(review): the Inst.addOperand(MCOperand::createReg( line is
        // elided in this copy of the source.
            AArch64::Z0_Z8 + getVectorListStart() - AArch64::Z0));
      } else {
        assert((getVectorListStart() < AArch64::Z24) &&
               (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
        // NOTE(review): the Inst.addOperand(MCOperand::createReg( line is
        // elided in this copy of the source.
            AArch64::Z16_Z24 + getVectorListStart() - AArch64::Z16));
      }
      break;
    case 4:
      if (getVectorListStart() < AArch64::Z16) {
        assert((getVectorListStart() < AArch64::Z4) &&
               (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
        // NOTE(review): the Inst.addOperand(MCOperand::createReg( line is
        // elided in this copy of the source.
            AArch64::Z0_Z4_Z8_Z12 + getVectorListStart() - AArch64::Z0));
      } else {
        assert((getVectorListStart() < AArch64::Z20) &&
               (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
        // NOTE(review): the Inst.addOperand(MCOperand::createReg( line is
        // elided in this copy of the source.
            AArch64::Z16_Z20_Z24_Z28 + getVectorListStart() - AArch64::Z16));
      }
      break;
    default:
      llvm_unreachable("Unsupported number of registers for strided vec list");
    }
  }
1978
  // Emit the 8-bit ZA tile mask for a matrix tile list.
  void addMatrixTileListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned RegMask = getMatrixTileListRegMask();
    assert(RegMask <= 0xFF && "Invalid mask!");
    Inst.addOperand(MCOperand::createImm(RegMask));
  }

  void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  // Emit 0/1 selecting which of the two candidate exact FP immediates this
  // operand matched.
  template <unsigned ImmIs0, unsigned ImmIs1>
  void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
    Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
  }

  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // If this is a pageoff symrefexpr with an addend, adjust the addend
    // to be only the page-offset portion. Otherwise, just add the expr
    // as-is.
    addExpr(Inst, getImm());
  }
2005
  // Emit the (value, shift) operand pair for an optionally-shifted
  // immediate. Non-constant expressions are emitted with their recorded (or
  // zero) shift for the fixup to resolve.
  template <int Shift>
  void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {
      Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
      Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
    } else if (isShiftedImm()) {
      addExpr(Inst, getShiftedImmVal());
      Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
    } else {
      addExpr(Inst, getImm());
      // NOTE(review): the shift-amount operand-append statement is elided in
      // this copy of the source.
    }
  }
2020
2021 template <int Shift>
2022 void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
2023 assert(N == 2 && "Invalid number of operands!");
2024 if (auto ShiftedVal = getShiftedVal<Shift>()) {
2025 Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
2026 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
2027 } else
2028 llvm_unreachable("Not a shifted negative immediate");
2029 }
2030
  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // NOTE(review): the operand-append statement is elided in this copy of
    // the source.
  }

  // ADRP targets are stored as page deltas: constants are shifted down by
  // 12 here; symbolic values are left for the fixup.
  void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      addExpr(Inst, getImm());
    else
      Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
  }

  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
    addImmOperands(Inst, N);
  }

  // Emit the constant scaled down by the access size.
  template<int Scale>
  void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());

    if (!MCE) {
      // NOTE(review): the expression-operand append is elided in this copy
      // of the source.
      return;
    }
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
  }

  void addUImm6Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    // NOTE(review): the operand-append statement is elided in this copy of
    // the source.
  }
2066
  // Emit the constant divided by Scale (the field stores the scaled value).
  template <int Scale>
  void addImmScaledOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
  }

  // Range operand: only the first value of the range is encoded, scaled.
  template <int Scale>
  void addImmScaledRangeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getFirstImmVal() / Scale));
  }
2079
2080 template <typename T>
2081 void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
2082 assert(N == 1 && "Invalid number of operands!");
2083 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2084 std::make_unsigned_t<T> Val = MCE->getValue();
2085 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
2086 Inst.addOperand(MCOperand::createImm(encoding));
2087 }
2088
2089 template <typename T>
2090 void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
2091 assert(N == 1 && "Invalid number of operands!");
2092 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2093 std::make_unsigned_t<T> Val = ~MCE->getValue();
2094 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
2095 Inst.addOperand(MCOperand::createImm(encoding));
2096 }
2097
2098 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
2099 assert(N == 1 && "Invalid number of operands!");
2100 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2101 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
2102 Inst.addOperand(MCOperand::createImm(encoding));
2103 }
2104
2105 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
2106 // Branch operands don't encode the low bits, so shift them off
2107 // here. If it's a label, however, just put it on directly as there's
2108 // not enough information now to do anything.
2109 assert(N == 1 && "Invalid number of operands!");
2110 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2111 if (!MCE) {
2112 addExpr(Inst, getImm());
2113 return;
2114 }
2115 assert(MCE && "Invalid constant immediate operand!");
2116 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2117 }
2118
2119 void addPAuthPCRelLabel16Operands(MCInst &Inst, unsigned N) const {
2120 // PC-relative operands don't encode the low bits, so shift them off
2121 // here. If it's a label, however, just put it on directly as there's
2122 // not enough information now to do anything.
2123 assert(N == 1 && "Invalid number of operands!");
2124 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2125 if (!MCE) {
2126 addExpr(Inst, getImm());
2127 return;
2128 }
2129 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2130 }
2131
2132 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
2133 // Branch operands don't encode the low bits, so shift them off
2134 // here. If it's a label, however, just put it on directly as there's
2135 // not enough information now to do anything.
2136 assert(N == 1 && "Invalid number of operands!");
2137 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2138 if (!MCE) {
2139 addExpr(Inst, getImm());
2140 return;
2141 }
2142 assert(MCE && "Invalid constant immediate operand!");
2143 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2144 }
2145
2146 void addPCRelLabel9Operands(MCInst &Inst, unsigned N) const {
2147 // Branch operands don't encode the low bits, so shift them off
2148 // here. If it's a label, however, just put it on directly as there's
2149 // not enough information now to do anything.
2150 assert(N == 1 && "Invalid number of operands!");
2151 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2152 if (!MCE) {
2153 addExpr(Inst, getImm());
2154 return;
2155 }
2156 assert(MCE && "Invalid constant immediate operand!");
2157 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2158 }
2159
2160 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
2161 // Branch operands don't encode the low bits, so shift them off
2162 // here. If it's a label, however, just put it on directly as there's
2163 // not enough information now to do anything.
2164 assert(N == 1 && "Invalid number of operands!");
2165 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2166 if (!MCE) {
2167 addExpr(Inst, getImm());
2168 return;
2169 }
2170 assert(MCE && "Invalid constant immediate operand!");
2171 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2172 }
2173
2174 void addFPImmOperands(MCInst &Inst, unsigned N) const {
2175 assert(N == 1 && "Invalid number of operands!");
2177 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
2178 }
2179
  // Add the barrier-option value (DSB/DMB/ISB operand) as an immediate.
  void addBarrierOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getBarrier()));
  }
2184
  // Add the barrier-option value for the nXS barrier variants; the encoding
  // source is the same stored barrier value.
  void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getBarrier()));
  }
2189
  // Add the MRS (read) encoding of the parsed system register.
  void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
  }
2195
  // Add the MSR (write) encoding of the parsed system register.
  void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
  }
2201
  // Add the PSTATE-field encoding for pstate accessors taking a 0/1 imm.
  void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
  }
2207
  // Add the SVCR (SME streaming control register) pstate-field encoding.
  void addSVCROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SVCR.PStateField));
  }
2213
  // Add the PSTATE-field encoding for pstate accessors taking a 0-15 imm.
  void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
  }
2219
  // Add a system control-register index (the "Cn"/"Cm" of SYS/AT/DC/IC).
  void addSysCROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getSysCR()));
  }
2224
  // Add the prefetch-operation encoding (PRFM operand).
  void addPrefetchOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getPrefetch()));
  }
2229
  // Add the PSB hint encoding.
  void addPSBHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getPSBHint()));
  }
2234
  // Add the PHint encoding.
  void addPHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getPHint()));
  }
2239
  // Add the BTI hint encoding (value already includes the hint-space bias
  // applied in CreateBTIHint).
  void addBTIHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getBTIHint()));
  }
2244
  // Add the CMH priority hint encoding.
  void addCMHPriorityHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getCMHPriorityHint()));
  }
2249
  // Add the TIndex hint encoding.
  void addTIndexHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getTIndexHint()));
  }
2254
2255 void addShifterOperands(MCInst &Inst, unsigned N) const {
2256 assert(N == 1 && "Invalid number of operands!");
2257 unsigned Imm =
2258 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
2260 }
2261
2262 void addLSLImm3ShifterOperands(MCInst &Inst, unsigned N) const {
2263 assert(N == 1 && "Invalid number of operands!");
2264 unsigned Imm = getShiftExtendAmount();
2266 }
2267
2268 void addSyspXzrPairOperand(MCInst &Inst, unsigned N) const {
2269 assert(N == 1 && "Invalid number of operands!");
2270
2271 if (!isScalarReg())
2272 return;
2273
2274 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2275 MCRegister Reg = RI->getRegClass(AArch64::GPR64RegClassID)
2277 if (Reg != AArch64::XZR)
2278 llvm_unreachable("wrong register");
2279
2280 Inst.addOperand(MCOperand::createReg(AArch64::XZR));
2281 }
2282
2283 void addExtendOperands(MCInst &Inst, unsigned N) const {
2284 assert(N == 1 && "Invalid number of operands!");
2285 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2286 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
2287 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
2289 }
2290
2291 void addExtend64Operands(MCInst &Inst, unsigned N) const {
2292 assert(N == 1 && "Invalid number of operands!");
2293 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2294 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
2295 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
2297 }
2298
2299 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
2300 assert(N == 2 && "Invalid number of operands!");
2301 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2302 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2303 Inst.addOperand(MCOperand::createImm(IsSigned));
2304 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
2305 }
2306
2307 // For 8-bit load/store instructions with a register offset, both the
2308 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
2309 // they're disambiguated by whether the shift was explicit or implicit rather
2310 // than its size.
2311 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
2312 assert(N == 2 && "Invalid number of operands!");
2313 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2314 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2315 Inst.addOperand(MCOperand::createImm(IsSigned));
2316 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
2317 }
2318
2319 template<int Shift>
2320 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
2321 assert(N == 1 && "Invalid number of operands!");
2322
2323 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2324 if (CE) {
2325 uint64_t Value = CE->getValue();
2326 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
2327 } else {
2328 addExpr(Inst, getImm());
2329 }
2330 }
2331
2332 template<int Shift>
2333 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
2334 assert(N == 1 && "Invalid number of operands!");
2335
2336 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2337 uint64_t Value = CE->getValue();
2338 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
2339 }
2340
2341 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
2342 assert(N == 1 && "Invalid number of operands!");
2343 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2344 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
2345 }
2346
2347 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
2348 assert(N == 1 && "Invalid number of operands!");
2349 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2350 Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
2351 }
2352
2353 void print(raw_ostream &OS, const MCAsmInfo &MAI) const override;
2354
2355 static std::unique_ptr<AArch64Operand>
2356 CreateToken(StringRef Str, SMLoc S, MCContext &Ctx, bool IsSuffix = false) {
2357 auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
2358 Op->Tok.Data = Str.data();
2359 Op->Tok.Length = Str.size();
2360 Op->Tok.IsSuffix = IsSuffix;
2361 Op->StartLoc = S;
2362 Op->EndLoc = S;
2363 return Op;
2364 }
2365
2366 static std::unique_ptr<AArch64Operand>
2367 CreateReg(MCRegister Reg, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
2368 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
2370 unsigned ShiftAmount = 0, unsigned HasExplicitAmount = false) {
2371 auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
2372 Op->Reg.Reg = Reg;
2373 Op->Reg.Kind = Kind;
2374 Op->Reg.ElementWidth = 0;
2375 Op->Reg.EqualityTy = EqTy;
2376 Op->Reg.ShiftExtend.Type = ExtTy;
2377 Op->Reg.ShiftExtend.Amount = ShiftAmount;
2378 Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2379 Op->StartLoc = S;
2380 Op->EndLoc = E;
2381 return Op;
2382 }
2383
2384 static std::unique_ptr<AArch64Operand> CreateVectorReg(
2385 MCRegister Reg, RegKind Kind, unsigned ElementWidth, SMLoc S, SMLoc E,
2386 MCContext &Ctx, AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
2387 unsigned ShiftAmount = 0, unsigned HasExplicitAmount = false) {
2388 assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
2389 Kind == RegKind::SVEPredicateVector ||
2390 Kind == RegKind::SVEPredicateAsCounter) &&
2391 "Invalid vector kind");
2392 auto Op = CreateReg(Reg, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
2393 HasExplicitAmount);
2394 Op->Reg.ElementWidth = ElementWidth;
2395 return Op;
2396 }
2397
2398 static std::unique_ptr<AArch64Operand>
2399 CreateVectorList(MCRegister Reg, unsigned Count, unsigned Stride,
2400 unsigned NumElements, unsigned ElementWidth,
2401 RegKind RegisterKind, SMLoc S, SMLoc E, MCContext &Ctx) {
2402 auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
2403 Op->VectorList.Reg = Reg;
2404 Op->VectorList.Count = Count;
2405 Op->VectorList.Stride = Stride;
2406 Op->VectorList.NumElements = NumElements;
2407 Op->VectorList.ElementWidth = ElementWidth;
2408 Op->VectorList.RegisterKind = RegisterKind;
2409 Op->StartLoc = S;
2410 Op->EndLoc = E;
2411 return Op;
2412 }
2413
2414 static std::unique_ptr<AArch64Operand>
2415 CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
2416 auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
2417 Op->VectorIndex.Val = Idx;
2418 Op->StartLoc = S;
2419 Op->EndLoc = E;
2420 return Op;
2421 }
2422
2423 static std::unique_ptr<AArch64Operand>
2424 CreateMatrixTileList(unsigned RegMask, SMLoc S, SMLoc E, MCContext &Ctx) {
2425 auto Op = std::make_unique<AArch64Operand>(k_MatrixTileList, Ctx);
2426 Op->MatrixTileList.RegMask = RegMask;
2427 Op->StartLoc = S;
2428 Op->EndLoc = E;
2429 return Op;
2430 }
2431
2432 static void ComputeRegsForAlias(unsigned Reg, SmallSet<unsigned, 8> &OutRegs,
2433 const unsigned ElementWidth) {
2434 static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
2435 RegMap = {
2436 {{0, AArch64::ZAB0},
2437 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2438 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2439 {{8, AArch64::ZAB0},
2440 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2441 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2442 {{16, AArch64::ZAH0},
2443 {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
2444 {{16, AArch64::ZAH1},
2445 {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
2446 {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
2447 {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
2448 {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
2449 {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
2450 };
2451
2452 if (ElementWidth == 64)
2453 OutRegs.insert(Reg);
2454 else {
2455 std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth, Reg)];
2456 assert(!Regs.empty() && "Invalid tile or element width!");
2457 OutRegs.insert_range(Regs);
2458 }
2459 }
2460
2461 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
2462 SMLoc E, MCContext &Ctx) {
2463 auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
2464 Op->Imm.Val = Val;
2465 Op->StartLoc = S;
2466 Op->EndLoc = E;
2467 return Op;
2468 }
2469
2470 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
2471 unsigned ShiftAmount,
2472 SMLoc S, SMLoc E,
2473 MCContext &Ctx) {
2474 auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
2475 Op->ShiftedImm .Val = Val;
2476 Op->ShiftedImm.ShiftAmount = ShiftAmount;
2477 Op->StartLoc = S;
2478 Op->EndLoc = E;
2479 return Op;
2480 }
2481
2482 static std::unique_ptr<AArch64Operand> CreateImmRange(unsigned First,
2483 unsigned Last, SMLoc S,
2484 SMLoc E,
2485 MCContext &Ctx) {
2486 auto Op = std::make_unique<AArch64Operand>(k_ImmRange, Ctx);
2487 Op->ImmRange.First = First;
2488 Op->ImmRange.Last = Last;
2489 Op->EndLoc = E;
2490 return Op;
2491 }
2492
2493 static std::unique_ptr<AArch64Operand>
2494 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
2495 auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
2496 Op->CondCode.Code = Code;
2497 Op->StartLoc = S;
2498 Op->EndLoc = E;
2499 return Op;
2500 }
2501
2502 static std::unique_ptr<AArch64Operand>
2503 CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
2504 auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
2505 Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
2506 Op->FPImm.IsExact = IsExact;
2507 Op->StartLoc = S;
2508 Op->EndLoc = S;
2509 return Op;
2510 }
2511
2512 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
2513 StringRef Str,
2514 SMLoc S,
2515 MCContext &Ctx,
2516 bool HasnXSModifier) {
2517 auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
2518 Op->Barrier.Val = Val;
2519 Op->Barrier.Data = Str.data();
2520 Op->Barrier.Length = Str.size();
2521 Op->Barrier.HasnXSModifier = HasnXSModifier;
2522 Op->StartLoc = S;
2523 Op->EndLoc = S;
2524 return Op;
2525 }
2526
2527 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
2528 uint32_t MRSReg,
2529 uint32_t MSRReg,
2530 uint32_t PStateField,
2531 MCContext &Ctx) {
2532 auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
2533 Op->SysReg.Data = Str.data();
2534 Op->SysReg.Length = Str.size();
2535 Op->SysReg.MRSReg = MRSReg;
2536 Op->SysReg.MSRReg = MSRReg;
2537 Op->SysReg.PStateField = PStateField;
2538 Op->StartLoc = S;
2539 Op->EndLoc = S;
2540 return Op;
2541 }
2542
2543 static std::unique_ptr<AArch64Operand>
2544 CreatePHintInst(unsigned Val, StringRef Str, SMLoc S, MCContext &Ctx) {
2545 auto Op = std::make_unique<AArch64Operand>(k_PHint, Ctx);
2546 Op->PHint.Val = Val;
2547 Op->PHint.Data = Str.data();
2548 Op->PHint.Length = Str.size();
2549 Op->StartLoc = S;
2550 Op->EndLoc = S;
2551 return Op;
2552 }
2553
2554 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
2555 SMLoc E, MCContext &Ctx) {
2556 auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
2557 Op->SysCRImm.Val = Val;
2558 Op->StartLoc = S;
2559 Op->EndLoc = E;
2560 return Op;
2561 }
2562
2563 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
2564 StringRef Str,
2565 SMLoc S,
2566 MCContext &Ctx) {
2567 auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
2568 Op->Prefetch.Val = Val;
2569 Op->Barrier.Data = Str.data();
2570 Op->Barrier.Length = Str.size();
2571 Op->StartLoc = S;
2572 Op->EndLoc = S;
2573 return Op;
2574 }
2575
2576 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
2577 StringRef Str,
2578 SMLoc S,
2579 MCContext &Ctx) {
2580 auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
2581 Op->PSBHint.Val = Val;
2582 Op->PSBHint.Data = Str.data();
2583 Op->PSBHint.Length = Str.size();
2584 Op->StartLoc = S;
2585 Op->EndLoc = S;
2586 return Op;
2587 }
2588
2589 static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
2590 StringRef Str,
2591 SMLoc S,
2592 MCContext &Ctx) {
2593 auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
2594 Op->BTIHint.Val = Val | 32;
2595 Op->BTIHint.Data = Str.data();
2596 Op->BTIHint.Length = Str.size();
2597 Op->StartLoc = S;
2598 Op->EndLoc = S;
2599 return Op;
2600 }
2601
2602 static std::unique_ptr<AArch64Operand>
2603 CreateCMHPriorityHint(unsigned Val, StringRef Str, SMLoc S, MCContext &Ctx) {
2604 auto Op = std::make_unique<AArch64Operand>(k_CMHPriorityHint, Ctx);
2605 Op->CMHPriorityHint.Val = Val;
2606 Op->CMHPriorityHint.Data = Str.data();
2607 Op->CMHPriorityHint.Length = Str.size();
2608 Op->StartLoc = S;
2609 Op->EndLoc = S;
2610 return Op;
2611 }
2612
2613 static std::unique_ptr<AArch64Operand>
2614 CreateTIndexHint(unsigned Val, StringRef Str, SMLoc S, MCContext &Ctx) {
2615 auto Op = std::make_unique<AArch64Operand>(k_TIndexHint, Ctx);
2616 Op->TIndexHint.Val = Val;
2617 Op->TIndexHint.Data = Str.data();
2618 Op->TIndexHint.Length = Str.size();
2619 Op->StartLoc = S;
2620 Op->EndLoc = S;
2621 return Op;
2622 }
2623
2624 static std::unique_ptr<AArch64Operand>
2625 CreateMatrixRegister(MCRegister Reg, unsigned ElementWidth, MatrixKind Kind,
2626 SMLoc S, SMLoc E, MCContext &Ctx) {
2627 auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx);
2628 Op->MatrixReg.Reg = Reg;
2629 Op->MatrixReg.ElementWidth = ElementWidth;
2630 Op->MatrixReg.Kind = Kind;
2631 Op->StartLoc = S;
2632 Op->EndLoc = E;
2633 return Op;
2634 }
2635
2636 static std::unique_ptr<AArch64Operand>
2637 CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) {
2638 auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx);
2639 Op->SVCR.PStateField = PStateField;
2640 Op->SVCR.Data = Str.data();
2641 Op->SVCR.Length = Str.size();
2642 Op->StartLoc = S;
2643 Op->EndLoc = S;
2644 return Op;
2645 }
2646
2647 static std::unique_ptr<AArch64Operand>
2648 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2649 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2650 auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2651 Op->ShiftExtend.Type = ShOp;
2652 Op->ShiftExtend.Amount = Val;
2653 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2654 Op->StartLoc = S;
2655 Op->EndLoc = E;
2656 return Op;
2657 }
2658};
2659
2660} // end anonymous namespace.
2661
/// Debug-print this parsed operand to OS. The output format is for
/// diagnostics/-debug dumps only and is not stable assembly syntax.
void AArch64Operand::print(raw_ostream &OS, const MCAsmInfo &MAI) const {
  switch (Kind) {
  case k_FPImm:
    OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
    if (!getFPImmIsExact())
      OS << " (inexact)";
    OS << ">";
    break;
  case k_Barrier: {
    StringRef Name = getBarrierName();
    if (!Name.empty())
      OS << "<barrier " << Name << ">";
    else
      OS << "<barrier invalid #" << getBarrier() << ">";
    break;
  }
  case k_Immediate:
    MAI.printExpr(OS, *getImm());
    break;
  case k_ShiftedImm: {
    unsigned Shift = getShiftedImmShift();
    OS << "<shiftedimm ";
    MAI.printExpr(OS, *getShiftedImmVal());
    OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
    break;
  }
  case k_ImmRange: {
    OS << "<immrange ";
    OS << getFirstImmVal();
    OS << ":" << getLastImmVal() << ">";
    break;
  }
  case k_CondCode:
    OS << "<condcode " << getCondCode() << ">";
    break;
  case k_VectorList: {
    OS << "<vectorlist ";
    MCRegister Reg = getVectorListStart();
    // Print the raw register number of each list element, honoring the
    // list's stride.
    for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
      OS << Reg.id() + i * getVectorListStride() << " ";
    OS << ">";
    break;
  }
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  case k_SysReg:
    OS << "<sysreg: " << getSysReg() << '>';
    break;
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_SysCR:
    OS << "c" << getSysCR();
    break;
  case k_Prefetch: {
    StringRef Name = getPrefetchName();
    if (!Name.empty())
      OS << "<prfop " << Name << ">";
    else
      OS << "<prfop invalid #" << getPrefetch() << ">";
    break;
  }
  case k_PSBHint:
    OS << getPSBHintName();
    break;
  case k_PHint:
    OS << getPHintName();
    break;
  case k_BTIHint:
    OS << getBTIHintName();
    break;
  case k_CMHPriorityHint:
    OS << getCMHPriorityHintName();
    break;
  case k_TIndexHint:
    OS << getTIndexHintName();
    break;
  case k_MatrixRegister:
    OS << "<matrix " << getMatrixReg().id() << ">";
    break;
  case k_MatrixTileList: {
    OS << "<matrixlist ";
    unsigned RegMask = getMatrixTileListRegMask();
    unsigned MaxBits = 8;
    // Print the 8-bit tile mask MSB-first as a run of 0/1 digits.
    for (unsigned I = MaxBits; I > 0; --I)
      OS << ((RegMask & (1 << (I - 1))) >> (I - 1));
    OS << '>';
    break;
  }
  case k_SVCR: {
    OS << getSVCR();
    break;
  }
  case k_Register:
    OS << "<register " << getReg().id() << ">";
    // Also print an attached shift/extend (fall through), unless there is
    // none: no amount and no explicit-amount flag.
    if (!getShiftExtendAmount() && !hasShiftExtendAmount())
      break;
    [[fallthrough]];
  case k_ShiftExtend:
    OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
       << getShiftExtendAmount();
    if (!hasShiftExtendAmount())
      OS << "<imp>";
    OS << '>';
    break;
  }
}
2770
2771/// @name Auto-generated Match Functions
2772/// {
2773
2775
2776/// }
2777
/// Map a NEON vector register name ("v0".."v31", case-insensitive) to the
/// corresponding Q-register, or 0 if the name does not match.
static unsigned MatchNeonVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("v0", AArch64::Q0)
      .Case("v1", AArch64::Q1)
      .Case("v2", AArch64::Q2)
      .Case("v3", AArch64::Q3)
      .Case("v4", AArch64::Q4)
      .Case("v5", AArch64::Q5)
      .Case("v6", AArch64::Q6)
      .Case("v7", AArch64::Q7)
      .Case("v8", AArch64::Q8)
      .Case("v9", AArch64::Q9)
      .Case("v10", AArch64::Q10)
      .Case("v11", AArch64::Q11)
      .Case("v12", AArch64::Q12)
      .Case("v13", AArch64::Q13)
      .Case("v14", AArch64::Q14)
      .Case("v15", AArch64::Q15)
      .Case("v16", AArch64::Q16)
      .Case("v17", AArch64::Q17)
      .Case("v18", AArch64::Q18)
      .Case("v19", AArch64::Q19)
      .Case("v20", AArch64::Q20)
      .Case("v21", AArch64::Q21)
      .Case("v22", AArch64::Q22)
      .Case("v23", AArch64::Q23)
      .Case("v24", AArch64::Q24)
      .Case("v25", AArch64::Q25)
      .Case("v26", AArch64::Q26)
      .Case("v27", AArch64::Q27)
      .Case("v28", AArch64::Q28)
      .Case("v29", AArch64::Q29)
      .Case("v30", AArch64::Q30)
      .Case("v31", AArch64::Q31)
      .Default(0);
}
2814
2815/// Returns an optional pair of (#elements, element-width) if Suffix
2816/// is a valid vector kind. Where the number of elements in a vector
2817/// or the vector width is implicit or explicitly unknown (but still a
2818/// valid suffix kind), 0 is used.
2819static std::optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2820 RegKind VectorKind) {
2821 std::pair<int, int> Res = {-1, -1};
2822
2823 switch (VectorKind) {
2824 case RegKind::NeonVector:
2826 .Case("", {0, 0})
2827 .Case(".1d", {1, 64})
2828 .Case(".1q", {1, 128})
2829 // '.2h' needed for fp16 scalar pairwise reductions
2830 .Case(".2h", {2, 16})
2831 .Case(".2b", {2, 8})
2832 .Case(".2s", {2, 32})
2833 .Case(".2d", {2, 64})
2834 // '.4b' is another special case for the ARMv8.2a dot product
2835 // operand
2836 .Case(".4b", {4, 8})
2837 .Case(".4h", {4, 16})
2838 .Case(".4s", {4, 32})
2839 .Case(".8b", {8, 8})
2840 .Case(".8h", {8, 16})
2841 .Case(".16b", {16, 8})
2842 // Accept the width neutral ones, too, for verbose syntax. If
2843 // those aren't used in the right places, the token operand won't
2844 // match so all will work out.
2845 .Case(".b", {0, 8})
2846 .Case(".h", {0, 16})
2847 .Case(".s", {0, 32})
2848 .Case(".d", {0, 64})
2849 .Default({-1, -1});
2850 break;
2851 case RegKind::SVEPredicateAsCounter:
2852 case RegKind::SVEPredicateVector:
2853 case RegKind::SVEDataVector:
2854 case RegKind::Matrix:
2856 .Case("", {0, 0})
2857 .Case(".b", {0, 8})
2858 .Case(".h", {0, 16})
2859 .Case(".s", {0, 32})
2860 .Case(".d", {0, 64})
2861 .Case(".q", {0, 128})
2862 .Default({-1, -1});
2863 break;
2864 default:
2865 llvm_unreachable("Unsupported RegKind");
2866 }
2867
2868 if (Res == std::make_pair(-1, -1))
2869 return std::nullopt;
2870
2871 return std::optional<std::pair<int, int>>(Res);
2872}
2873
2874static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2875 return parseVectorKind(Suffix, VectorKind).has_value();
2876}
2877
2879 return StringSwitch<unsigned>(Name.lower())
2880 .Case("z0", AArch64::Z0)
2881 .Case("z1", AArch64::Z1)
2882 .Case("z2", AArch64::Z2)
2883 .Case("z3", AArch64::Z3)
2884 .Case("z4", AArch64::Z4)
2885 .Case("z5", AArch64::Z5)
2886 .Case("z6", AArch64::Z6)
2887 .Case("z7", AArch64::Z7)
2888 .Case("z8", AArch64::Z8)
2889 .Case("z9", AArch64::Z9)
2890 .Case("z10", AArch64::Z10)
2891 .Case("z11", AArch64::Z11)
2892 .Case("z12", AArch64::Z12)
2893 .Case("z13", AArch64::Z13)
2894 .Case("z14", AArch64::Z14)
2895 .Case("z15", AArch64::Z15)
2896 .Case("z16", AArch64::Z16)
2897 .Case("z17", AArch64::Z17)
2898 .Case("z18", AArch64::Z18)
2899 .Case("z19", AArch64::Z19)
2900 .Case("z20", AArch64::Z20)
2901 .Case("z21", AArch64::Z21)
2902 .Case("z22", AArch64::Z22)
2903 .Case("z23", AArch64::Z23)
2904 .Case("z24", AArch64::Z24)
2905 .Case("z25", AArch64::Z25)
2906 .Case("z26", AArch64::Z26)
2907 .Case("z27", AArch64::Z27)
2908 .Case("z28", AArch64::Z28)
2909 .Case("z29", AArch64::Z29)
2910 .Case("z30", AArch64::Z30)
2911 .Case("z31", AArch64::Z31)
2912 .Default(0);
2913}
2914
2916 return StringSwitch<unsigned>(Name.lower())
2917 .Case("p0", AArch64::P0)
2918 .Case("p1", AArch64::P1)
2919 .Case("p2", AArch64::P2)
2920 .Case("p3", AArch64::P3)
2921 .Case("p4", AArch64::P4)
2922 .Case("p5", AArch64::P5)
2923 .Case("p6", AArch64::P6)
2924 .Case("p7", AArch64::P7)
2925 .Case("p8", AArch64::P8)
2926 .Case("p9", AArch64::P9)
2927 .Case("p10", AArch64::P10)
2928 .Case("p11", AArch64::P11)
2929 .Case("p12", AArch64::P12)
2930 .Case("p13", AArch64::P13)
2931 .Case("p14", AArch64::P14)
2932 .Case("p15", AArch64::P15)
2933 .Default(0);
2934}
2935
2937 return StringSwitch<unsigned>(Name.lower())
2938 .Case("pn0", AArch64::PN0)
2939 .Case("pn1", AArch64::PN1)
2940 .Case("pn2", AArch64::PN2)
2941 .Case("pn3", AArch64::PN3)
2942 .Case("pn4", AArch64::PN4)
2943 .Case("pn5", AArch64::PN5)
2944 .Case("pn6", AArch64::PN6)
2945 .Case("pn7", AArch64::PN7)
2946 .Case("pn8", AArch64::PN8)
2947 .Case("pn9", AArch64::PN9)
2948 .Case("pn10", AArch64::PN10)
2949 .Case("pn11", AArch64::PN11)
2950 .Case("pn12", AArch64::PN12)
2951 .Case("pn13", AArch64::PN13)
2952 .Case("pn14", AArch64::PN14)
2953 .Case("pn15", AArch64::PN15)
2954 .Default(0);
2955}
2956
2958 return StringSwitch<unsigned>(Name.lower())
2959 .Case("za0.d", AArch64::ZAD0)
2960 .Case("za1.d", AArch64::ZAD1)
2961 .Case("za2.d", AArch64::ZAD2)
2962 .Case("za3.d", AArch64::ZAD3)
2963 .Case("za4.d", AArch64::ZAD4)
2964 .Case("za5.d", AArch64::ZAD5)
2965 .Case("za6.d", AArch64::ZAD6)
2966 .Case("za7.d", AArch64::ZAD7)
2967 .Case("za0.s", AArch64::ZAS0)
2968 .Case("za1.s", AArch64::ZAS1)
2969 .Case("za2.s", AArch64::ZAS2)
2970 .Case("za3.s", AArch64::ZAS3)
2971 .Case("za0.h", AArch64::ZAH0)
2972 .Case("za1.h", AArch64::ZAH1)
2973 .Case("za0.b", AArch64::ZAB0)
2974 .Default(0);
2975}
2976
/// Map an SME matrix register name to its register number, or 0 on no match.
/// Accepts the array ("za"), plain tile ("zaN.t"), and horizontal/vertical
/// slice spellings ("zaNh.t"/"zaNv.t"); slices of the same tile map to the
/// same tile register.
static unsigned matchMatrixRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("za", AArch64::ZA)
      .Case("za0.q", AArch64::ZAQ0)
      .Case("za1.q", AArch64::ZAQ1)
      .Case("za2.q", AArch64::ZAQ2)
      .Case("za3.q", AArch64::ZAQ3)
      .Case("za4.q", AArch64::ZAQ4)
      .Case("za5.q", AArch64::ZAQ5)
      .Case("za6.q", AArch64::ZAQ6)
      .Case("za7.q", AArch64::ZAQ7)
      .Case("za8.q", AArch64::ZAQ8)
      .Case("za9.q", AArch64::ZAQ9)
      .Case("za10.q", AArch64::ZAQ10)
      .Case("za11.q", AArch64::ZAQ11)
      .Case("za12.q", AArch64::ZAQ12)
      .Case("za13.q", AArch64::ZAQ13)
      .Case("za14.q", AArch64::ZAQ14)
      .Case("za15.q", AArch64::ZAQ15)
      .Case("za0.d", AArch64::ZAD0)
      .Case("za1.d", AArch64::ZAD1)
      .Case("za2.d", AArch64::ZAD2)
      .Case("za3.d", AArch64::ZAD3)
      .Case("za4.d", AArch64::ZAD4)
      .Case("za5.d", AArch64::ZAD5)
      .Case("za6.d", AArch64::ZAD6)
      .Case("za7.d", AArch64::ZAD7)
      .Case("za0.s", AArch64::ZAS0)
      .Case("za1.s", AArch64::ZAS1)
      .Case("za2.s", AArch64::ZAS2)
      .Case("za3.s", AArch64::ZAS3)
      .Case("za0.h", AArch64::ZAH0)
      .Case("za1.h", AArch64::ZAH1)
      .Case("za0.b", AArch64::ZAB0)
      .Case("za0h.q", AArch64::ZAQ0)
      .Case("za1h.q", AArch64::ZAQ1)
      .Case("za2h.q", AArch64::ZAQ2)
      .Case("za3h.q", AArch64::ZAQ3)
      .Case("za4h.q", AArch64::ZAQ4)
      .Case("za5h.q", AArch64::ZAQ5)
      .Case("za6h.q", AArch64::ZAQ6)
      .Case("za7h.q", AArch64::ZAQ7)
      .Case("za8h.q", AArch64::ZAQ8)
      .Case("za9h.q", AArch64::ZAQ9)
      .Case("za10h.q", AArch64::ZAQ10)
      .Case("za11h.q", AArch64::ZAQ11)
      .Case("za12h.q", AArch64::ZAQ12)
      .Case("za13h.q", AArch64::ZAQ13)
      .Case("za14h.q", AArch64::ZAQ14)
      .Case("za15h.q", AArch64::ZAQ15)
      .Case("za0h.d", AArch64::ZAD0)
      .Case("za1h.d", AArch64::ZAD1)
      .Case("za2h.d", AArch64::ZAD2)
      .Case("za3h.d", AArch64::ZAD3)
      .Case("za4h.d", AArch64::ZAD4)
      .Case("za5h.d", AArch64::ZAD5)
      .Case("za6h.d", AArch64::ZAD6)
      .Case("za7h.d", AArch64::ZAD7)
      .Case("za0h.s", AArch64::ZAS0)
      .Case("za1h.s", AArch64::ZAS1)
      .Case("za2h.s", AArch64::ZAS2)
      .Case("za3h.s", AArch64::ZAS3)
      .Case("za0h.h", AArch64::ZAH0)
      .Case("za1h.h", AArch64::ZAH1)
      .Case("za0h.b", AArch64::ZAB0)
      .Case("za0v.q", AArch64::ZAQ0)
      .Case("za1v.q", AArch64::ZAQ1)
      .Case("za2v.q", AArch64::ZAQ2)
      .Case("za3v.q", AArch64::ZAQ3)
      .Case("za4v.q", AArch64::ZAQ4)
      .Case("za5v.q", AArch64::ZAQ5)
      .Case("za6v.q", AArch64::ZAQ6)
      .Case("za7v.q", AArch64::ZAQ7)
      .Case("za8v.q", AArch64::ZAQ8)
      .Case("za9v.q", AArch64::ZAQ9)
      .Case("za10v.q", AArch64::ZAQ10)
      .Case("za11v.q", AArch64::ZAQ11)
      .Case("za12v.q", AArch64::ZAQ12)
      .Case("za13v.q", AArch64::ZAQ13)
      .Case("za14v.q", AArch64::ZAQ14)
      .Case("za15v.q", AArch64::ZAQ15)
      .Case("za0v.d", AArch64::ZAD0)
      .Case("za1v.d", AArch64::ZAD1)
      .Case("za2v.d", AArch64::ZAD2)
      .Case("za3v.d", AArch64::ZAD3)
      .Case("za4v.d", AArch64::ZAD4)
      .Case("za5v.d", AArch64::ZAD5)
      .Case("za6v.d", AArch64::ZAD6)
      .Case("za7v.d", AArch64::ZAD7)
      .Case("za0v.s", AArch64::ZAS0)
      .Case("za1v.s", AArch64::ZAS1)
      .Case("za2v.s", AArch64::ZAS2)
      .Case("za3v.s", AArch64::ZAS3)
      .Case("za0v.h", AArch64::ZAH0)
      .Case("za1v.h", AArch64::ZAH1)
      .Case("za0v.b", AArch64::ZAB0)
      .Default(0);
}
3075
3076bool AArch64AsmParser::parseRegister(MCRegister &Reg, SMLoc &StartLoc,
3077 SMLoc &EndLoc) {
3078 return !tryParseRegister(Reg, StartLoc, EndLoc).isSuccess();
3079}
3080
3081ParseStatus AArch64AsmParser::tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
3082 SMLoc &EndLoc) {
3083 StartLoc = getLoc();
3084 ParseStatus Res = tryParseScalarRegister(Reg);
3085 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3086 return Res;
3087}
3088
3089// Matches a register name or register alias previously defined by '.req'
3090MCRegister AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
3091 RegKind Kind) {
3092 MCRegister Reg = MCRegister();
3093 if ((Reg = matchSVEDataVectorRegName(Name)))
3094 return Kind == RegKind::SVEDataVector ? Reg : MCRegister();
3095
3096 if ((Reg = matchSVEPredicateVectorRegName(Name)))
3097 return Kind == RegKind::SVEPredicateVector ? Reg : MCRegister();
3098
3100 return Kind == RegKind::SVEPredicateAsCounter ? Reg : MCRegister();
3101
3102 if ((Reg = MatchNeonVectorRegName(Name)))
3103 return Kind == RegKind::NeonVector ? Reg : MCRegister();
3104
3105 if ((Reg = matchMatrixRegName(Name)))
3106 return Kind == RegKind::Matrix ? Reg : MCRegister();
3107
3108 if (Name.equals_insensitive("zt0"))
3109 return Kind == RegKind::LookupTable ? unsigned(AArch64::ZT0) : 0;
3110
3111 // The parsed register must be of RegKind Scalar
3112 if ((Reg = MatchRegisterName(Name)))
3113 return (Kind == RegKind::Scalar) ? Reg : MCRegister();
3114
3115 if (!Reg) {
3116 // Handle a few common aliases of registers.
3117 if (MCRegister Reg = StringSwitch<unsigned>(Name.lower())
3118 .Case("fp", AArch64::FP)
3119 .Case("lr", AArch64::LR)
3120 .Case("x31", AArch64::XZR)
3121 .Case("w31", AArch64::WZR)
3122 .Default(0))
3123 return Kind == RegKind::Scalar ? Reg : MCRegister();
3124
3125 // Check for aliases registered via .req. Canonicalize to lower case.
3126 // That's more consistent since register names are case insensitive, and
3127 // it's how the original entry was passed in from MC/MCParser/AsmParser.
3128 auto Entry = RegisterReqs.find(Name.lower());
3129 if (Entry == RegisterReqs.end())
3130 return MCRegister();
3131
3132 // set Reg if the match is the right kind of register
3133 if (Kind == Entry->getValue().first)
3134 Reg = Entry->getValue().second;
3135 }
3136 return Reg;
3137}
3138
3139unsigned AArch64AsmParser::getNumRegsForRegKind(RegKind K) {
3140 switch (K) {
3141 case RegKind::Scalar:
3142 case RegKind::NeonVector:
3143 case RegKind::SVEDataVector:
3144 return 32;
3145 case RegKind::Matrix:
3146 case RegKind::SVEPredicateVector:
3147 case RegKind::SVEPredicateAsCounter:
3148 return 16;
3149 case RegKind::LookupTable:
3150 return 1;
3151 }
3152 llvm_unreachable("Unsupported RegKind");
3153}
3154
3155/// tryParseScalarRegister - Try to parse a register name. The token must be an
3156/// Identifier when called, and if it is a register name the token is eaten and
3157/// the register is added to the operand list.
3158ParseStatus AArch64AsmParser::tryParseScalarRegister(MCRegister &RegNum) {
3159 const AsmToken &Tok = getTok();
3160 if (Tok.isNot(AsmToken::Identifier))
3161 return ParseStatus::NoMatch;
3162
3163 std::string lowerCase = Tok.getString().lower();
3164 MCRegister Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
3165 if (!Reg)
3166 return ParseStatus::NoMatch;
3167
3168 RegNum = Reg;
3169 Lex(); // Eat identifier token.
3170 return ParseStatus::Success;
3171}
3172
3173/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
3174ParseStatus AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
3175 SMLoc S = getLoc();
3176
3177 if (getTok().isNot(AsmToken::Identifier))
3178 return Error(S, "Expected cN operand where 0 <= N <= 15");
3179
3180 StringRef Tok = getTok().getIdentifier();
3181 if (Tok[0] != 'c' && Tok[0] != 'C')
3182 return Error(S, "Expected cN operand where 0 <= N <= 15");
3183
3184 uint32_t CRNum;
3185 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
3186 if (BadNum || CRNum > 15)
3187 return Error(S, "Expected cN operand where 0 <= N <= 15");
3188
3189 Lex(); // Eat identifier token.
3190 Operands.push_back(
3191 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
3192 return ParseStatus::Success;
3193}
3194
3195// Either an identifier for named values or a 6-bit immediate.
3196ParseStatus AArch64AsmParser::tryParseRPRFMOperand(OperandVector &Operands) {
3197 SMLoc S = getLoc();
3198 const AsmToken &Tok = getTok();
3199
3200 unsigned MaxVal = 63;
3201
3202 // Immediate case, with optional leading hash:
3203 if (parseOptionalToken(AsmToken::Hash) ||
3204 Tok.is(AsmToken::Integer)) {
3205 const MCExpr *ImmVal;
3206 if (getParser().parseExpression(ImmVal))
3207 return ParseStatus::Failure;
3208
3209 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3210 if (!MCE)
3211 return TokError("immediate value expected for prefetch operand");
3212 unsigned prfop = MCE->getValue();
3213 if (prfop > MaxVal)
3214 return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
3215 "] expected");
3216
3217 auto RPRFM = AArch64RPRFM::lookupRPRFMByEncoding(MCE->getValue());
3218 Operands.push_back(AArch64Operand::CreatePrefetch(
3219 prfop, RPRFM ? RPRFM->Name : "", S, getContext()));
3220 return ParseStatus::Success;
3221 }
3222
3223 if (Tok.isNot(AsmToken::Identifier))
3224 return TokError("prefetch hint expected");
3225
3226 auto RPRFM = AArch64RPRFM::lookupRPRFMByName(Tok.getString());
3227 if (!RPRFM)
3228 return TokError("prefetch hint expected");
3229
3230 Operands.push_back(AArch64Operand::CreatePrefetch(
3231 RPRFM->Encoding, Tok.getString(), S, getContext()));
3232 Lex(); // Eat identifier token.
3233 return ParseStatus::Success;
3234}
3235
3236/// tryParsePrefetch - Try to parse a prefetch operand.
3237template <bool IsSVEPrefetch>
3238ParseStatus AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
3239 SMLoc S = getLoc();
3240 const AsmToken &Tok = getTok();
3241
3242 auto LookupByName = [](StringRef N) {
3243 if (IsSVEPrefetch) {
3244 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
3245 return std::optional<unsigned>(Res->Encoding);
3246 } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
3247 return std::optional<unsigned>(Res->Encoding);
3248 return std::optional<unsigned>();
3249 };
3250
3251 auto LookupByEncoding = [](unsigned E) {
3252 if (IsSVEPrefetch) {
3253 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
3254 return std::optional<StringRef>(Res->Name);
3255 } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
3256 return std::optional<StringRef>(Res->Name);
3257 return std::optional<StringRef>();
3258 };
3259 unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
3260
3261 // Either an identifier for named values or a 5-bit immediate.
3262 // Eat optional hash.
3263 if (parseOptionalToken(AsmToken::Hash) ||
3264 Tok.is(AsmToken::Integer)) {
3265 const MCExpr *ImmVal;
3266 if (getParser().parseExpression(ImmVal))
3267 return ParseStatus::Failure;
3268
3269 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3270 if (!MCE)
3271 return TokError("immediate value expected for prefetch operand");
3272 unsigned prfop = MCE->getValue();
3273 if (prfop > MaxVal)
3274 return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
3275 "] expected");
3276
3277 auto PRFM = LookupByEncoding(MCE->getValue());
3278 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, PRFM.value_or(""),
3279 S, getContext()));
3280 return ParseStatus::Success;
3281 }
3282
3283 if (Tok.isNot(AsmToken::Identifier))
3284 return TokError("prefetch hint expected");
3285
3286 auto PRFM = LookupByName(Tok.getString());
3287 if (!PRFM)
3288 return TokError("prefetch hint expected");
3289
3290 Operands.push_back(AArch64Operand::CreatePrefetch(
3291 *PRFM, Tok.getString(), S, getContext()));
3292 Lex(); // Eat identifier token.
3293 return ParseStatus::Success;
3294}
3295
3296/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
3297ParseStatus AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
3298 SMLoc S = getLoc();
3299 const AsmToken &Tok = getTok();
3300 if (Tok.isNot(AsmToken::Identifier))
3301 return TokError("invalid operand for instruction");
3302
3303 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
3304 if (!PSB)
3305 return TokError("invalid operand for instruction");
3306
3307 Operands.push_back(AArch64Operand::CreatePSBHint(
3308 PSB->Encoding, Tok.getString(), S, getContext()));
3309 Lex(); // Eat identifier token.
3310 return ParseStatus::Success;
3311}
3312
3313ParseStatus AArch64AsmParser::tryParseSyspXzrPair(OperandVector &Operands) {
3314 SMLoc StartLoc = getLoc();
3315
3316 MCRegister RegNum;
3317
3318 // The case where xzr, xzr is not present is handled by an InstAlias.
3319
3320 auto RegTok = getTok(); // in case we need to backtrack
3321 if (!tryParseScalarRegister(RegNum).isSuccess())
3322 return ParseStatus::NoMatch;
3323
3324 if (RegNum != AArch64::XZR) {
3325 getLexer().UnLex(RegTok);
3326 return ParseStatus::NoMatch;
3327 }
3328
3329 if (parseComma())
3330 return ParseStatus::Failure;
3331
3332 if (!tryParseScalarRegister(RegNum).isSuccess())
3333 return TokError("expected register operand");
3334
3335 if (RegNum != AArch64::XZR)
3336 return TokError("xzr must be followed by xzr");
3337
3338 // We need to push something, since we claim this is an operand in .td.
3339 // See also AArch64AsmParser::parseKeywordOperand.
3340 Operands.push_back(AArch64Operand::CreateReg(
3341 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3342
3343 return ParseStatus::Success;
3344}
3345
3346/// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
3347ParseStatus AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
3348 SMLoc S = getLoc();
3349 const AsmToken &Tok = getTok();
3350 if (Tok.isNot(AsmToken::Identifier))
3351 return TokError("invalid operand for instruction");
3352
3353 auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
3354 if (!BTI)
3355 return TokError("invalid operand for instruction");
3356
3357 Operands.push_back(AArch64Operand::CreateBTIHint(
3358 BTI->Encoding, Tok.getString(), S, getContext()));
3359 Lex(); // Eat identifier token.
3360 return ParseStatus::Success;
3361}
3362
3363/// tryParseCMHPriorityHint - Try to parse a CMHPriority operand
3364ParseStatus AArch64AsmParser::tryParseCMHPriorityHint(OperandVector &Operands) {
3365 SMLoc S = getLoc();
3366 const AsmToken &Tok = getTok();
3367 if (Tok.isNot(AsmToken::Identifier))
3368 return TokError("invalid operand for instruction");
3369
3370 auto CMHPriority =
3371 AArch64CMHPriorityHint::lookupCMHPriorityHintByName(Tok.getString());
3372 if (!CMHPriority)
3373 return TokError("invalid operand for instruction");
3374
3375 Operands.push_back(AArch64Operand::CreateCMHPriorityHint(
3376 CMHPriority->Encoding, Tok.getString(), S, getContext()));
3377 Lex(); // Eat identifier token.
3378 return ParseStatus::Success;
3379}
3380
3381/// tryParseTIndexHint - Try to parse a TIndex operand
3382ParseStatus AArch64AsmParser::tryParseTIndexHint(OperandVector &Operands) {
3383 SMLoc S = getLoc();
3384 const AsmToken &Tok = getTok();
3385 if (Tok.isNot(AsmToken::Identifier))
3386 return TokError("invalid operand for instruction");
3387
3388 auto TIndex = AArch64TIndexHint::lookupTIndexByName(Tok.getString());
3389 if (!TIndex)
3390 return TokError("invalid operand for instruction");
3391
3392 Operands.push_back(AArch64Operand::CreateTIndexHint(
3393 TIndex->Encoding, Tok.getString(), S, getContext()));
3394 Lex(); // Eat identifier token.
3395 return ParseStatus::Success;
3396}
3397
3398/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
3399/// instruction.
3400ParseStatus AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
3401 SMLoc S = getLoc();
3402 const MCExpr *Expr = nullptr;
3403
3404 if (getTok().is(AsmToken::Hash)) {
3405 Lex(); // Eat hash token.
3406 }
3407
3408 if (parseSymbolicImmVal(Expr))
3409 return ParseStatus::Failure;
3410
3411 AArch64::Specifier ELFSpec;
3412 AArch64::Specifier DarwinSpec;
3413 int64_t Addend;
3414 if (classifySymbolRef(Expr, ELFSpec, DarwinSpec, Addend)) {
3415 if (DarwinSpec == AArch64::S_None && ELFSpec == AArch64::S_INVALID) {
3416 // No modifier was specified at all; this is the syntax for an ELF basic
3417 // ADRP relocation (unfortunately).
3418 Expr =
3420 } else if ((DarwinSpec == AArch64::S_MACHO_GOTPAGE ||
3421 DarwinSpec == AArch64::S_MACHO_TLVPPAGE) &&
3422 Addend != 0) {
3423 return Error(S, "gotpage label reference not allowed an addend");
3424 } else if (DarwinSpec != AArch64::S_MACHO_PAGE &&
3425 DarwinSpec != AArch64::S_MACHO_GOTPAGE &&
3426 DarwinSpec != AArch64::S_MACHO_TLVPPAGE &&
3427 ELFSpec != AArch64::S_ABS_PAGE_NC &&
3428 ELFSpec != AArch64::S_GOT_PAGE &&
3429 ELFSpec != AArch64::S_GOT_AUTH_PAGE &&
3430 ELFSpec != AArch64::S_GOT_PAGE_LO15 &&
3431 ELFSpec != AArch64::S_GOTTPREL_PAGE &&
3432 ELFSpec != AArch64::S_TLSDESC_PAGE &&
3433 ELFSpec != AArch64::S_TLSDESC_AUTH_PAGE) {
3434 // The operand must be an @page or @gotpage qualified symbolref.
3435 return Error(S, "page or gotpage label reference expected");
3436 }
3437 }
3438
3439 // We have either a label reference possibly with addend or an immediate. The
3440 // addend is a raw value here. The linker will adjust it to only reference the
3441 // page.
3442 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3443 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3444
3445 return ParseStatus::Success;
3446}
3447
3448/// tryParseAdrLabel - Parse and validate a source label for the ADR
3449/// instruction.
3450ParseStatus AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
3451 SMLoc S = getLoc();
3452 const MCExpr *Expr = nullptr;
3453
3454 // Leave anything with a bracket to the default for SVE
3455 if (getTok().is(AsmToken::LBrac))
3456 return ParseStatus::NoMatch;
3457
3458 if (getTok().is(AsmToken::Hash))
3459 Lex(); // Eat hash token.
3460
3461 if (parseSymbolicImmVal(Expr))
3462 return ParseStatus::Failure;
3463
3464 AArch64::Specifier ELFSpec;
3465 AArch64::Specifier DarwinSpec;
3466 int64_t Addend;
3467 if (classifySymbolRef(Expr, ELFSpec, DarwinSpec, Addend)) {
3468 if (DarwinSpec == AArch64::S_None && ELFSpec == AArch64::S_INVALID) {
3469 // No modifier was specified at all; this is the syntax for an ELF basic
3470 // ADR relocation (unfortunately).
3472 } else if (ELFSpec != AArch64::S_GOT_AUTH_PAGE) {
3473 // For tiny code model, we use :got_auth: operator to fill 21-bit imm of
3474 // adr. It's not actually GOT entry page address but the GOT address
3475 // itself - we just share the same variant kind with :got_auth: operator
3476 // applied for adrp.
3477 // TODO: can we somehow get current TargetMachine object to call
3478 // getCodeModel() on it to ensure we are using tiny code model?
3479 return Error(S, "unexpected adr label");
3480 }
3481 }
3482
3483 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3484 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3485 return ParseStatus::Success;
3486}
3487
3488/// tryParseFPImm - A floating point immediate expression operand.
3489template <bool AddFPZeroAsLiteral>
3490ParseStatus AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
3491 SMLoc S = getLoc();
3492
3493 bool Hash = parseOptionalToken(AsmToken::Hash);
3494
3495 // Handle negation, as that still comes through as a separate token.
3496 bool isNegative = parseOptionalToken(AsmToken::Minus);
3497
3498 const AsmToken &Tok = getTok();
3499 if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
3500 if (!Hash)
3501 return ParseStatus::NoMatch;
3502 return TokError("invalid floating point immediate");
3503 }
3504
3505 // Parse hexadecimal representation.
3506 if (Tok.is(AsmToken::Integer) && Tok.getString().starts_with("0x")) {
3507 if (Tok.getIntVal() > 255 || isNegative)
3508 return TokError("encoded floating point value out of range");
3509
3511 Operands.push_back(
3512 AArch64Operand::CreateFPImm(F, true, S, getContext()));
3513 } else {
3514 // Parse FP representation.
3515 APFloat RealVal(APFloat::IEEEdouble());
3516 auto StatusOrErr =
3517 RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
3518 if (errorToBool(StatusOrErr.takeError()))
3519 return TokError("invalid floating point representation");
3520
3521 if (isNegative)
3522 RealVal.changeSign();
3523
3524 if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
3525 Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
3526 Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
3527 } else
3528 Operands.push_back(AArch64Operand::CreateFPImm(
3529 RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
3530 }
3531
3532 Lex(); // Eat the token.
3533
3534 return ParseStatus::Success;
3535}
3536
/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
/// a shift suffix, for example '#1, lsl #12'. Also dispatches immediate
/// ranges ('#1:3') and the SME ", vgx2"/", vgx4" vector-group suffix.
ParseStatus
AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
  SMLoc S = getLoc();

  if (getTok().is(AsmToken::Hash))
    Lex(); // Eat '#'
  else if (getTok().isNot(AsmToken::Integer))
    // Operand should start from # or should be integer, emit error otherwise.
    return ParseStatus::NoMatch;

  // 'N:M' after an integer is an immediate range, handled separately.
  if (getTok().is(AsmToken::Integer) &&
      getLexer().peekTok().is(AsmToken::Colon))
    return tryParseImmRange(Operands);

  const MCExpr *Imm = nullptr;
  if (parseSymbolicImmVal(Imm))
    return ParseStatus::Failure;
  else if (getTok().isNot(AsmToken::Comma)) {
    // Plain immediate with no suffix.
    Operands.push_back(
        AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
    return ParseStatus::Success;
  }

  // Eat ','
  Lex();
  StringRef VecGroup;
  // parseOptionalVGOperand returns false on a successful vgx match, in which
  // case the immediate and the vector-group token are pushed separately.
  if (!parseOptionalVGOperand(Operands, VecGroup)) {
    Operands.push_back(
        AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
    Operands.push_back(
        AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
    return ParseStatus::Success;
  }

  // The optional operand must be "lsl #N" where N is non-negative.
  if (!getTok().is(AsmToken::Identifier) ||
      !getTok().getIdentifier().equals_insensitive("lsl"))
    return Error(getLoc(), "only 'lsl #+N' valid after immediate");

  // Eat 'lsl'
  Lex();

  parseOptionalToken(AsmToken::Hash);

  if (getTok().isNot(AsmToken::Integer))
    return Error(getLoc(), "only 'lsl #+N' valid after immediate");

  int64_t ShiftAmount = getTok().getIntVal();

  if (ShiftAmount < 0)
    return Error(getLoc(), "positive shift amount required");
  Lex(); // Eat the number

  // Just in case the optional lsl #0 is used for immediates other than zero.
  if (ShiftAmount == 0 && Imm != nullptr) {
    Operands.push_back(
        AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
    return ParseStatus::Success;
  }

  Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S,
                                                      getLoc(), getContext()));
  return ParseStatus::Success;
}
3603
3604/// parseCondCodeString - Parse a Condition Code string, optionally returning a
3605/// suggestion to help common typos.
3607AArch64AsmParser::parseCondCodeString(StringRef Cond, std::string &Suggestion) {
3608 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3609 .Case("eq", AArch64CC::EQ)
3610 .Case("ne", AArch64CC::NE)
3611 .Case("cs", AArch64CC::HS)
3612 .Case("hs", AArch64CC::HS)
3613 .Case("cc", AArch64CC::LO)
3614 .Case("lo", AArch64CC::LO)
3615 .Case("mi", AArch64CC::MI)
3616 .Case("pl", AArch64CC::PL)
3617 .Case("vs", AArch64CC::VS)
3618 .Case("vc", AArch64CC::VC)
3619 .Case("hi", AArch64CC::HI)
3620 .Case("ls", AArch64CC::LS)
3621 .Case("ge", AArch64CC::GE)
3622 .Case("lt", AArch64CC::LT)
3623 .Case("gt", AArch64CC::GT)
3624 .Case("le", AArch64CC::LE)
3625 .Case("al", AArch64CC::AL)
3626 .Case("nv", AArch64CC::NV)
3627 // SVE condition code aliases:
3628 .Case("none", AArch64CC::EQ)
3629 .Case("any", AArch64CC::NE)
3630 .Case("nlast", AArch64CC::HS)
3631 .Case("last", AArch64CC::LO)
3632 .Case("first", AArch64CC::MI)
3633 .Case("nfrst", AArch64CC::PL)
3634 .Case("pmore", AArch64CC::HI)
3635 .Case("plast", AArch64CC::LS)
3636 .Case("tcont", AArch64CC::GE)
3637 .Case("tstop", AArch64CC::LT)
3638 .Default(AArch64CC::Invalid);
3639
3640 if (CC == AArch64CC::Invalid && Cond.lower() == "nfirst")
3641 Suggestion = "nfrst";
3642
3643 return CC;
3644}
3645
3646/// parseCondCode - Parse a Condition Code operand.
3647bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
3648 bool invertCondCode) {
3649 SMLoc S = getLoc();
3650 const AsmToken &Tok = getTok();
3651 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3652
3653 StringRef Cond = Tok.getString();
3654 std::string Suggestion;
3655 AArch64CC::CondCode CC = parseCondCodeString(Cond, Suggestion);
3656 if (CC == AArch64CC::Invalid) {
3657 std::string Msg = "invalid condition code";
3658 if (!Suggestion.empty())
3659 Msg += ", did you mean " + Suggestion + "?";
3660 return TokError(Msg);
3661 }
3662 Lex(); // Eat identifier token.
3663
3664 if (invertCondCode) {
3665 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
3666 return TokError("condition codes AL and NV are invalid for this instruction");
3668 }
3669
3670 Operands.push_back(
3671 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
3672 return false;
3673}
3674
3675ParseStatus AArch64AsmParser::tryParseSVCR(OperandVector &Operands) {
3676 const AsmToken &Tok = getTok();
3677 SMLoc S = getLoc();
3678
3679 if (Tok.isNot(AsmToken::Identifier))
3680 return TokError("invalid operand for instruction");
3681
3682 unsigned PStateImm = -1;
3683 const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.getString());
3684 if (!SVCR)
3685 return ParseStatus::NoMatch;
3686 if (SVCR->haveFeatures(getSTI().getFeatureBits()))
3687 PStateImm = SVCR->Encoding;
3688
3689 Operands.push_back(
3690 AArch64Operand::CreateSVCR(PStateImm, Tok.getString(), S, getContext()));
3691 Lex(); // Eat identifier token.
3692 return ParseStatus::Success;
3693}
3694
/// Try to parse an SME matrix operand: either the whole ZA array (with an
/// optional element-width suffix) or a tile/row/column register such as
/// "za0h.s" / "za3v.d", optionally followed by a '[' slice expression.
ParseStatus AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) {
  const AsmToken &Tok = getTok();
  SMLoc S = getLoc();

  StringRef Name = Tok.getString();

  // Whole-array form: "za" or "za.<b|h|s|d>".
  if (Name.equals_insensitive("za") || Name.starts_with_insensitive("za.")) {
    Lex(); // eat "za[.(b|h|s|d)]"
    unsigned ElementWidth = 0;
    auto DotPosition = Name.find('.');
    if (DotPosition != StringRef::npos) {
      // Suffix present: translate it into an element width.
      const auto &KindRes =
          parseVectorKind(Name.drop_front(DotPosition), RegKind::Matrix);
      if (!KindRes)
        return TokError(
            "Expected the register to be followed by element width suffix");
      ElementWidth = KindRes->second;
    }
    Operands.push_back(AArch64Operand::CreateMatrixRegister(
        AArch64::ZA, ElementWidth, MatrixKind::Array, S, getLoc(),
        getContext()));
    if (getLexer().is(AsmToken::LBrac)) {
      // There's no comma after matrix operand, so we can parse the next operand
      // immediately.
      if (parseOperand(Operands, false, false))
        return ParseStatus::NoMatch;
    }
    return ParseStatus::Success;
  }

  // Try to parse matrix register.
  MCRegister Reg = matchRegisterNameAlias(Name, RegKind::Matrix);
  if (!Reg)
    return ParseStatus::NoMatch;

  size_t DotPosition = Name.find('.');
  assert(DotPosition != StringRef::npos && "Unexpected register");

  // Split "za3v.d" into head "za3v" and suffix ".d"; the last head character
  // distinguishes row ('h'), column ('v'), or a plain tile.
  StringRef Head = Name.take_front(DotPosition);
  StringRef Tail = Name.drop_front(DotPosition);
  StringRef RowOrColumn = Head.take_back();

  MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn.lower())
                        .Case("h", MatrixKind::Row)
                        .Case("v", MatrixKind::Col)
                        .Default(MatrixKind::Tile);

  // Next up, parsing the suffix
  const auto &KindRes = parseVectorKind(Tail, RegKind::Matrix);
  if (!KindRes)
    return TokError(
        "Expected the register to be followed by element width suffix");
  unsigned ElementWidth = KindRes->second;

  Lex();

  Operands.push_back(AArch64Operand::CreateMatrixRegister(
      Reg, ElementWidth, Kind, S, getLoc(), getContext()));

  if (getLexer().is(AsmToken::LBrac)) {
    // There's no comma after matrix operand, so we can parse the next operand
    // immediately.
    if (parseOperand(Operands, false, false))
      return ParseStatus::NoMatch;
  }
  return ParseStatus::Success;
}
3762
3763/// tryParseOptionalShift - Some operands take an optional shift argument. Parse
3764/// them if present.
3765ParseStatus
3766AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
3767 const AsmToken &Tok = getTok();
3768 std::string LowerID = Tok.getString().lower();
3770 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
3771 .Case("lsl", AArch64_AM::LSL)
3772 .Case("lsr", AArch64_AM::LSR)
3773 .Case("asr", AArch64_AM::ASR)
3774 .Case("ror", AArch64_AM::ROR)
3775 .Case("msl", AArch64_AM::MSL)
3776 .Case("uxtb", AArch64_AM::UXTB)
3777 .Case("uxth", AArch64_AM::UXTH)
3778 .Case("uxtw", AArch64_AM::UXTW)
3779 .Case("uxtx", AArch64_AM::UXTX)
3780 .Case("sxtb", AArch64_AM::SXTB)
3781 .Case("sxth", AArch64_AM::SXTH)
3782 .Case("sxtw", AArch64_AM::SXTW)
3783 .Case("sxtx", AArch64_AM::SXTX)
3785
3787 return ParseStatus::NoMatch;
3788
3789 SMLoc S = Tok.getLoc();
3790 Lex();
3791
3792 bool Hash = parseOptionalToken(AsmToken::Hash);
3793
3794 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
3795 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
3796 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
3797 ShOp == AArch64_AM::MSL) {
3798 // We expect a number here.
3799 return TokError("expected #imm after shift specifier");
3800 }
3801
3802 // "extend" type operations don't need an immediate, #0 is implicit.
3803 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3804 Operands.push_back(
3805 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
3806 return ParseStatus::Success;
3807 }
3808
3809 // Make sure we do actually have a number, identifier or a parenthesized
3810 // expression.
3811 SMLoc E = getLoc();
3812 if (!getTok().is(AsmToken::Integer) && !getTok().is(AsmToken::LParen) &&
3813 !getTok().is(AsmToken::Identifier))
3814 return Error(E, "expected integer shift amount");
3815
3816 const MCExpr *ImmVal;
3817 if (getParser().parseExpression(ImmVal))
3818 return ParseStatus::Failure;
3819
3820 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3821 if (!MCE)
3822 return Error(E, "expected constant '#imm' after shift specifier");
3823
3824 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3825 Operands.push_back(AArch64Operand::CreateShiftExtend(
3826 ShOp, MCE->getValue(), true, S, E, getContext()));
3827 return ParseStatus::Success;
3828}
3829
3830static const struct Extension {
3831 const char *Name;
3833} ExtensionMap[] = {
3834 {"crc", {AArch64::FeatureCRC}},
3835 {"sm4", {AArch64::FeatureSM4}},
3836 {"sha3", {AArch64::FeatureSHA3}},
3837 {"sha2", {AArch64::FeatureSHA2}},
3838 {"aes", {AArch64::FeatureAES}},
3839 {"crypto", {AArch64::FeatureCrypto}},
3840 {"fp", {AArch64::FeatureFPARMv8}},
3841 {"simd", {AArch64::FeatureNEON}},
3842 {"ras", {AArch64::FeatureRAS}},
3843 {"rasv2", {AArch64::FeatureRASv2}},
3844 {"lse", {AArch64::FeatureLSE}},
3845 {"predres", {AArch64::FeaturePredRes}},
3846 {"predres2", {AArch64::FeatureSPECRES2}},
3847 {"ccdp", {AArch64::FeatureCacheDeepPersist}},
3848 {"mte", {AArch64::FeatureMTE}},
3849 {"memtag", {AArch64::FeatureMTE}},
3850 {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
3851 {"pan", {AArch64::FeaturePAN}},
3852 {"pan-rwv", {AArch64::FeaturePAN_RWV}},
3853 {"ccpp", {AArch64::FeatureCCPP}},
3854 {"rcpc", {AArch64::FeatureRCPC}},
3855 {"rng", {AArch64::FeatureRandGen}},
3856 {"sve", {AArch64::FeatureSVE}},
3857 {"sve-b16b16", {AArch64::FeatureSVEB16B16}},
3858 {"sve2", {AArch64::FeatureSVE2}},
3859 {"sve-aes", {AArch64::FeatureSVEAES}},
3860 {"sve2-aes", {AArch64::FeatureAliasSVE2AES, AArch64::FeatureSVEAES}},
3861 {"sve-sm4", {AArch64::FeatureSVESM4}},
3862 {"sve2-sm4", {AArch64::FeatureAliasSVE2SM4, AArch64::FeatureSVESM4}},
3863 {"sve-sha3", {AArch64::FeatureSVESHA3}},
3864 {"sve2-sha3", {AArch64::FeatureAliasSVE2SHA3, AArch64::FeatureSVESHA3}},
3865 {"sve-bitperm", {AArch64::FeatureSVEBitPerm}},
3866 {"sve2-bitperm",
3867 {AArch64::FeatureAliasSVE2BitPerm, AArch64::FeatureSVEBitPerm,
3868 AArch64::FeatureSVE2}},
3869 {"sve2p1", {AArch64::FeatureSVE2p1}},
3870 {"ls64", {AArch64::FeatureLS64}},
3871 {"xs", {AArch64::FeatureXS}},
3872 {"pauth", {AArch64::FeaturePAuth}},
3873 {"flagm", {AArch64::FeatureFlagM}},
3874 {"rme", {AArch64::FeatureRME}},
3875 {"sme", {AArch64::FeatureSME}},
3876 {"sme-f64f64", {AArch64::FeatureSMEF64F64}},
3877 {"sme-f16f16", {AArch64::FeatureSMEF16F16}},
3878 {"sme-i16i64", {AArch64::FeatureSMEI16I64}},
3879 {"sme2", {AArch64::FeatureSME2}},
3880 {"sme2p1", {AArch64::FeatureSME2p1}},
3881 {"sme-b16b16", {AArch64::FeatureSMEB16B16}},
3882 {"hbc", {AArch64::FeatureHBC}},
3883 {"mops", {AArch64::FeatureMOPS}},
3884 {"mec", {AArch64::FeatureMEC}},
3885 {"the", {AArch64::FeatureTHE}},
3886 {"d128", {AArch64::FeatureD128}},
3887 {"lse128", {AArch64::FeatureLSE128}},
3888 {"ite", {AArch64::FeatureITE}},
3889 {"cssc", {AArch64::FeatureCSSC}},
3890 {"rcpc3", {AArch64::FeatureRCPC3}},
3891 {"gcs", {AArch64::FeatureGCS}},
3892 {"bf16", {AArch64::FeatureBF16}},
3893 {"compnum", {AArch64::FeatureComplxNum}},
3894 {"dotprod", {AArch64::FeatureDotProd}},
3895 {"f32mm", {AArch64::FeatureMatMulFP32}},
3896 {"f64mm", {AArch64::FeatureMatMulFP64}},
3897 {"fp16", {AArch64::FeatureFullFP16}},
3898 {"fp16fml", {AArch64::FeatureFP16FML}},
3899 {"i8mm", {AArch64::FeatureMatMulInt8}},
3900 {"lor", {AArch64::FeatureLOR}},
3901 {"profile", {AArch64::FeatureSPE}},
3902 // "rdma" is the name documented by binutils for the feature, but
3903 // binutils also accepts incomplete prefixes of features, so "rdm"
3904 // works too. Support both spellings here.
3905 {"rdm", {AArch64::FeatureRDM}},
3906 {"rdma", {AArch64::FeatureRDM}},
3907 {"sb", {AArch64::FeatureSB}},
3908 {"ssbs", {AArch64::FeatureSSBS}},
3909 {"fp8", {AArch64::FeatureFP8}},
3910 {"faminmax", {AArch64::FeatureFAMINMAX}},
3911 {"fp8fma", {AArch64::FeatureFP8FMA}},
3912 {"ssve-fp8fma", {AArch64::FeatureSSVE_FP8FMA}},
3913 {"fp8dot2", {AArch64::FeatureFP8DOT2}},
3914 {"ssve-fp8dot2", {AArch64::FeatureSSVE_FP8DOT2}},
3915 {"fp8dot4", {AArch64::FeatureFP8DOT4}},
3916 {"ssve-fp8dot4", {AArch64::FeatureSSVE_FP8DOT4}},
3917 {"lut", {AArch64::FeatureLUT}},
3918 {"sme-lutv2", {AArch64::FeatureSME_LUTv2}},
3919 {"sme-f8f16", {AArch64::FeatureSMEF8F16}},
3920 {"sme-f8f32", {AArch64::FeatureSMEF8F32}},
3921 {"sme-fa64", {AArch64::FeatureSMEFA64}},
3922 {"cpa", {AArch64::FeatureCPA}},
3923 {"tlbiw", {AArch64::FeatureTLBIW}},
3924 {"pops", {AArch64::FeaturePoPS}},
3925 {"cmpbr", {AArch64::FeatureCMPBR}},
3926 {"f8f32mm", {AArch64::FeatureF8F32MM}},
3927 {"f8f16mm", {AArch64::FeatureF8F16MM}},
3928 {"fprcvt", {AArch64::FeatureFPRCVT}},
3929 {"lsfe", {AArch64::FeatureLSFE}},
3930 {"sme2p2", {AArch64::FeatureSME2p2}},
3931 {"ssve-aes", {AArch64::FeatureSSVE_AES}},
3932 {"sve2p2", {AArch64::FeatureSVE2p2}},
3933 {"sve-aes2", {AArch64::FeatureSVEAES2}},
3934 {"sve-bfscale", {AArch64::FeatureSVEBFSCALE}},
3935 {"sve-f16f32mm", {AArch64::FeatureSVE_F16F32MM}},
3936 {"lsui", {AArch64::FeatureLSUI}},
3937 {"occmo", {AArch64::FeatureOCCMO}},
3938 {"ssve-bitperm", {AArch64::FeatureSSVE_BitPerm}},
3939 {"sme-mop4", {AArch64::FeatureSME_MOP4}},
3940 {"sme-tmop", {AArch64::FeatureSME_TMOP}},
3941 {"lscp", {AArch64::FeatureLSCP}},
3942 {"tlbid", {AArch64::FeatureTLBID}},
3943 {"mpamv2", {AArch64::FeatureMPAMv2}},
3944 {"mtetc", {AArch64::FeatureMTETC}},
3945 {"gcie", {AArch64::FeatureGCIE}},
3946 {"sme2p3", {AArch64::FeatureSME2p3}},
3947 {"sve2p3", {AArch64::FeatureSVE2p3}},
3948 {"sve-b16mm", {AArch64::FeatureSVE_B16MM}},
3949 {"f16mm", {AArch64::FeatureF16MM}},
3950 {"f16f32dot", {AArch64::FeatureF16F32DOT}},
3951 {"f16f32mm", {AArch64::FeatureF16F32MM}},
3952 {"mops-go", {AArch64::FeatureMOPS_GO}},
3953 {"poe2", {AArch64::FeatureS1POE2}},
3954 {"tev", {AArch64::FeatureTEV}},
3955 {"btie", {AArch64::FeatureBTIE}},
3956 {"dit", {AArch64::FeatureDIT}},
3957 {"brbe", {AArch64::FeatureBRBE}},
3958 {"bti", {AArch64::FeatureBranchTargetId}},
3959 {"fcma", {AArch64::FeatureComplxNum}},
3960 {"jscvt", {AArch64::FeatureJS}},
3961 {"pauth-lr", {AArch64::FeaturePAuthLR}},
3962 {"ssve-fexpa", {AArch64::FeatureSSVE_FEXPA}},
3963 {"wfxt", {AArch64::FeatureWFxT}},
3965
3966static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
3967 if (FBS[AArch64::HasV8_0aOps])
3968 Str += "ARMv8a";
3969 if (FBS[AArch64::HasV8_1aOps])
3970 Str += "ARMv8.1a";
3971 else if (FBS[AArch64::HasV8_2aOps])
3972 Str += "ARMv8.2a";
3973 else if (FBS[AArch64::HasV8_3aOps])
3974 Str += "ARMv8.3a";
3975 else if (FBS[AArch64::HasV8_4aOps])
3976 Str += "ARMv8.4a";
3977 else if (FBS[AArch64::HasV8_5aOps])
3978 Str += "ARMv8.5a";
3979 else if (FBS[AArch64::HasV8_6aOps])
3980 Str += "ARMv8.6a";
3981 else if (FBS[AArch64::HasV8_7aOps])
3982 Str += "ARMv8.7a";
3983 else if (FBS[AArch64::HasV8_8aOps])
3984 Str += "ARMv8.8a";
3985 else if (FBS[AArch64::HasV8_9aOps])
3986 Str += "ARMv8.9a";
3987 else if (FBS[AArch64::HasV9_0aOps])
3988 Str += "ARMv9-a";
3989 else if (FBS[AArch64::HasV9_1aOps])
3990 Str += "ARMv9.1a";
3991 else if (FBS[AArch64::HasV9_2aOps])
3992 Str += "ARMv9.2a";
3993 else if (FBS[AArch64::HasV9_3aOps])
3994 Str += "ARMv9.3a";
3995 else if (FBS[AArch64::HasV9_4aOps])
3996 Str += "ARMv9.4a";
3997 else if (FBS[AArch64::HasV9_5aOps])
3998 Str += "ARMv9.5a";
3999 else if (FBS[AArch64::HasV9_6aOps])
4000 Str += "ARMv9.6a";
4001 else if (FBS[AArch64::HasV9_7aOps])
4002 Str += "ARMv9.7a";
4003 else if (FBS[AArch64::HasV8_0rOps])
4004 Str += "ARMv8r";
4005 else {
4006 SmallVector<std::string, 2> ExtMatches;
4007 for (const auto& Ext : ExtensionMap) {
4008 // Use & in case multiple features are enabled
4009 if ((FBS & Ext.Features) != FeatureBitset())
4010 ExtMatches.push_back(Ext.Name);
4011 }
4012 Str += !ExtMatches.empty() ? llvm::join(ExtMatches, ", ") : "(unknown)";
4013 }
4014}
4015
4016void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
4017 SMLoc S) {
4018 const uint16_t Op2 = Encoding & 7;
4019 const uint16_t Cm = (Encoding & 0x78) >> 3;
4020 const uint16_t Cn = (Encoding & 0x780) >> 7;
4021 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
4022
4023 const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
4024
4025 Operands.push_back(
4026 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
4027 Operands.push_back(
4028 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
4029 Operands.push_back(
4030 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
4031 Expr = MCConstantExpr::create(Op2, getContext());
4032 Operands.push_back(
4033 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
4034}
4035
4036/// parseSysAlias - The IC, DC, AT, TLBI, MLBI and GIC{R} and GSB instructions
4037/// are simple aliases for the SYS instruction. Parse them specially so that
4038/// we create a SYS MCInst.
4039bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
4040 OperandVector &Operands) {
4041 if (Name.contains('.'))
4042 return TokError("invalid operand");
4043
4044 Mnemonic = Name;
4045 Operands.push_back(AArch64Operand::CreateToken("sys", NameLoc, getContext()));
4046
4047 const AsmToken &Tok = getTok();
4048 StringRef Op = Tok.getString();
4049 SMLoc S = Tok.getLoc();
4050 bool ExpectRegister = true;
4051 bool OptionalRegister = false;
4052 bool hasAll = getSTI().hasFeature(AArch64::FeatureAll);
4053 bool hasTLBID = getSTI().hasFeature(AArch64::FeatureTLBID);
4054
4055 if (Mnemonic == "ic") {
4056 const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
4057 if (!IC)
4058 return TokError("invalid operand for IC instruction");
4059 else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
4060 std::string Str("IC " + std::string(IC->Name) + " requires: ");
4062 return TokError(Str);
4063 }
4064 ExpectRegister = IC->NeedsReg;
4065 createSysAlias(IC->Encoding, Operands, S);
4066 } else if (Mnemonic == "dc") {
4067 const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
4068 if (!DC)
4069 return TokError("invalid operand for DC instruction");
4070 else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
4071 std::string Str("DC " + std::string(DC->Name) + " requires: ");
4073 return TokError(Str);
4074 }
4075 createSysAlias(DC->Encoding, Operands, S);
4076 } else if (Mnemonic == "at") {
4077 const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
4078 if (!AT)
4079 return TokError("invalid operand for AT instruction");
4080 else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
4081 std::string Str("AT " + std::string(AT->Name) + " requires: ");
4083 return TokError(Str);
4084 }
4085 createSysAlias(AT->Encoding, Operands, S);
4086 } else if (Mnemonic == "tlbi") {
4087 const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
4088 if (!TLBI)
4089 return TokError("invalid operand for TLBI instruction");
4090 else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
4091 std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
4093 return TokError(Str);
4094 }
4095 ExpectRegister = TLBI->RegUse == REG_REQUIRED;
4096 if (hasAll || hasTLBID)
4097 OptionalRegister = TLBI->RegUse == REG_OPTIONAL;
4098 createSysAlias(TLBI->Encoding, Operands, S);
4099 } else if (Mnemonic == "mlbi") {
4100 const AArch64MLBI::MLBI *MLBI = AArch64MLBI::lookupMLBIByName(Op);
4101 if (!MLBI)
4102 return TokError("invalid operand for MLBI instruction");
4103 else if (!MLBI->haveFeatures(getSTI().getFeatureBits())) {
4104 std::string Str("MLBI " + std::string(MLBI->Name) + " requires: ");
4106 return TokError(Str);
4107 }
4108 ExpectRegister = MLBI->NeedsReg;
4109 createSysAlias(MLBI->Encoding, Operands, S);
4110 } else if (Mnemonic == "gic") {
4111 const AArch64GIC::GIC *GIC = AArch64GIC::lookupGICByName(Op);
4112 if (!GIC)
4113 return TokError("invalid operand for GIC instruction");
4114 else if (!GIC->haveFeatures(getSTI().getFeatureBits())) {
4115 std::string Str("GIC " + std::string(GIC->Name) + " requires: ");
4117 return TokError(Str);
4118 }
4119 ExpectRegister = GIC->NeedsReg;
4120 createSysAlias(GIC->Encoding, Operands, S);
4121 } else if (Mnemonic == "gsb") {
4122 const AArch64GSB::GSB *GSB = AArch64GSB::lookupGSBByName(Op);
4123 if (!GSB)
4124 return TokError("invalid operand for GSB instruction");
4125 else if (!GSB->haveFeatures(getSTI().getFeatureBits())) {
4126 std::string Str("GSB " + std::string(GSB->Name) + " requires: ");
4128 return TokError(Str);
4129 }
4130 ExpectRegister = false;
4131 createSysAlias(GSB->Encoding, Operands, S);
4132 } else if (Mnemonic == "plbi") {
4133 const AArch64PLBI::PLBI *PLBI = AArch64PLBI::lookupPLBIByName(Op);
4134 if (!PLBI)
4135 return TokError("invalid operand for PLBI instruction");
4136 else if (!PLBI->haveFeatures(getSTI().getFeatureBits())) {
4137 std::string Str("PLBI " + std::string(PLBI->Name) + " requires: ");
4139 return TokError(Str);
4140 }
4141 ExpectRegister = PLBI->RegUse == REG_REQUIRED;
4142 if (hasAll || hasTLBID)
4143 OptionalRegister = PLBI->RegUse == REG_OPTIONAL;
4144 createSysAlias(PLBI->Encoding, Operands, S);
4145 } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp" ||
4146 Mnemonic == "cosp") {
4147
4148 if (Op.lower() != "rctx")
4149 return TokError("invalid operand for prediction restriction instruction");
4150
4151 bool hasPredres = hasAll || getSTI().hasFeature(AArch64::FeaturePredRes);
4152 bool hasSpecres2 = hasAll || getSTI().hasFeature(AArch64::FeatureSPECRES2);
4153
4154 if (Mnemonic == "cosp" && !hasSpecres2)
4155 return TokError("COSP requires: predres2");
4156 if (!hasPredres)
4157 return TokError(Mnemonic.upper() + "RCTX requires: predres");
4158
4159 uint16_t PRCTX_Op2 = Mnemonic == "cfp" ? 0b100
4160 : Mnemonic == "dvp" ? 0b101
4161 : Mnemonic == "cosp" ? 0b110
4162 : Mnemonic == "cpp" ? 0b111
4163 : 0;
4164 assert(PRCTX_Op2 &&
4165 "Invalid mnemonic for prediction restriction instruction");
4166 const auto SYS_3_7_3 = 0b01101110011; // op=3, CRn=7, CRm=3
4167 const auto Encoding = SYS_3_7_3 << 3 | PRCTX_Op2;
4168
4169 createSysAlias(Encoding, Operands, S);
4170 }
4171
4172 Lex(); // Eat operand.
4173
4174 bool HasRegister = false;
4175
4176 // Check for the optional register operand.
4177 if (parseOptionalToken(AsmToken::Comma)) {
4178 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
4179 return TokError("expected register operand");
4180 HasRegister = true;
4181 }
4182
4183 if (!OptionalRegister) {
4184 if (ExpectRegister && !HasRegister)
4185 return TokError("specified " + Mnemonic + " op requires a register");
4186 else if (!ExpectRegister && HasRegister)
4187 return TokError("specified " + Mnemonic + " op does not use a register");
4188 }
4189
4190 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
4191 return true;
4192
4193 return false;
4194}
4195
4196/// parseSyslAlias - The GICR instructions are simple aliases for
4197/// the SYSL instruction. Parse them specially so that we create a
4198/// SYS MCInst.
4199bool AArch64AsmParser::parseSyslAlias(StringRef Name, SMLoc NameLoc,
4200 OperandVector &Operands) {
4201
4202 Mnemonic = Name;
4203 Operands.push_back(
4204 AArch64Operand::CreateToken("sysl", NameLoc, getContext()));
4205
4206 // Now expect two operands (identifier + register)
4207 SMLoc startLoc = getLoc();
4208 const AsmToken &regTok = getTok();
4209 StringRef reg = regTok.getString();
4210 MCRegister Reg = matchRegisterNameAlias(reg.lower(), RegKind::Scalar);
4211 if (!Reg)
4212 return TokError("expected register operand");
4213
4214 Operands.push_back(AArch64Operand::CreateReg(
4215 Reg, RegKind::Scalar, startLoc, getLoc(), getContext(), EqualsReg));
4216
4217 Lex(); // Eat token
4218 if (parseToken(AsmToken::Comma))
4219 return true;
4220
4221 // Check for identifier
4222 const AsmToken &operandTok = getTok();
4223 StringRef Op = operandTok.getString();
4224 SMLoc S2 = operandTok.getLoc();
4225 Lex(); // Eat token
4226
4227 if (Mnemonic == "gicr") {
4228 const AArch64GICR::GICR *GICR = AArch64GICR::lookupGICRByName(Op);
4229 if (!GICR)
4230 return Error(S2, "invalid operand for GICR instruction");
4231 else if (!GICR->haveFeatures(getSTI().getFeatureBits())) {
4232 std::string Str("GICR " + std::string(GICR->Name) + " requires: ");
4234 return Error(S2, Str);
4235 }
4236 createSysAlias(GICR->Encoding, Operands, S2);
4237 }
4238
4239 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
4240 return true;
4241
4242 return false;
4243}
4244
4245/// parseSyspAlias - The TLBIP instructions are simple aliases for
4246/// the SYSP instruction. Parse them specially so that we create a SYSP MCInst.
4247bool AArch64AsmParser::parseSyspAlias(StringRef Name, SMLoc NameLoc,
4248 OperandVector &Operands) {
4249 if (Name.contains('.'))
4250 return TokError("invalid operand");
4251
4252 Mnemonic = Name;
4253 Operands.push_back(
4254 AArch64Operand::CreateToken("sysp", NameLoc, getContext()));
4255
4256 const AsmToken &Tok = getTok();
4257 StringRef Op = Tok.getString();
4258 SMLoc S = Tok.getLoc();
4259
4260 if (Mnemonic == "tlbip") {
4261 const AArch64TLBIP::TLBIP *TLBIP = AArch64TLBIP::lookupTLBIPByName(Op);
4262 if (!TLBIP)
4263 return TokError("invalid operand for TLBIP instruction");
4264 if (!getSTI().hasFeature(AArch64::FeatureD128) &&
4265 !getSTI().hasFeature(AArch64::FeatureAll))
4266 return TokError("instruction requires: d128");
4267 if (!TLBIP->haveFeatures(getSTI().getFeatureBits())) {
4268 std::string Str("instruction requires: ");
4270 return TokError(Str);
4271 }
4272 createSysAlias(TLBIP->Encoding, Operands, S);
4273 }
4274
4275 Lex(); // Eat operand.
4276
4277 if (parseComma())
4278 return true;
4279
4280 if (Tok.isNot(AsmToken::Identifier))
4281 return TokError("expected register identifier");
4282 auto Result = tryParseSyspXzrPair(Operands);
4283 if (Result.isNoMatch())
4284 Result = tryParseGPRSeqPair(Operands);
4285 if (!Result.isSuccess())
4286 return TokError("specified " + Mnemonic +
4287 " op requires a pair of registers");
4288
4289 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
4290 return true;
4291
4292 return false;
4293}
4294
/// Parse the operand of a barrier instruction (DSB/DMB/ISB/TSB): either an
/// immediate in [0, 15] or a named barrier option. DSB operands that cannot
/// match here return NoMatch so the nXS variant parser can try them.
ParseStatus AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = getTok();

  // TSB only accepts the named 'csync' option, never an immediate.
  if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier))
    return TokError("'csync' operand expected");
  if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
    // Immediate operand.
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    // Remember the integer token so it can be pushed back for the nXS parser.
    AsmToken IntTok = Tok;
    if (getParser().parseExpression(ImmVal))
      return ParseStatus::Failure;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE)
      return Error(ExprLoc, "immediate value expected for barrier operand");
    int64_t Value = MCE->getValue();
    if (Mnemonic == "dsb" && Value > 15) {
      // This case is a no match here, but it might be matched by the nXS
      // variant. Deliberately not unlex the optional '#' as it is not necessary
      // to characterize an integer immediate.
      Parser.getLexer().UnLex(IntTok);
      return ParseStatus::NoMatch;
    }
    if (Value < 0 || Value > 15)
      return Error(ExprLoc, "barrier operand out of range");
    // Attach the symbolic name for this encoding when one exists.
    auto DB = AArch64DB::lookupDBByEncoding(Value);
    Operands.push_back(AArch64Operand::CreateBarrier(Value, DB ? DB->Name : "",
                                                     ExprLoc, getContext(),
                                                     false /*hasnXSModifier*/));
    return ParseStatus::Success;
  }

  if (Tok.isNot(AsmToken::Identifier))
    return TokError("invalid operand for instruction");

  // Named option: look it up in both the TSB and DB (DMB/DSB/ISB) tables.
  StringRef Operand = Tok.getString();
  auto TSB = AArch64TSB::lookupTSBByName(Operand);
  auto DB = AArch64DB::lookupDBByName(Operand);
  // The only valid named option for ISB is 'sy'
  if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy))
    return TokError("'sy' or #imm operand expected");
  // The only valid named option for TSB is 'csync'
  if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync))
    return TokError("'csync' operand expected");
  if (!DB && !TSB) {
    if (Mnemonic == "dsb") {
      // This case is a no match here, but it might be matched by the nXS
      // variant.
      return ParseStatus::NoMatch;
    }
    return TokError("invalid barrier option name");
  }

  Operands.push_back(AArch64Operand::CreateBarrier(
      DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(),
      getContext(), false /*hasnXSModifier*/));
  Lex(); // Consume the option

  return ParseStatus::Success;
}
4356
4357ParseStatus
4358AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
4359 const AsmToken &Tok = getTok();
4360
4361 assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands");
4362 if (Mnemonic != "dsb")
4363 return ParseStatus::Failure;
4364
4365 if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
4366 // Immediate operand.
4367 const MCExpr *ImmVal;
4368 SMLoc ExprLoc = getLoc();
4369 if (getParser().parseExpression(ImmVal))
4370 return ParseStatus::Failure;
4371 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4372 if (!MCE)
4373 return Error(ExprLoc, "immediate value expected for barrier operand");
4374 int64_t Value = MCE->getValue();
4375 // v8.7-A DSB in the nXS variant accepts only the following immediate
4376 // values: 16, 20, 24, 28.
4377 if (Value != 16 && Value != 20 && Value != 24 && Value != 28)
4378 return Error(ExprLoc, "barrier operand out of range");
4379 auto DB = AArch64DBnXS::lookupDBnXSByImmValue(Value);
4380 Operands.push_back(AArch64Operand::CreateBarrier(DB->Encoding, DB->Name,
4381 ExprLoc, getContext(),
4382 true /*hasnXSModifier*/));
4383 return ParseStatus::Success;
4384 }
4385
4386 if (Tok.isNot(AsmToken::Identifier))
4387 return TokError("invalid operand for instruction");
4388
4389 StringRef Operand = Tok.getString();
4390 auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);
4391
4392 if (!DB)
4393 return TokError("invalid barrier option name");
4394
4395 Operands.push_back(
4396 AArch64Operand::CreateBarrier(DB->Encoding, Tok.getString(), getLoc(),
4397 getContext(), true /*hasnXSModifier*/));
4398 Lex(); // Consume the option
4399
4400 return ParseStatus::Success;
4401}
4402
4403ParseStatus AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
4404 const AsmToken &Tok = getTok();
4405
4406 if (Tok.isNot(AsmToken::Identifier))
4407 return ParseStatus::NoMatch;
4408
4409 if (AArch64SVCR::lookupSVCRByName(Tok.getString()))
4410 return ParseStatus::NoMatch;
4411
4412 int MRSReg, MSRReg;
4413 auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
4414 if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
4415 MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
4416 MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
4417 } else
4418 MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
4419
4420 unsigned PStateImm = -1;
4421 auto PState15 = AArch64PState::lookupPStateImm0_15ByName(Tok.getString());
4422 if (PState15 && PState15->haveFeatures(getSTI().getFeatureBits()))
4423 PStateImm = PState15->Encoding;
4424 if (!PState15) {
4425 auto PState1 = AArch64PState::lookupPStateImm0_1ByName(Tok.getString());
4426 if (PState1 && PState1->haveFeatures(getSTI().getFeatureBits()))
4427 PStateImm = PState1->Encoding;
4428 }
4429
4430 Operands.push_back(
4431 AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
4432 PStateImm, getContext()));
4433 Lex(); // Eat identifier
4434
4435 return ParseStatus::Success;
4436}
4437
4438ParseStatus
4439AArch64AsmParser::tryParsePHintInstOperand(OperandVector &Operands) {
4440 SMLoc S = getLoc();
4441 const AsmToken &Tok = getTok();
4442 if (Tok.isNot(AsmToken::Identifier))
4443 return TokError("invalid operand for instruction");
4444
4446 if (!PH)
4447 return TokError("invalid operand for instruction");
4448
4449 Operands.push_back(AArch64Operand::CreatePHintInst(
4450 PH->Encoding, Tok.getString(), S, getContext()));
4451 Lex(); // Eat identifier token.
4452 return ParseStatus::Success;
4453}
4454
4455/// tryParseNeonVectorRegister - Parse a vector register operand.
4456bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
4457 if (getTok().isNot(AsmToken::Identifier))
4458 return true;
4459
4460 SMLoc S = getLoc();
4461 // Check for a vector register specifier first.
4462 StringRef Kind;
4463 MCRegister Reg;
4464 ParseStatus Res = tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
4465 if (!Res.isSuccess())
4466 return true;
4467
4468 const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
4469 if (!KindRes)
4470 return true;
4471
4472 unsigned ElementWidth = KindRes->second;
4473 Operands.push_back(
4474 AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
4475 S, getLoc(), getContext()));
4476
4477 // If there was an explicit qualifier, that goes on as a literal text
4478 // operand.
4479 if (!Kind.empty())
4480 Operands.push_back(AArch64Operand::CreateToken(Kind, S, getContext()));
4481
4482 return tryParseVectorIndex(Operands).isFailure();
4483}
4484
4485ParseStatus AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
4486 SMLoc SIdx = getLoc();
4487 if (parseOptionalToken(AsmToken::LBrac)) {
4488 const MCExpr *ImmVal;
4489 if (getParser().parseExpression(ImmVal))
4490 return ParseStatus::NoMatch;
4491 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4492 if (!MCE)
4493 return TokError("immediate value expected for vector index");
4494
4495 SMLoc E = getLoc();
4496
4497 if (parseToken(AsmToken::RBrac, "']' expected"))
4498 return ParseStatus::Failure;
4499
4500 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
4501 E, getContext()));
4502 return ParseStatus::Success;
4503 }
4504
4505 return ParseStatus::NoMatch;
4506}
4507
4508// tryParseVectorRegister - Try to parse a vector register name with
4509// optional kind specifier. If it is a register specifier, eat the token
4510// and return it.
4511ParseStatus AArch64AsmParser::tryParseVectorRegister(MCRegister &Reg,
4512 StringRef &Kind,
4513 RegKind MatchKind) {
4514 const AsmToken &Tok = getTok();
4515
4516 if (Tok.isNot(AsmToken::Identifier))
4517 return ParseStatus::NoMatch;
4518
4519 StringRef Name = Tok.getString();
4520 // If there is a kind specifier, it's separated from the register name by
4521 // a '.'.
4522 size_t Start = 0, Next = Name.find('.');
4523 StringRef Head = Name.slice(Start, Next);
4524 MCRegister RegNum = matchRegisterNameAlias(Head, MatchKind);
4525
4526 if (RegNum) {
4527 if (Next != StringRef::npos) {
4528 Kind = Name.substr(Next);
4529 if (!isValidVectorKind(Kind, MatchKind))
4530 return TokError("invalid vector kind qualifier");
4531 }
4532 Lex(); // Eat the register token.
4533
4534 Reg = RegNum;
4535 return ParseStatus::Success;
4536 }
4537
4538 return ParseStatus::NoMatch;
4539}
4540
4541ParseStatus AArch64AsmParser::tryParseSVEPredicateOrPredicateAsCounterVector(
4542 OperandVector &Operands) {
4543 ParseStatus Status =
4544 tryParseSVEPredicateVector<RegKind::SVEPredicateAsCounter>(Operands);
4545 if (!Status.isSuccess())
4546 Status = tryParseSVEPredicateVector<RegKind::SVEPredicateVector>(Operands);
4547 return Status;
4548}
4549
/// tryParseSVEPredicateVector - Parse a SVE predicate register operand,
/// including an optional element-size suffix, an optional vector index or
/// chained operand, and an optional '/z' (or '/m') predication qualifier.
template <RegKind RK>
ParseStatus
AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
  // Check for a SVE predicate register specifier first.
  const SMLoc S = getLoc();
  StringRef Kind;
  MCRegister RegNum;
  auto Res = tryParseVectorRegister(RegNum, Kind, RK);
  if (!Res.isSuccess())
    return Res;

  // Reject suffixes that are not valid for this predicate register kind.
  const auto &KindRes = parseVectorKind(Kind, RK);
  if (!KindRes)
    return ParseStatus::NoMatch;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(AArch64Operand::CreateVectorReg(
      RegNum, RK, ElementWidth, S,
      getLoc(), getContext()));

  if (getLexer().is(AsmToken::LBrac)) {
    if (RK == RegKind::SVEPredicateAsCounter) {
      // Predicate-as-counter registers may carry a vector index.
      ParseStatus ResIndex = tryParseVectorIndex(Operands);
      if (ResIndex.isSuccess())
        return ParseStatus::Success;
    } else {
      // Indexed predicate, there's no comma so try parse the next operand
      // immediately.
      if (parseOperand(Operands, false, false))
        return ParseStatus::NoMatch;
    }
  }

  // Not all predicates are followed by a '/m' or '/z'.
  if (getTok().isNot(AsmToken::Slash))
    return ParseStatus::Success;

  // But when they do they shouldn't have an element type suffix.
  if (!Kind.empty())
    return Error(S, "not expecting size suffix");

  // Add a literal slash as operand
  Operands.push_back(AArch64Operand::CreateToken("/", getLoc(), getContext()));

  Lex(); // Eat the slash.

  // Zeroing or merging?
  auto Pred = getTok().getString().lower();
  if (RK == RegKind::SVEPredicateAsCounter && Pred != "z")
    return Error(getLoc(), "expecting 'z' predication");

  if (RK == RegKind::SVEPredicateVector && Pred != "z" && Pred != "m")
    return Error(getLoc(), "expecting 'm' or 'z' predication");

  // Add zero/merge token.
  const char *ZM = Pred == "z" ? "z" : "m";
  Operands.push_back(AArch64Operand::CreateToken(ZM, getLoc(), getContext()));

  Lex(); // Eat zero/merge token.
  return ParseStatus::Success;
}
4612
4613/// parseRegister - Parse a register operand.
4614bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
4615 // Try for a Neon vector register.
4616 if (!tryParseNeonVectorRegister(Operands))
4617 return false;
4618
4619 if (tryParseZTOperand(Operands).isSuccess())
4620 return false;
4621
4622 // Otherwise try for a scalar register.
4623 if (tryParseGPROperand<false>(Operands).isSuccess())
4624 return false;
4625
4626 return true;
4627}
4628
/// Parse an immediate expression with an optional leading ':specifier:'
/// relocation modifier (e.g. ":lo12:sym"), and — on targets whose AsmInfo
/// reports subsections-via-symbols — an optional '@' specifier followed by a
/// trailing '+'/'-' constant term. Returns true on error.
bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
  bool HasELFModifier = false;
  AArch64::Specifier RefKind;
  SMLoc Loc = getLexer().getLoc();
  if (parseOptionalToken(AsmToken::Colon)) {
    HasELFModifier = true;

    if (getTok().isNot(AsmToken::Identifier))
      return TokError("expect relocation specifier in operand after ':'");

    // Specifier names are matched case-insensitively.
    std::string LowerCase = getTok().getIdentifier().lower();
    RefKind = StringSwitch<AArch64::Specifier>(LowerCase)
                  .Case("lo12", AArch64::S_LO12)
                  .Case("abs_g3", AArch64::S_ABS_G3)
                  .Case("abs_g2", AArch64::S_ABS_G2)
                  .Case("abs_g2_s", AArch64::S_ABS_G2_S)
                  .Case("abs_g2_nc", AArch64::S_ABS_G2_NC)
                  .Case("abs_g1", AArch64::S_ABS_G1)
                  .Case("abs_g1_s", AArch64::S_ABS_G1_S)
                  .Case("abs_g1_nc", AArch64::S_ABS_G1_NC)
                  .Case("abs_g0", AArch64::S_ABS_G0)
                  .Case("abs_g0_s", AArch64::S_ABS_G0_S)
                  .Case("abs_g0_nc", AArch64::S_ABS_G0_NC)
                  .Case("prel_g3", AArch64::S_PREL_G3)
                  .Case("prel_g2", AArch64::S_PREL_G2)
                  .Case("prel_g2_nc", AArch64::S_PREL_G2_NC)
                  .Case("prel_g1", AArch64::S_PREL_G1)
                  .Case("prel_g1_nc", AArch64::S_PREL_G1_NC)
                  .Case("prel_g0", AArch64::S_PREL_G0)
                  .Case("prel_g0_nc", AArch64::S_PREL_G0_NC)
                  .Case("dtprel", AArch64::S_DTPREL)
                  .Case("dtprel_g2", AArch64::S_DTPREL_G2)
                  .Case("dtprel_g1", AArch64::S_DTPREL_G1)
                  .Case("dtprel_g1_nc", AArch64::S_DTPREL_G1_NC)
                  .Case("dtprel_g0", AArch64::S_DTPREL_G0)
                  .Case("dtprel_g0_nc", AArch64::S_DTPREL_G0_NC)
                  .Case("dtprel_hi12", AArch64::S_DTPREL_HI12)
                  .Case("dtprel_lo12", AArch64::S_DTPREL_LO12)
                  .Case("dtprel_lo12_nc", AArch64::S_DTPREL_LO12_NC)
                  .Case("pg_hi21_nc", AArch64::S_ABS_PAGE_NC)
                  .Case("tprel_g2", AArch64::S_TPREL_G2)
                  .Case("tprel_g1", AArch64::S_TPREL_G1)
                  .Case("tprel_g1_nc", AArch64::S_TPREL_G1_NC)
                  .Case("tprel_g0", AArch64::S_TPREL_G0)
                  .Case("tprel_g0_nc", AArch64::S_TPREL_G0_NC)
                  .Case("tprel_hi12", AArch64::S_TPREL_HI12)
                  .Case("tprel_lo12", AArch64::S_TPREL_LO12)
                  .Case("tprel_lo12_nc", AArch64::S_TPREL_LO12_NC)
                  .Case("tlsdesc_lo12", AArch64::S_TLSDESC_LO12)
                  .Case("tlsdesc_auth_lo12", AArch64::S_TLSDESC_AUTH_LO12)
                  .Case("got", AArch64::S_GOT_PAGE)
                  .Case("gotpage_lo15", AArch64::S_GOT_PAGE_LO15)
                  .Case("got_lo12", AArch64::S_GOT_LO12)
                  .Case("got_auth", AArch64::S_GOT_AUTH_PAGE)
                  .Case("got_auth_lo12", AArch64::S_GOT_AUTH_LO12)
                  .Case("gottprel", AArch64::S_GOTTPREL_PAGE)
                  .Case("gottprel_lo12", AArch64::S_GOTTPREL_LO12_NC)
                  .Case("gottprel_g1", AArch64::S_GOTTPREL_G1)
                  .Case("gottprel_g0_nc", AArch64::S_GOTTPREL_G0_NC)
                  .Case("tlsdesc", AArch64::S_TLSDESC_PAGE)
                  .Case("tlsdesc_auth", AArch64::S_TLSDESC_AUTH_PAGE)
                  .Case("secrel_lo12", AArch64::S_SECREL_LO12)
                  .Case("secrel_hi12", AArch64::S_SECREL_HI12)
                  .Default(AArch64::S_INVALID);

    if (RefKind == AArch64::S_INVALID)
      return TokError("expect relocation specifier in operand after ':'");

    Lex(); // Eat identifier

    if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
      return true;
  }

  if (getParser().parseExpression(ImmVal))
    return true;

  // Wrap the parsed expression so the specifier travels with it.
  if (HasELFModifier)
    ImmVal = MCSpecifierExpr::create(ImmVal, RefKind, getContext(), Loc);

  SMLoc EndLoc;
  if (getContext().getAsmInfo()->hasSubsectionsViaSymbols()) {
    if (getParser().parseAtSpecifier(ImmVal, EndLoc))
      return true;
    // Allow a single trailing '+'/'-' constant term after the specifier.
    const MCExpr *Term;
    MCBinaryExpr::Opcode Opcode;
    if (parseOptionalToken(AsmToken::Plus))
      Opcode = MCBinaryExpr::Add;
    else if (parseOptionalToken(AsmToken::Minus))
      Opcode = MCBinaryExpr::Sub;
    else
      return false;
    if (getParser().parsePrimaryExpr(Term, EndLoc))
      return true;
    ImmVal = MCBinaryExpr::create(Opcode, ImmVal, Term, getContext());
  }

  return false;
}
4728
/// Parse an SME matrix tile list '{ ... }': empty, the 'za' alias, or a
/// comma-separated list of tiles with element-size suffixes. The result is a
/// single operand carrying a ZAD-register mask.
ParseStatus AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) {
  if (getTok().isNot(AsmToken::LCurly))
    return ParseStatus::NoMatch;

  // Parse one 'ZAn.<size>' tile; on success yields its register number and
  // element width and eats the token.
  auto ParseMatrixTile = [this](unsigned &Reg,
                                unsigned &ElementWidth) -> ParseStatus {
    StringRef Name = getTok().getString();
    size_t DotPosition = Name.find('.');
    if (DotPosition == StringRef::npos)
      return ParseStatus::NoMatch;

    unsigned RegNum = matchMatrixTileListRegName(Name);
    if (!RegNum)
      return ParseStatus::NoMatch;

    StringRef Tail = Name.drop_front(DotPosition);
    const std::optional<std::pair<int, int>> &KindRes =
        parseVectorKind(Tail, RegKind::Matrix);
    if (!KindRes)
      return TokError(
          "Expected the register to be followed by element width suffix");
    ElementWidth = KindRes->second;
    Reg = RegNum;
    Lex(); // Eat the register.
    return ParseStatus::Success;
  };

  SMLoc S = getLoc();
  // Keep the '{' so we can push it back if the first tile doesn't parse.
  auto LCurly = getTok();
  Lex(); // Eat left bracket token.

  // Empty matrix list
  if (parseOptionalToken(AsmToken::RCurly)) {
    Operands.push_back(AArch64Operand::CreateMatrixTileList(
        /*RegMask=*/0, S, getLoc(), getContext()));
    return ParseStatus::Success;
  }

  // Try parse {za} alias early
  if (getTok().getString().equals_insensitive("za")) {
    Lex(); // Eat 'za'

    if (parseToken(AsmToken::RCurly, "'}' expected"))
      return ParseStatus::Failure;

    // 'za' means every tile: all eight ZAD registers.
    Operands.push_back(AArch64Operand::CreateMatrixTileList(
        /*RegMask=*/0xFF, S, getLoc(), getContext()));
    return ParseStatus::Success;
  }

  SMLoc TileLoc = getLoc();

  unsigned FirstReg, ElementWidth;
  auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
  if (!ParseRes.isSuccess()) {
    // Rewind past the '{' so another parser can have a go.
    getLexer().UnLex(LCurly);
    return ParseRes;
  }

  const MCRegisterInfo *RI = getContext().getRegisterInfo();

  unsigned PrevReg = FirstReg;

  // DRegs accumulates the ZAD registers aliased by every tile seen so far.
  SmallSet<unsigned, 8> DRegs;
  AArch64Operand::ComputeRegsForAlias(FirstReg, DRegs, ElementWidth);

  SmallSet<unsigned, 8> SeenRegs;
  SeenRegs.insert(FirstReg);

  while (parseOptionalToken(AsmToken::Comma)) {
    TileLoc = getLoc();
    unsigned Reg, NextElementWidth;
    ParseRes = ParseMatrixTile(Reg, NextElementWidth);
    if (!ParseRes.isSuccess())
      return ParseRes;

    // Element size must match on all regs in the list.
    if (ElementWidth != NextElementWidth)
      return Error(TileLoc, "mismatched register size suffix");

    // Out-of-order and duplicate tiles are accepted with a warning only.
    if (RI->getEncodingValue(Reg) <= (RI->getEncodingValue(PrevReg)))
      Warning(TileLoc, "tile list not in ascending order");

    if (SeenRegs.contains(Reg))
      Warning(TileLoc, "duplicate tile in list");
    else {
      SeenRegs.insert(Reg);
      AArch64Operand::ComputeRegsForAlias(Reg, DRegs, ElementWidth);
    }

    PrevReg = Reg;
  }

  if (parseToken(AsmToken::RCurly, "'}' expected"))
    return ParseStatus::Failure;

  // Fold the aliased ZAD registers into a bitmask relative to ZAD0.
  unsigned RegMask = 0;
  for (auto Reg : DRegs)
    RegMask |= 0x1 << (RI->getEncodingValue(Reg) -
                       RI->getEncodingValue(AArch64::ZAD0));
  Operands.push_back(
      AArch64Operand::CreateMatrixTileList(RegMask, S, getLoc(), getContext()));

  return ParseStatus::Success;
}
4834
/// Parse a '{ ... }' list of NEON or SVE vector registers, e.g.
/// "{ v0.8b, v1.8b }" or "{ z0.s - z3.s }", and push a VectorList operand.
/// Two list forms are supported: a range ("v0 - v3") and a comma-separated
/// sequence with a constant stride. \p ExpectMatch makes an unparseable
/// register a hard error instead of NoMatch.
template <RegKind VectorKind>
ParseStatus AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
                                                 bool ExpectMatch) {
  MCAsmParser &Parser = getParser();
  if (!getTok().is(AsmToken::LCurly))
    return ParseStatus::NoMatch;

  // Wrapper around parse function
  auto ParseVector = [this](MCRegister &Reg, StringRef &Kind, SMLoc Loc,
                            bool NoMatchIsError) -> ParseStatus {
    auto RegTok = getTok();
    auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
    if (ParseRes.isSuccess()) {
      if (parseVectorKind(Kind, VectorKind))
        return ParseRes;
      llvm_unreachable("Expected a valid vector kind");
    }

    // "zt0" is handled by the lookup-table operand parser; defer to it
    // rather than diagnosing here.
    if (RegTok.is(AsmToken::Identifier) && ParseRes.isNoMatch() &&
        RegTok.getString().equals_insensitive("zt0"))
      return ParseStatus::NoMatch;

    // Identifiers starting with "za" may be SME tile lists, so they also
    // fall through as NoMatch instead of a hard error.
    if (RegTok.isNot(AsmToken::Identifier) || ParseRes.isFailure() ||
        (ParseRes.isNoMatch() && NoMatchIsError &&
         !RegTok.getString().starts_with_insensitive("za")))
      return Error(Loc, "vector register expected");

    return ParseStatus::NoMatch;
  };

  unsigned NumRegs = getNumRegsForRegKind(VectorKind);
  SMLoc S = getLoc();
  auto LCurly = getTok();
  Lex(); // Eat left bracket token.

  StringRef Kind;
  MCRegister FirstReg;
  auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);

  // Put back the original left bracket if there was no match, so that
  // different types of list-operands can be matched (e.g. SVE, Neon).
  if (ParseRes.isNoMatch())
    Parser.getLexer().UnLex(LCurly);

  if (!ParseRes.isSuccess())
    return ParseRes;

  MCRegister PrevReg = FirstReg;
  unsigned Count = 1;

  unsigned Stride = 1;
  if (parseOptionalToken(AsmToken::Minus)) {
    // Range form "{ vA - vB }": the distance, computed modulo the register
    // file size to allow wraparound (e.g. "{ v30 - v1 }"), fixes the count.
    SMLoc Loc = getLoc();
    StringRef NextKind;

    MCRegister Reg;
    ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
    if (!ParseRes.isSuccess())
      return ParseRes;

    // Any Kind suffices must match on all regs in the list.
    if (Kind != NextKind)
      return Error(Loc, "mismatched register size suffix");

    unsigned Space =
        (PrevReg < Reg) ? (Reg - PrevReg) : (NumRegs - (PrevReg - Reg));

    if (Space == 0 || Space > 3)
      return Error(Loc, "invalid number of vectors");

    Count += Space;
  }
  else {
    // Comma-separated form: the stride between the first two registers is
    // locked in and every later register must continue it.
    bool HasCalculatedStride = false;
    while (parseOptionalToken(AsmToken::Comma)) {
      SMLoc Loc = getLoc();
      StringRef NextKind;
      MCRegister Reg;
      ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
      if (!ParseRes.isSuccess())
        return ParseRes;

      // Any Kind suffices must match on all regs in the list.
      if (Kind != NextKind)
        return Error(Loc, "mismatched register size suffix");

      unsigned RegVal = getContext().getRegisterInfo()->getEncodingValue(Reg);
      unsigned PrevRegVal =
          getContext().getRegisterInfo()->getEncodingValue(PrevReg);
      if (!HasCalculatedStride) {
        Stride = (PrevRegVal < RegVal) ? (RegVal - PrevRegVal)
                                       : (NumRegs - (PrevRegVal - RegVal));
        HasCalculatedStride = true;
      }

      // Register must be incremental (with a wraparound at last register).
      if (Stride == 0 || RegVal != ((PrevRegVal + Stride) % NumRegs))
        return Error(Loc, "registers must have the same sequential stride");

      PrevReg = Reg;
      ++Count;
    }
  }

  if (parseToken(AsmToken::RCurly, "'}' expected"))
    return ParseStatus::Failure;

  if (Count > 4)
    return Error(S, "invalid number of vectors");

  // Derive element count/width from the ".<T>" suffix, if one was given.
  unsigned NumElements = 0;
  unsigned ElementWidth = 0;
  if (!Kind.empty()) {
    if (const auto &VK = parseVectorKind(Kind, VectorKind))
      std::tie(NumElements, ElementWidth) = *VK;
  }

  Operands.push_back(AArch64Operand::CreateVectorList(
      FirstReg, Count, Stride, NumElements, ElementWidth, VectorKind, S,
      getLoc(), getContext()));

  // The list may be followed by a vector index, e.g. element-form LD4.
  if (getTok().is(AsmToken::LBrac)) {
    ParseStatus Res = tryParseVectorIndex(Operands);
    if (Res.isFailure())
      return ParseStatus::Failure;
    return ParseStatus::Success;
  }

  return ParseStatus::Success;
}
4965
4966/// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
4967bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
4968 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
4969 if (!ParseRes.isSuccess())
4970 return true;
4971
4972 return tryParseVectorIndex(Operands).isFailure();
4973}
4974
4975ParseStatus AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
4976 SMLoc StartLoc = getLoc();
4977
4978 MCRegister RegNum;
4979 ParseStatus Res = tryParseScalarRegister(RegNum);
4980 if (!Res.isSuccess())
4981 return Res;
4982
4983 if (!parseOptionalToken(AsmToken::Comma)) {
4984 Operands.push_back(AArch64Operand::CreateReg(
4985 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4986 return ParseStatus::Success;
4987 }
4988
4989 parseOptionalToken(AsmToken::Hash);
4990
4991 if (getTok().isNot(AsmToken::Integer))
4992 return Error(getLoc(), "index must be absent or #0");
4993
4994 const MCExpr *ImmVal;
4995 if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
4996 cast<MCConstantExpr>(ImmVal)->getValue() != 0)
4997 return Error(getLoc(), "index must be absent or #0");
4998
4999 Operands.push_back(AArch64Operand::CreateReg(
5000 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
5001 return ParseStatus::Success;
5002}
5003
/// Parse an SME2 lookup-table register (ZT0), optionally followed by an
/// immediate index in brackets, e.g. "zt0" or "zt0[0, mul vl]".
ParseStatus AArch64AsmParser::tryParseZTOperand(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();
  const AsmToken &Tok = getTok();
  std::string Name = Tok.getString().lower();

  MCRegister Reg = matchRegisterNameAlias(Name, RegKind::LookupTable);

  if (!Reg)
    return ParseStatus::NoMatch;

  Operands.push_back(AArch64Operand::CreateReg(
      Reg, RegKind::LookupTable, StartLoc, getLoc(), getContext()));
  Lex(); // Eat register.

  // Check if register is followed by an index
  if (parseOptionalToken(AsmToken::LBrac)) {
    Operands.push_back(
        AArch64Operand::CreateToken("[", getLoc(), getContext()));
    const MCExpr *ImmVal;
    // NOTE(review): this returns NoMatch after the register, '[' token and
    // possibly part of the expression have been consumed — presumably callers
    // treat this as a terminal failure; confirm no backtracking relies on it.
    if (getParser().parseExpression(ImmVal))
      return ParseStatus::NoMatch;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE)
      return TokError("immediate value expected for vector index");
    Operands.push_back(AArch64Operand::CreateImm(
        MCConstantExpr::create(MCE->getValue(), getContext()), StartLoc,
        getLoc(), getContext()));
    // Allow an optional "mul vl"/"mul #<imm>" decoration after the index.
    if (parseOptionalToken(AsmToken::Comma))
      if (parseOptionalMulOperand(Operands))
        return ParseStatus::Failure;
    if (parseToken(AsmToken::RBrac, "']' expected"))
      return ParseStatus::Failure;
    Operands.push_back(
        AArch64Operand::CreateToken("]", getLoc(), getContext()));
  }
  return ParseStatus::Success;
}
5041
5042template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
5043ParseStatus AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
5044 SMLoc StartLoc = getLoc();
5045
5046 MCRegister RegNum;
5047 ParseStatus Res = tryParseScalarRegister(RegNum);
5048 if (!Res.isSuccess())
5049 return Res;
5050
5051 // No shift/extend is the default.
5052 if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
5053 Operands.push_back(AArch64Operand::CreateReg(
5054 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
5055 return ParseStatus::Success;
5056 }
5057
5058 // Eat the comma
5059 Lex();
5060
5061 // Match the shift
5063 Res = tryParseOptionalShiftExtend(ExtOpnd);
5064 if (!Res.isSuccess())
5065 return Res;
5066
5067 auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
5068 Operands.push_back(AArch64Operand::CreateReg(
5069 RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
5070 Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
5071 Ext->hasShiftExtendAmount()));
5072
5073 return ParseStatus::Success;
5074}
5075
/// Parse an optional "mul vl" or "mul #<imm>" decoration that follows some
/// SVE immediates. On success the matching tokens/immediate are pushed onto
/// \p Operands and false is returned; true means "not present" (nothing
/// consumed) or, after "mul" has been eaten, a diagnosed error.
bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();

  // Some SVE instructions have a decoration after the immediate, i.e.
  // "mul vl". We parse them here and add tokens, which must be present in the
  // asm string in the tablegen instruction.
  // Peek past "mul" before committing to consuming anything.
  bool NextIsVL =
      Parser.getLexer().peekTok().getString().equals_insensitive("vl");
  bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
  if (!getTok().getString().equals_insensitive("mul") ||
      !(NextIsVL || NextIsHash))
    return true;

  Operands.push_back(
      AArch64Operand::CreateToken("mul", getLoc(), getContext()));
  Lex(); // Eat the "mul"

  if (NextIsVL) {
    Operands.push_back(
        AArch64Operand::CreateToken("vl", getLoc(), getContext()));
    Lex(); // Eat the "vl"
    return false;
  }

  if (NextIsHash) {
    Lex(); // Eat the #
    SMLoc S = getLoc();

    // Parse immediate operand.
    const MCExpr *ImmVal;
    if (!Parser.parseExpression(ImmVal))
      if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
        Operands.push_back(AArch64Operand::CreateImm(
            MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
            getContext()));
        return false;
      }
  }

  // Reached on a non-constant expression after '#' (or a failed parse).
  return Error(getLoc(), "expected 'vl' or '#<imm>'");
}
5117
5118bool AArch64AsmParser::parseOptionalVGOperand(OperandVector &Operands,
5119 StringRef &VecGroup) {
5120 MCAsmParser &Parser = getParser();
5121 auto Tok = Parser.getTok();
5122 if (Tok.isNot(AsmToken::Identifier))
5123 return true;
5124
5125 StringRef VG = StringSwitch<StringRef>(Tok.getString().lower())
5126 .Case("vgx2", "vgx2")
5127 .Case("vgx4", "vgx4")
5128 .Default("");
5129
5130 if (VG.empty())
5131 return true;
5132
5133 VecGroup = VG;
5134 Parser.Lex(); // Eat vgx[2|4]
5135 return false;
5136}
5137
5138bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
5139 auto Tok = getTok();
5140 if (Tok.isNot(AsmToken::Identifier))
5141 return true;
5142
5143 auto Keyword = Tok.getString();
5144 Keyword = StringSwitch<StringRef>(Keyword.lower())
5145 .Case("sm", "sm")
5146 .Case("za", "za")
5147 .Default(Keyword);
5148 Operands.push_back(
5149 AArch64Operand::CreateToken(Keyword, Tok.getLoc(), getContext()));
5150
5151 Lex();
5152 return false;
5153}
5154
/// parseOperand - Parse an AArch64 instruction operand. For now this parses
/// the operand regardless of the mnemonic. \p isCondCode forces a condition
/// code at this position; \p invertCondCode inverts it for aliased
/// conditional-select forms. Returns true on error.
bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
                                    bool invertCondCode) {
  MCAsmParser &Parser = getParser();

  ParseStatus ResTy =
      MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/true);

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  if (ResTy.isSuccess())
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy.isFailure())
    return true;

  // Nothing custom, so do general case parsing.
  SMLoc S, E;
  // Helper: after a comma, try a shift/extend modifier; if it is not one,
  // push the saved token back so the comma separates ordinary operands.
  auto parseOptionalShiftExtend = [&](AsmToken SavedTok) {
    if (parseOptionalToken(AsmToken::Comma)) {
      ParseStatus Res = tryParseOptionalShiftExtend(Operands);
      if (!Res.isNoMatch())
        return Res.isFailure();
      getLexer().UnLex(SavedTok);
    }
    return false;
  };
  switch (getLexer().getKind()) {
  default: {
    // Anything else is treated as a symbolic immediate expression.
    SMLoc S = getLoc();
    const MCExpr *Expr;
    if (parseSymbolicImmVal(Expr))
      return Error(S, "invalid operand");

    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
    return parseOptionalShiftExtend(getTok());
  }
  case AsmToken::LBrac: {
    Operands.push_back(
        AArch64Operand::CreateToken("[", getLoc(), getContext()));
    Lex(); // Eat '['

    // There's no comma after a '[', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, false, false);
  }
  case AsmToken::LCurly: {
    // A NEON vector list, or (when that fails) an SME tile/ZA group.
    if (!parseNeonVectorList(Operands))
      return false;

    Operands.push_back(
        AArch64Operand::CreateToken("{", getLoc(), getContext()));
    Lex(); // Eat '{'

    // There's no comma after a '{', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, false, false);
  }
  case AsmToken::Identifier: {
    // See if this is a "VG" decoration used by SME instructions.
    StringRef VecGroup;
    if (!parseOptionalVGOperand(Operands, VecGroup)) {
      Operands.push_back(
          AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
      return false;
    }
    // If we're expecting a Condition Code operand, then just parse that.
    if (isCondCode)
      return parseCondCode(Operands, invertCondCode);

    // If it's a register name, parse it.
    if (!parseRegister(Operands)) {
      // Parse an optional shift/extend modifier.
      AsmToken SavedTok = getTok();
      if (parseOptionalToken(AsmToken::Comma)) {
        // The operand after the register may be a label (e.g. ADR/ADRP). Check
        // such cases and don't report an error when <label> happens to match a
        // shift/extend modifier.
        ParseStatus Res = MatchOperandParserImpl(Operands, Mnemonic,
                                                 /*ParseForAllFeatures=*/true);
        if (!Res.isNoMatch())
          return Res.isFailure();
        Res = tryParseOptionalShiftExtend(Operands);
        if (!Res.isNoMatch())
          return Res.isFailure();
        getLexer().UnLex(SavedTok);
      }
      return false;
    }

    // See if this is a "mul vl" decoration or "mul #<int>" operand used
    // by SVE instructions.
    if (!parseOptionalMulOperand(Operands))
      return false;

    // If this is a two-word mnemonic, parse its special keyword
    // operand as an identifier.
    if (Mnemonic == "brb" || Mnemonic == "smstart" || Mnemonic == "smstop" ||
        Mnemonic == "gcsb")
      return parseKeywordOperand(Operands);

    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal, *Term;
    S = getLoc();
    if (getParser().parseExpression(IdVal))
      return true;
    if (getParser().parseAtSpecifier(IdVal, E))
      return true;
    // Allow "label + const" / "label - const" arithmetic on the symbol.
    std::optional<MCBinaryExpr::Opcode> Opcode;
    if (parseOptionalToken(AsmToken::Plus))
      Opcode = MCBinaryExpr::Add;
    else if (parseOptionalToken(AsmToken::Minus))
      Opcode = MCBinaryExpr::Sub;
    if (Opcode) {
      if (getParser().parsePrimaryExpr(Term, E))
        return true;
      IdVal = MCBinaryExpr::create(*Opcode, IdVal, Term, getContext());
    }
    Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));

    // Parse an optional shift/extend modifier.
    return parseOptionalShiftExtend(getTok());
  }
  case AsmToken::Integer:
  case AsmToken::Real:
  case AsmToken::Hash: {
    // #42 -> immediate.
    S = getLoc();

    parseOptionalToken(AsmToken::Hash);

    // Parse a negative sign
    bool isNegative = false;
    if (getTok().is(AsmToken::Minus)) {
      isNegative = true;
      // We need to consume this token only when we have a Real, otherwise
      // we let parseSymbolicImmVal take care of it
      if (Parser.getLexer().peekTok().is(AsmToken::Real))
        Lex();
    }

    // The only Real that should come through here is a literal #0.0 for
    // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
    // so convert the value.
    const AsmToken &Tok = getTok();
    if (Tok.is(AsmToken::Real)) {
      APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
      if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
          Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
          Mnemonic != "fcmlt" && Mnemonic != "fcmne")
        return TokError("unexpected floating point literal");
      else if (IntVal != 0 || isNegative)
        return TokError("expected floating-point constant #0.0");
      Lex(); // Eat the token.

      // The matcher expects two raw tokens, "#0" and ".0".
      Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
      Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
      return false;
    }

    const MCExpr *ImmVal;
    if (parseSymbolicImmVal(ImmVal))
      return true;

    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));

    // Parse an optional shift/extend modifier.
    return parseOptionalShiftExtend(Tok);
  }
  case AsmToken::Equal: {
    // The GNU "ldr Rt, =expr" literal-pool pseudo-instruction.
    SMLoc Loc = getLoc();
    if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
      return TokError("unexpected token in operand");
    Lex(); // Eat '='
    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return true;

    if (Operands.size() < 2 ||
        !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
      return Error(Loc, "Only valid when first operand is register");

    bool IsXReg =
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
            Operands[1]->getReg());

    MCContext& Ctx = getContext();
    E = SMLoc::getFromPointer(Loc.getPointer() - 1);
    // If the op is an imm and can be fit into a mov, then replace ldr with mov.
    if (isa<MCConstantExpr>(SubExprVal)) {
      uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
      // Strip 16-bit-aligned trailing zeros so the value fits a MOVZ+LSL.
      uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
      while (Imm > 0xFFFF && llvm::countr_zero(Imm) >= 16) {
        ShiftAmt += 16;
        Imm >>= 16;
      }
      if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
        Operands[0] = AArch64Operand::CreateToken("movz", Loc, Ctx);
        Operands.push_back(AArch64Operand::CreateImm(
            MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
        if (ShiftAmt)
          Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
              ShiftAmt, true, S, E, Ctx));
        return false;
      }
      APInt Simm = APInt(64, Imm << ShiftAmt);
      // check if the immediate is an unsigned or signed 32-bit int for W regs
      if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
        return Error(Loc, "Immediate too large for register");
    }
    // If it is a label or an imm that cannot fit in a movz, put it into CP.
    const MCExpr *CPLoc =
        getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
    Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
    return false;
  }
  }
}
5380
5381bool AArch64AsmParser::parseImmExpr(int64_t &Out) {
5382 const MCExpr *Expr = nullptr;
5383 SMLoc L = getLoc();
5384 if (check(getParser().parseExpression(Expr), L, "expected expression"))
5385 return true;
5386 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
5387 if (check(!Value, L, "expected constant expression"))
5388 return true;
5389 Out = Value->getValue();
5390 return false;
5391}
5392
5393bool AArch64AsmParser::parseComma() {
5394 if (check(getTok().isNot(AsmToken::Comma), getLoc(), "expected comma"))
5395 return true;
5396 // Eat the comma
5397 Lex();
5398 return false;
5399}
5400
5401bool AArch64AsmParser::parseRegisterInRange(unsigned &Out, unsigned Base,
5402 unsigned First, unsigned Last) {
5403 MCRegister Reg;
5404 SMLoc Start, End;
5405 if (check(parseRegister(Reg, Start, End), getLoc(), "expected register"))
5406 return true;
5407
5408 // Special handling for FP and LR; they aren't linearly after x28 in
5409 // the registers enum.
5410 unsigned RangeEnd = Last;
5411 if (Base == AArch64::X0) {
5412 if (Last == AArch64::FP) {
5413 RangeEnd = AArch64::X28;
5414 if (Reg == AArch64::FP) {
5415 Out = 29;
5416 return false;
5417 }
5418 }
5419 if (Last == AArch64::LR) {
5420 RangeEnd = AArch64::X28;
5421 if (Reg == AArch64::FP) {
5422 Out = 29;
5423 return false;
5424 } else if (Reg == AArch64::LR) {
5425 Out = 30;
5426 return false;
5427 }
5428 }
5429 }
5430
5431 if (check(Reg < First || Reg > RangeEnd, Start,
5432 Twine("expected register in range ") +
5435 return true;
5436 Out = Reg - Base;
5437 return false;
5438}
5439
5440bool AArch64AsmParser::areEqualRegs(const MCParsedAsmOperand &Op1,
5441 const MCParsedAsmOperand &Op2) const {
5442 auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
5443 auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
5444
5445 if (AOp1.isVectorList() && AOp2.isVectorList())
5446 return AOp1.getVectorListCount() == AOp2.getVectorListCount() &&
5447 AOp1.getVectorListStart() == AOp2.getVectorListStart() &&
5448 AOp1.getVectorListStride() == AOp2.getVectorListStride();
5449
5450 if (!AOp1.isReg() || !AOp2.isReg())
5451 return false;
5452
5453 if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
5454 AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
5455 return MCTargetAsmParser::areEqualRegs(Op1, Op2);
5456
5457 assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
5458 "Testing equality of non-scalar registers not supported");
5459
5460 // Check if a registers match their sub/super register classes.
5461 if (AOp1.getRegEqualityTy() == EqualsSuperReg)
5462 return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
5463 if (AOp1.getRegEqualityTy() == EqualsSubReg)
5464 return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
5465 if (AOp2.getRegEqualityTy() == EqualsSuperReg)
5466 return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
5467 if (AOp2.getRegEqualityTy() == EqualsSubReg)
5468 return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
5469
5470 return false;
5471}
5472
/// Parse an AArch64 instruction mnemonic followed by its operands. Handles
/// the conditional-branch shorthands ("beq" -> "b.eq"), the ".req" directive,
/// SYS/SYSL/SYSP aliases, and splitting dotted mnemonics into suffix tokens
/// before reading the comma-separated operand list.
bool AArch64AsmParser::parseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                                        OperandVector &Operands) {
  // Canonicalise the legacy condition-suffix branch spellings.
  Name = StringSwitch<StringRef>(Name.lower())
             .Case("beq", "b.eq")
             .Case("bne", "b.ne")
             .Case("bhs", "b.hs")
             .Case("bcs", "b.cs")
             .Case("blo", "b.lo")
             .Case("bcc", "b.cc")
             .Case("bmi", "b.mi")
             .Case("bpl", "b.pl")
             .Case("bvs", "b.vs")
             .Case("bvc", "b.vc")
             .Case("bhi", "b.hi")
             .Case("bls", "b.ls")
             .Case("bge", "b.ge")
             .Case("blt", "b.lt")
             .Case("bgt", "b.gt")
             .Case("ble", "b.le")
             .Case("bal", "b.al")
             .Case("bnv", "b.nv")
             .Default(Name);

  // First check for the AArch64-specific .req directive.
  if (getTok().is(AsmToken::Identifier) &&
      getTok().getIdentifier().lower() == ".req") {
    parseDirectiveReq(Name, NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the 'instruction."
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);

  // IC, DC, AT, TLBI, MLBI, PLBI, GIC{R}, GSB and Prediction invalidation
  // instructions are aliases for the SYS instruction.
  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
      Head == "cfp" || Head == "dvp" || Head == "cpp" || Head == "cosp" ||
      Head == "mlbi" || Head == "plbi" || Head == "gic" || Head == "gsb")
    return parseSysAlias(Head, NameLoc, Operands);

  // GICR instructions are aliases for the SYSL instruction.
  if (Head == "gicr")
    return parseSyslAlias(Head, NameLoc, Operands);

  // TLBIP instructions are aliases for the SYSP instruction.
  if (Head == "tlbip")
    return parseSyspAlias(Head, NameLoc, Operands);

  Operands.push_back(AArch64Operand::CreateToken(Head, NameLoc, getContext()));
  Mnemonic = Head;

  // Handle condition codes for a branch mnemonic
  if ((Head == "b" || Head == "bc") && Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start + 1, Next);

    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()));
    std::string Suggestion;
    AArch64CC::CondCode CC = parseCondCodeString(Head, Suggestion);
    if (CC == AArch64CC::Invalid) {
      std::string Msg = "invalid condition code";
      // parseCondCodeString may propose a close spelling for typos.
      if (!Suggestion.empty())
        Msg += ", did you mean " + Suggestion + "?";
      return Error(SuffixLoc, Msg);
    }
    Operands.push_back(AArch64Operand::CreateToken(".", SuffixLoc, getContext(),
                                                   /*IsSuffix=*/true));
    Operands.push_back(
        AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start, Next);
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()) + 1);
    Operands.push_back(AArch64Operand::CreateToken(
        Head, SuffixLoc, getContext(), /*IsSuffix=*/true));
  }

  // Conditional compare instructions have a Condition Code operand, which needs
  // to be parsed and an immediate operand created.
  bool condCodeFourthOperand =
      (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
       Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
       Head == "csinc" || Head == "csinv" || Head == "csneg");

  // These instructions are aliases to some of the conditional select
  // instructions. However, the condition code is inverted in the aliased
  // instruction.
  //
  // FIXME: Is this the correct way to handle these? Or should the parser
  // generate the aliased instructions directly?
  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
  bool condCodeThirdOperand =
      (Head == "cinc" || Head == "cinv" || Head == "cneg");

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {

    unsigned N = 1;
    do {
      // Parse and remember the operand. N tracks the operand position so the
      // condition-code slot of the aliases above can be identified.
      if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
                                     (N == 3 && condCodeThirdOperand) ||
                                     (N == 2 && condCodeSecondOperand),
                       condCodeSecondOperand || condCodeThirdOperand)) {
        return true;
      }

      // After successfully parsing some operands there are three special cases
      // to consider (i.e. notional operands not separated by commas). Two are
      // due to memory specifiers:
      //  + An RBrac will end an address for load/store/prefetch
      //  + An '!' will indicate a pre-indexed operation.
      //
      // And a further case is '}', which ends a group of tokens specifying the
      // SME accumulator array 'ZA' or tile vector, i.e.
      //
      //  '{ ZA }' or '{ <ZAt><HV>.<BHSDQ>[<Wv>, #<imm>] }'
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!

      if (parseOptionalToken(AsmToken::RBrac))
        Operands.push_back(
            AArch64Operand::CreateToken("]", getLoc(), getContext()));
      if (parseOptionalToken(AsmToken::Exclaim))
        Operands.push_back(
            AArch64Operand::CreateToken("!", getLoc(), getContext()));
      if (parseOptionalToken(AsmToken::RCurly))
        Operands.push_back(
            AArch64Operand::CreateToken("}", getLoc(), getContext()));

      ++N;
    } while (parseOptionalToken(AsmToken::Comma));
  }

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
5625
5626static inline bool isMatchingOrAlias(MCRegister ZReg, MCRegister Reg) {
5627 assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
5628 return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
5629 (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
5630 (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
5631 (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
5632 (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
5633 (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
5634}
5635
5636// FIXME: This entire function is a giant hack to provide us with decent
5637// operand range validation/diagnostics until TableGen/MC can be extended
5638// to support autogeneration of this kind of validation.
5639bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
5640 SmallVectorImpl<SMLoc> &Loc) {
5641 const MCRegisterInfo *RI = getContext().getRegisterInfo();
5642 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
5643
5644 // A prefix only applies to the instruction following it. Here we extract
5645 // prefix information for the next instruction before validating the current
5646 // one so that in the case of failure we don't erroneously continue using the
5647 // current prefix.
5648 PrefixInfo Prefix = NextPrefix;
5649 NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);
5650
5651 // Before validating the instruction in isolation we run through the rules
5652 // applicable when it follows a prefix instruction.
5653 // NOTE: brk & hlt can be prefixed but require no additional validation.
5654 if (Prefix.isActive() &&
5655 (Inst.getOpcode() != AArch64::BRK) &&
5656 (Inst.getOpcode() != AArch64::HLT)) {
5657
5658 // Prefixed instructions must have a destructive operand.
5661 return Error(IDLoc, "instruction is unpredictable when following a"
5662 " movprfx, suggest replacing movprfx with mov");
5663
5664 // Destination operands must match.
5665 if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
5666 return Error(Loc[0], "instruction is unpredictable when following a"
5667 " movprfx writing to a different destination");
5668
5669 // Destination operand must not be used in any other location.
5670 for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
5671 if (Inst.getOperand(i).isReg() &&
5672 (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
5673 isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
5674 return Error(Loc[0], "instruction is unpredictable when following a"
5675 " movprfx and destination also used as non-destructive"
5676 " source");
5677 }
5678
5679 auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
5680 if (Prefix.isPredicated()) {
5681 int PgIdx = -1;
5682
5683 // Find the instructions general predicate.
5684 for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
5685 if (Inst.getOperand(i).isReg() &&
5686 PPRRegClass.contains(Inst.getOperand(i).getReg())) {
5687 PgIdx = i;
5688 break;
5689 }
5690
5691 // Instruction must be predicated if the movprfx is predicated.
5692 if (PgIdx == -1 ||
5694 return Error(IDLoc, "instruction is unpredictable when following a"
5695 " predicated movprfx, suggest using unpredicated movprfx");
5696
5697 // Instruction must use same general predicate as the movprfx.
5698 if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
5699 return Error(IDLoc, "instruction is unpredictable when following a"
5700 " predicated movprfx using a different general predicate");
5701
5702 // Instruction element type must match the movprfx.
5703 if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
5704 return Error(IDLoc, "instruction is unpredictable when following a"
5705 " predicated movprfx with a different element size");
5706 }
5707 }
5708
5709 // On ARM64EC, only valid registers may be used. Warn against using
5710 // explicitly disallowed registers.
5711 if (IsWindowsArm64EC) {
5712 for (unsigned i = 0; i < Inst.getNumOperands(); ++i) {
5713 if (Inst.getOperand(i).isReg()) {
5714 MCRegister Reg = Inst.getOperand(i).getReg();
5715 // At this point, vector registers are matched to their
5716 // appropriately sized alias.
5717 if ((Reg == AArch64::W13 || Reg == AArch64::X13) ||
5718 (Reg == AArch64::W14 || Reg == AArch64::X14) ||
5719 (Reg == AArch64::W23 || Reg == AArch64::X23) ||
5720 (Reg == AArch64::W24 || Reg == AArch64::X24) ||
5721 (Reg == AArch64::W28 || Reg == AArch64::X28) ||
5722 (Reg >= AArch64::Q16 && Reg <= AArch64::Q31) ||
5723 (Reg >= AArch64::D16 && Reg <= AArch64::D31) ||
5724 (Reg >= AArch64::S16 && Reg <= AArch64::S31) ||
5725 (Reg >= AArch64::H16 && Reg <= AArch64::H31) ||
5726 (Reg >= AArch64::B16 && Reg <= AArch64::B31)) {
5727 Warning(IDLoc, "register " + Twine(RI->getName(Reg)) +
5728 " is disallowed on ARM64EC.");
5729 }
5730 }
5731 }
5732 }
5733
5734 // Check for indexed addressing modes w/ the base register being the
5735 // same as a destination/source register or pair load where
5736 // the Rt == Rt2. All of those are undefined behaviour.
5737 switch (Inst.getOpcode()) {
5738 case AArch64::LDPSWpre:
5739 case AArch64::LDPWpost:
5740 case AArch64::LDPWpre:
5741 case AArch64::LDPXpost:
5742 case AArch64::LDPXpre: {
5743 MCRegister Rt = Inst.getOperand(1).getReg();
5744 MCRegister Rt2 = Inst.getOperand(2).getReg();
5745 MCRegister Rn = Inst.getOperand(3).getReg();
5746 if (RI->isSubRegisterEq(Rn, Rt))
5747 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
5748 "is also a destination");
5749 if (RI->isSubRegisterEq(Rn, Rt2))
5750 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
5751 "is also a destination");
5752 [[fallthrough]];
5753 }
5754 case AArch64::LDR_ZA:
5755 case AArch64::STR_ZA: {
5756 if (Inst.getOperand(2).isImm() && Inst.getOperand(4).isImm() &&
5757 Inst.getOperand(2).getImm() != Inst.getOperand(4).getImm())
5758 return Error(Loc[1],
5759 "unpredictable instruction, immediate and offset mismatch.");
5760 break;
5761 }
5762 case AArch64::LDPDi:
5763 case AArch64::LDPQi:
5764 case AArch64::LDPSi:
5765 case AArch64::LDPSWi:
5766 case AArch64::LDPWi:
5767 case AArch64::LDPXi: {
5768 MCRegister Rt = Inst.getOperand(0).getReg();
5769 MCRegister Rt2 = Inst.getOperand(1).getReg();
5770 if (Rt == Rt2)
5771 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
5772 break;
5773 }
5774 case AArch64::LDPDpost:
5775 case AArch64::LDPDpre:
5776 case AArch64::LDPQpost:
5777 case AArch64::LDPQpre:
5778 case AArch64::LDPSpost:
5779 case AArch64::LDPSpre:
5780 case AArch64::LDPSWpost: {
5781 MCRegister Rt = Inst.getOperand(1).getReg();
5782 MCRegister Rt2 = Inst.getOperand(2).getReg();
5783 if (Rt == Rt2)
5784 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
5785 break;
5786 }
5787 case AArch64::STPDpost:
5788 case AArch64::STPDpre:
5789 case AArch64::STPQpost:
5790 case AArch64::STPQpre:
5791 case AArch64::STPSpost:
5792 case AArch64::STPSpre:
5793 case AArch64::STPWpost:
5794 case AArch64::STPWpre:
5795 case AArch64::STPXpost:
5796 case AArch64::STPXpre: {
5797 MCRegister Rt = Inst.getOperand(1).getReg();
5798 MCRegister Rt2 = Inst.getOperand(2).getReg();
5799 MCRegister Rn = Inst.getOperand(3).getReg();
5800 if (RI->isSubRegisterEq(Rn, Rt))
5801 return Error(Loc[0], "unpredictable STP instruction, writeback base "
5802 "is also a source");
5803 if (RI->isSubRegisterEq(Rn, Rt2))
5804 return Error(Loc[1], "unpredictable STP instruction, writeback base "
5805 "is also a source");
5806 break;
5807 }
5808 case AArch64::LDRBBpre:
5809 case AArch64::LDRBpre:
5810 case AArch64::LDRHHpre:
5811 case AArch64::LDRHpre:
5812 case AArch64::LDRSBWpre:
5813 case AArch64::LDRSBXpre:
5814 case AArch64::LDRSHWpre:
5815 case AArch64::LDRSHXpre:
5816 case AArch64::LDRSWpre:
5817 case AArch64::LDRWpre:
5818 case AArch64::LDRXpre:
5819 case AArch64::LDRBBpost:
5820 case AArch64::LDRBpost:
5821 case AArch64::LDRHHpost:
5822 case AArch64::LDRHpost:
5823 case AArch64::LDRSBWpost:
5824 case AArch64::LDRSBXpost:
5825 case AArch64::LDRSHWpost:
5826 case AArch64::LDRSHXpost:
5827 case AArch64::LDRSWpost:
5828 case AArch64::LDRWpost:
5829 case AArch64::LDRXpost: {
5830 MCRegister Rt = Inst.getOperand(1).getReg();
5831 MCRegister Rn = Inst.getOperand(2).getReg();
5832 if (RI->isSubRegisterEq(Rn, Rt))
5833 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
5834 "is also a source");
5835 break;
5836 }
5837 case AArch64::STRBBpost:
5838 case AArch64::STRBpost:
5839 case AArch64::STRHHpost:
5840 case AArch64::STRHpost:
5841 case AArch64::STRWpost:
5842 case AArch64::STRXpost:
5843 case AArch64::STRBBpre:
5844 case AArch64::STRBpre:
5845 case AArch64::STRHHpre:
5846 case AArch64::STRHpre:
5847 case AArch64::STRWpre:
5848 case AArch64::STRXpre: {
5849 MCRegister Rt = Inst.getOperand(1).getReg();
5850 MCRegister Rn = Inst.getOperand(2).getReg();
5851 if (RI->isSubRegisterEq(Rn, Rt))
5852 return Error(Loc[0], "unpredictable STR instruction, writeback base "
5853 "is also a source");
5854 break;
5855 }
5856 case AArch64::STXRB:
5857 case AArch64::STXRH:
5858 case AArch64::STXRW:
5859 case AArch64::STXRX:
5860 case AArch64::STLXRB:
5861 case AArch64::STLXRH:
5862 case AArch64::STLXRW:
5863 case AArch64::STLXRX: {
5864 MCRegister Rs = Inst.getOperand(0).getReg();
5865 MCRegister Rt = Inst.getOperand(1).getReg();
5866 MCRegister Rn = Inst.getOperand(2).getReg();
5867 if (RI->isSubRegisterEq(Rt, Rs) ||
5868 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
5869 return Error(Loc[0],
5870 "unpredictable STXR instruction, status is also a source");
5871 break;
5872 }
5873 case AArch64::STXPW:
5874 case AArch64::STXPX:
5875 case AArch64::STLXPW:
5876 case AArch64::STLXPX: {
5877 MCRegister Rs = Inst.getOperand(0).getReg();
5878 MCRegister Rt1 = Inst.getOperand(1).getReg();
5879 MCRegister Rt2 = Inst.getOperand(2).getReg();
5880 MCRegister Rn = Inst.getOperand(3).getReg();
5881 if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
5882 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
5883 return Error(Loc[0],
5884 "unpredictable STXP instruction, status is also a source");
5885 break;
5886 }
5887 case AArch64::LDRABwriteback:
5888 case AArch64::LDRAAwriteback: {
5889 MCRegister Xt = Inst.getOperand(0).getReg();
5890 MCRegister Xn = Inst.getOperand(1).getReg();
5891 if (Xt == Xn)
5892 return Error(Loc[0],
5893 "unpredictable LDRA instruction, writeback base"
5894 " is also a destination");
5895 break;
5896 }
5897 }
5898
5899 // Check v8.8-A memops instructions.
5900 switch (Inst.getOpcode()) {
5901 case AArch64::CPYFP:
5902 case AArch64::CPYFPWN:
5903 case AArch64::CPYFPRN:
5904 case AArch64::CPYFPN:
5905 case AArch64::CPYFPWT:
5906 case AArch64::CPYFPWTWN:
5907 case AArch64::CPYFPWTRN:
5908 case AArch64::CPYFPWTN:
5909 case AArch64::CPYFPRT:
5910 case AArch64::CPYFPRTWN:
5911 case AArch64::CPYFPRTRN:
5912 case AArch64::CPYFPRTN:
5913 case AArch64::CPYFPT:
5914 case AArch64::CPYFPTWN:
5915 case AArch64::CPYFPTRN:
5916 case AArch64::CPYFPTN:
5917 case AArch64::CPYFM:
5918 case AArch64::CPYFMWN:
5919 case AArch64::CPYFMRN:
5920 case AArch64::CPYFMN:
5921 case AArch64::CPYFMWT:
5922 case AArch64::CPYFMWTWN:
5923 case AArch64::CPYFMWTRN:
5924 case AArch64::CPYFMWTN:
5925 case AArch64::CPYFMRT:
5926 case AArch64::CPYFMRTWN:
5927 case AArch64::CPYFMRTRN:
5928 case AArch64::CPYFMRTN:
5929 case AArch64::CPYFMT:
5930 case AArch64::CPYFMTWN:
5931 case AArch64::CPYFMTRN:
5932 case AArch64::CPYFMTN:
5933 case AArch64::CPYFE:
5934 case AArch64::CPYFEWN:
5935 case AArch64::CPYFERN:
5936 case AArch64::CPYFEN:
5937 case AArch64::CPYFEWT:
5938 case AArch64::CPYFEWTWN:
5939 case AArch64::CPYFEWTRN:
5940 case AArch64::CPYFEWTN:
5941 case AArch64::CPYFERT:
5942 case AArch64::CPYFERTWN:
5943 case AArch64::CPYFERTRN:
5944 case AArch64::CPYFERTN:
5945 case AArch64::CPYFET:
5946 case AArch64::CPYFETWN:
5947 case AArch64::CPYFETRN:
5948 case AArch64::CPYFETN:
5949 case AArch64::CPYP:
5950 case AArch64::CPYPWN:
5951 case AArch64::CPYPRN:
5952 case AArch64::CPYPN:
5953 case AArch64::CPYPWT:
5954 case AArch64::CPYPWTWN:
5955 case AArch64::CPYPWTRN:
5956 case AArch64::CPYPWTN:
5957 case AArch64::CPYPRT:
5958 case AArch64::CPYPRTWN:
5959 case AArch64::CPYPRTRN:
5960 case AArch64::CPYPRTN:
5961 case AArch64::CPYPT:
5962 case AArch64::CPYPTWN:
5963 case AArch64::CPYPTRN:
5964 case AArch64::CPYPTN:
5965 case AArch64::CPYM:
5966 case AArch64::CPYMWN:
5967 case AArch64::CPYMRN:
5968 case AArch64::CPYMN:
5969 case AArch64::CPYMWT:
5970 case AArch64::CPYMWTWN:
5971 case AArch64::CPYMWTRN:
5972 case AArch64::CPYMWTN:
5973 case AArch64::CPYMRT:
5974 case AArch64::CPYMRTWN:
5975 case AArch64::CPYMRTRN:
5976 case AArch64::CPYMRTN:
5977 case AArch64::CPYMT:
5978 case AArch64::CPYMTWN:
5979 case AArch64::CPYMTRN:
5980 case AArch64::CPYMTN:
5981 case AArch64::CPYE:
5982 case AArch64::CPYEWN:
5983 case AArch64::CPYERN:
5984 case AArch64::CPYEN:
5985 case AArch64::CPYEWT:
5986 case AArch64::CPYEWTWN:
5987 case AArch64::CPYEWTRN:
5988 case AArch64::CPYEWTN:
5989 case AArch64::CPYERT:
5990 case AArch64::CPYERTWN:
5991 case AArch64::CPYERTRN:
5992 case AArch64::CPYERTN:
5993 case AArch64::CPYET:
5994 case AArch64::CPYETWN:
5995 case AArch64::CPYETRN:
5996 case AArch64::CPYETN: {
5997 // Xd_wb == op0, Xs_wb == op1, Xn_wb == op2
5998 MCRegister Xd = Inst.getOperand(3).getReg();
5999 MCRegister Xs = Inst.getOperand(4).getReg();
6000 MCRegister Xn = Inst.getOperand(5).getReg();
6001
6002 assert(Xd == Inst.getOperand(0).getReg() && "Xd_wb and Xd do not match");
6003 assert(Xs == Inst.getOperand(1).getReg() && "Xs_wb and Xs do not match");
6004 assert(Xn == Inst.getOperand(2).getReg() && "Xn_wb and Xn do not match");
6005
6006 if (Xd == Xs)
6007 return Error(Loc[0], "invalid CPY instruction, destination and source"
6008 " registers are the same");
6009 if (Xd == Xn)
6010 return Error(Loc[0], "invalid CPY instruction, destination and size"
6011 " registers are the same");
6012 if (Xs == Xn)
6013 return Error(Loc[0], "invalid CPY instruction, source and size"
6014 " registers are the same");
6015 break;
6016 }
6017 case AArch64::SETP:
6018 case AArch64::SETPT:
6019 case AArch64::SETPN:
6020 case AArch64::SETPTN:
6021 case AArch64::SETM:
6022 case AArch64::SETMT:
6023 case AArch64::SETMN:
6024 case AArch64::SETMTN:
6025 case AArch64::SETE:
6026 case AArch64::SETET:
6027 case AArch64::SETEN:
6028 case AArch64::SETETN:
6029 case AArch64::SETGP:
6030 case AArch64::SETGPT:
6031 case AArch64::SETGPN:
6032 case AArch64::SETGPTN:
6033 case AArch64::SETGM:
6034 case AArch64::SETGMT:
6035 case AArch64::SETGMN:
6036 case AArch64::SETGMTN:
6037 case AArch64::MOPSSETGE:
6038 case AArch64::MOPSSETGET:
6039 case AArch64::MOPSSETGEN:
6040 case AArch64::MOPSSETGETN: {
6041 // Xd_wb == op0, Xn_wb == op1
6042 MCRegister Xd = Inst.getOperand(2).getReg();
6043 MCRegister Xn = Inst.getOperand(3).getReg();
6044 MCRegister Xm = Inst.getOperand(4).getReg();
6045
6046 assert(Xd == Inst.getOperand(0).getReg() && "Xd_wb and Xd do not match");
6047 assert(Xn == Inst.getOperand(1).getReg() && "Xn_wb and Xn do not match");
6048
6049 if (Xd == Xn)
6050 return Error(Loc[0], "invalid SET instruction, destination and size"
6051 " registers are the same");
6052 if (Xd == Xm)
6053 return Error(Loc[0], "invalid SET instruction, destination and source"
6054 " registers are the same");
6055 if (Xn == Xm)
6056 return Error(Loc[0], "invalid SET instruction, source and size"
6057 " registers are the same");
6058 break;
6059 }
6060 case AArch64::SETGOP:
6061 case AArch64::SETGOPT:
6062 case AArch64::SETGOPN:
6063 case AArch64::SETGOPTN:
6064 case AArch64::SETGOM:
6065 case AArch64::SETGOMT:
6066 case AArch64::SETGOMN:
6067 case AArch64::SETGOMTN:
6068 case AArch64::SETGOE:
6069 case AArch64::SETGOET:
6070 case AArch64::SETGOEN:
6071 case AArch64::SETGOETN: {
6072 // Xd_wb == op0, Xn_wb == op1
6073 MCRegister Xd = Inst.getOperand(2).getReg();
6074 MCRegister Xn = Inst.getOperand(3).getReg();
6075
6076 assert(Xd == Inst.getOperand(0).getReg() && "Xd_wb and Xd do not match");
6077 assert(Xn == Inst.getOperand(1).getReg() && "Xn_wb and Xn do not match");
6078
6079 if (Xd == Xn)
6080 return Error(Loc[0], "invalid SET instruction, destination and size"
6081 " registers are the same");
6082 break;
6083 }
6084 }
6085
6086 // Now check immediate ranges. Separate from the above as there is overlap
6087 // in the instructions being checked and this keeps the nested conditionals
6088 // to a minimum.
6089 switch (Inst.getOpcode()) {
6090 case AArch64::ADDSWri:
6091 case AArch64::ADDSXri:
6092 case AArch64::ADDWri:
6093 case AArch64::ADDXri:
6094 case AArch64::SUBSWri:
6095 case AArch64::SUBSXri:
6096 case AArch64::SUBWri:
6097 case AArch64::SUBXri: {
6098 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
6099 // some slight duplication here.
6100 if (Inst.getOperand(2).isExpr()) {
6101 const MCExpr *Expr = Inst.getOperand(2).getExpr();
6102 AArch64::Specifier ELFSpec;
6103 AArch64::Specifier DarwinSpec;
6104 int64_t Addend;
6105 if (classifySymbolRef(Expr, ELFSpec, DarwinSpec, Addend)) {
6106
6107 // Only allow these with ADDXri.
6108 if ((DarwinSpec == AArch64::S_MACHO_PAGEOFF ||
6109 DarwinSpec == AArch64::S_MACHO_TLVPPAGEOFF) &&
6110 Inst.getOpcode() == AArch64::ADDXri)
6111 return false;
6112
6113 // Only allow these with ADDXri/ADDWri
6121 ELFSpec) &&
6122 (Inst.getOpcode() == AArch64::ADDXri ||
6123 Inst.getOpcode() == AArch64::ADDWri))
6124 return false;
6125
6126 // Don't allow symbol refs in the immediate field otherwise
6127 // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
6128 // operands of the original instruction (i.e. 'add w0, w1, borked' vs
6129 // 'cmp w0, 'borked')
6130 return Error(Loc.back(), "invalid immediate expression");
6131 }
6132 // We don't validate more complex expressions here
6133 }
6134 return false;
6135 }
6136 default:
6137 return false;
6138 }
6139}
6140
6142 const FeatureBitset &FBS,
6143 unsigned VariantID = 0);
6144
6145bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
6147 OperandVector &Operands) {
6148 switch (ErrCode) {
6149 case Match_InvalidTiedOperand: {
6150 auto &Op = static_cast<const AArch64Operand &>(*Operands[ErrorInfo]);
6151 if (Op.isVectorList())
6152 return Error(Loc, "operand must match destination register list");
6153
6154 assert(Op.isReg() && "Unexpected operand type");
6155 switch (Op.getRegEqualityTy()) {
6156 case RegConstraintEqualityTy::EqualsSubReg:
6157 return Error(Loc, "operand must be 64-bit form of destination register");
6158 case RegConstraintEqualityTy::EqualsSuperReg:
6159 return Error(Loc, "operand must be 32-bit form of destination register");
6160 case RegConstraintEqualityTy::EqualsReg:
6161 return Error(Loc, "operand must match destination register");
6162 }
6163 llvm_unreachable("Unknown RegConstraintEqualityTy");
6164 }
6165 case Match_MissingFeature:
6166 return Error(Loc,
6167 "instruction requires a CPU feature not currently enabled");
6168 case Match_InvalidOperand:
6169 return Error(Loc, "invalid operand for instruction");
6170 case Match_InvalidSuffix:
6171 return Error(Loc, "invalid type suffix for instruction");
6172 case Match_InvalidCondCode:
6173 return Error(Loc, "expected AArch64 condition code");
6174 case Match_AddSubRegExtendSmall:
6175 return Error(Loc,
6176 "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
6177 case Match_AddSubRegExtendLarge:
6178 return Error(Loc,
6179 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
6180 case Match_AddSubSecondSource:
6181 return Error(Loc,
6182 "expected compatible register, symbol or integer in range [0, 4095]");
6183 case Match_LogicalSecondSource:
6184 return Error(Loc, "expected compatible register or logical immediate");
6185 case Match_InvalidMovImm32Shift:
6186 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
6187 case Match_InvalidMovImm64Shift:
6188 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
6189 case Match_AddSubRegShift32:
6190 return Error(Loc,
6191 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
6192 case Match_AddSubRegShift64:
6193 return Error(Loc,
6194 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
6195 case Match_InvalidFPImm:
6196 return Error(Loc,
6197 "expected compatible register or floating-point constant");
6198 case Match_InvalidMemoryIndexedSImm6:
6199 return Error(Loc, "index must be an integer in range [-32, 31].");
6200 case Match_InvalidMemoryIndexedSImm5:
6201 return Error(Loc, "index must be an integer in range [-16, 15].");
6202 case Match_InvalidMemoryIndexed1SImm4:
6203 return Error(Loc, "index must be an integer in range [-8, 7].");
6204 case Match_InvalidMemoryIndexed2SImm4:
6205 return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
6206 case Match_InvalidMemoryIndexed3SImm4:
6207 return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
6208 case Match_InvalidMemoryIndexed4SImm4:
6209 return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
6210 case Match_InvalidMemoryIndexed16SImm4:
6211 return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
6212 case Match_InvalidMemoryIndexed32SImm4:
6213 return Error(Loc, "index must be a multiple of 32 in range [-256, 224].");
6214 case Match_InvalidMemoryIndexed1SImm6:
6215 return Error(Loc, "index must be an integer in range [-32, 31].");
6216 case Match_InvalidMemoryIndexedSImm8:
6217 return Error(Loc, "index must be an integer in range [-128, 127].");
6218 case Match_InvalidMemoryIndexedSImm9:
6219 return Error(Loc, "index must be an integer in range [-256, 255].");
6220 case Match_InvalidMemoryIndexed16SImm9:
6221 return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
6222 case Match_InvalidMemoryIndexed8SImm10:
6223 return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
6224 case Match_InvalidMemoryIndexed4SImm7:
6225 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
6226 case Match_InvalidMemoryIndexed8SImm7:
6227 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
6228 case Match_InvalidMemoryIndexed16SImm7:
6229 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
6230 case Match_InvalidMemoryIndexed8UImm5:
6231 return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
6232 case Match_InvalidMemoryIndexed8UImm3:
6233 return Error(Loc, "index must be a multiple of 8 in range [0, 56].");
6234 case Match_InvalidMemoryIndexed4UImm5:
6235 return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
6236 case Match_InvalidMemoryIndexed2UImm5:
6237 return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
6238 case Match_InvalidMemoryIndexed8UImm6:
6239 return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
6240 case Match_InvalidMemoryIndexed16UImm6:
6241 return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
6242 case Match_InvalidMemoryIndexed4UImm6:
6243 return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
6244 case Match_InvalidMemoryIndexed2UImm6:
6245 return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
6246 case Match_InvalidMemoryIndexed1UImm6:
6247 return Error(Loc, "index must be in range [0, 63].");
6248 case Match_InvalidMemoryWExtend8:
6249 return Error(Loc,
6250 "expected 'uxtw' or 'sxtw' with optional shift of #0");
6251 case Match_InvalidMemoryWExtend16:
6252 return Error(Loc,
6253 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
6254 case Match_InvalidMemoryWExtend32:
6255 return Error(Loc,
6256 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
6257 case Match_InvalidMemoryWExtend64:
6258 return Error(Loc,
6259 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
6260 case Match_InvalidMemoryWExtend128:
6261 return Error(Loc,
6262 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
6263 case Match_InvalidMemoryXExtend8:
6264 return Error(Loc,
6265 "expected 'lsl' or 'sxtx' with optional shift of #0");
6266 case Match_InvalidMemoryXExtend16:
6267 return Error(Loc,
6268 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
6269 case Match_InvalidMemoryXExtend32:
6270 return Error(Loc,
6271 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
6272 case Match_InvalidMemoryXExtend64:
6273 return Error(Loc,
6274 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
6275 case Match_InvalidMemoryXExtend128:
6276 return Error(Loc,
6277 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
6278 case Match_InvalidMemoryIndexed1:
6279 return Error(Loc, "index must be an integer in range [0, 4095].");
6280 case Match_InvalidMemoryIndexed2:
6281 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
6282 case Match_InvalidMemoryIndexed4:
6283 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
6284 case Match_InvalidMemoryIndexed8:
6285 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
6286 case Match_InvalidMemoryIndexed16:
6287 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
6288 case Match_InvalidImm0_0:
6289 return Error(Loc, "immediate must be 0.");
6290 case Match_InvalidImm0_1:
6291 return Error(Loc, "immediate must be an integer in range [0, 1].");
6292 case Match_InvalidImm0_3:
6293 return Error(Loc, "immediate must be an integer in range [0, 3].");
6294 case Match_InvalidImm0_7:
6295 return Error(Loc, "immediate must be an integer in range [0, 7].");
6296 case Match_InvalidImm0_15:
6297 return Error(Loc, "immediate must be an integer in range [0, 15].");
6298 case Match_InvalidImm0_31:
6299 return Error(Loc, "immediate must be an integer in range [0, 31].");
6300 case Match_InvalidImm0_63:
6301 return Error(Loc, "immediate must be an integer in range [0, 63].");
6302 case Match_InvalidImm0_127:
6303 return Error(Loc, "immediate must be an integer in range [0, 127].");
6304 case Match_InvalidImm0_255:
6305 return Error(Loc, "immediate must be an integer in range [0, 255].");
6306 case Match_InvalidImm0_65535:
6307 return Error(Loc, "immediate must be an integer in range [0, 65535].");
6308 case Match_InvalidImm1_8:
6309 return Error(Loc, "immediate must be an integer in range [1, 8].");
6310 case Match_InvalidImm1_16:
6311 return Error(Loc, "immediate must be an integer in range [1, 16].");
6312 case Match_InvalidImm1_32:
6313 return Error(Loc, "immediate must be an integer in range [1, 32].");
6314 case Match_InvalidImm1_64:
6315 return Error(Loc, "immediate must be an integer in range [1, 64].");
6316 case Match_InvalidImmM1_62:
6317 return Error(Loc, "immediate must be an integer in range [-1, 62].");
6318 case Match_InvalidMemoryIndexedRange2UImm0:
6319 return Error(Loc, "vector select offset must be the immediate range 0:1.");
6320 case Match_InvalidMemoryIndexedRange2UImm1:
6321 return Error(Loc, "vector select offset must be an immediate range of the "
6322 "form <immf>:<imml>, where the first "
6323 "immediate is a multiple of 2 in the range [0, 2], and "
6324 "the second immediate is immf + 1.");
6325 case Match_InvalidMemoryIndexedRange2UImm2:
6326 case Match_InvalidMemoryIndexedRange2UImm3:
6327 return Error(
6328 Loc,
6329 "vector select offset must be an immediate range of the form "
6330 "<immf>:<imml>, "
6331 "where the first immediate is a multiple of 2 in the range [0, 6] or "
6332 "[0, 14] "
6333 "depending on the instruction, and the second immediate is immf + 1.");
6334 case Match_InvalidMemoryIndexedRange4UImm0:
6335 return Error(Loc, "vector select offset must be the immediate range 0:3.");
6336 case Match_InvalidMemoryIndexedRange4UImm1:
6337 case Match_InvalidMemoryIndexedRange4UImm2:
6338 return Error(
6339 Loc,
6340 "vector select offset must be an immediate range of the form "
6341 "<immf>:<imml>, "
6342 "where the first immediate is a multiple of 4 in the range [0, 4] or "
6343 "[0, 12] "
6344 "depending on the instruction, and the second immediate is immf + 3.");
6345 case Match_InvalidSVEAddSubImm8:
6346 return Error(Loc, "immediate must be an integer in range [0, 255]"
6347 " with a shift amount of 0");
6348 case Match_InvalidSVEAddSubImm16:
6349 case Match_InvalidSVEAddSubImm32:
6350 case Match_InvalidSVEAddSubImm64:
6351 return Error(Loc, "immediate must be an integer in range [0, 255] or a "
6352 "multiple of 256 in range [256, 65280]");
6353 case Match_InvalidSVECpyImm8:
6354 return Error(Loc, "immediate must be an integer in range [-128, 255]"
6355 " with a shift amount of 0");
6356 case Match_InvalidSVECpyImm16:
6357 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
6358 "multiple of 256 in range [-32768, 65280]");
6359 case Match_InvalidSVECpyImm32:
6360 case Match_InvalidSVECpyImm64:
6361 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
6362 "multiple of 256 in range [-32768, 32512]");
6363 case Match_InvalidIndexRange0_0:
6364 return Error(Loc, "expected lane specifier '[0]'");
6365 case Match_InvalidIndexRange1_1:
6366 return Error(Loc, "expected lane specifier '[1]'");
6367 case Match_InvalidIndexRange0_15:
6368 return Error(Loc, "vector lane must be an integer in range [0, 15].");
6369 case Match_InvalidIndexRange0_7:
6370 return Error(Loc, "vector lane must be an integer in range [0, 7].");
6371 case Match_InvalidIndexRange0_3:
6372 return Error(Loc, "vector lane must be an integer in range [0, 3].");
6373 case Match_InvalidIndexRange0_1:
6374 return Error(Loc, "vector lane must be an integer in range [0, 1].");
6375 case Match_InvalidSVEIndexRange0_63:
6376 return Error(Loc, "vector lane must be an integer in range [0, 63].");
6377 case Match_InvalidSVEIndexRange0_31:
6378 return Error(Loc, "vector lane must be an integer in range [0, 31].");
6379 case Match_InvalidSVEIndexRange0_15:
6380 return Error(Loc, "vector lane must be an integer in range [0, 15].");
6381 case Match_InvalidSVEIndexRange0_7:
6382 return Error(Loc, "vector lane must be an integer in range [0, 7].");
6383 case Match_InvalidSVEIndexRange0_3:
6384 return Error(Loc, "vector lane must be an integer in range [0, 3].");
6385 case Match_InvalidLabel:
6386 return Error(Loc, "expected label or encodable integer pc offset");
6387 case Match_MRS:
6388 return Error(Loc, "expected readable system register");
6389 case Match_MSR:
6390 case Match_InvalidSVCR:
6391 return Error(Loc, "expected writable system register or pstate");
6392 case Match_InvalidComplexRotationEven:
6393 return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
6394 case Match_InvalidComplexRotationOdd:
6395 return Error(Loc, "complex rotation must be 90 or 270.");
6396 case Match_MnemonicFail: {
6397 std::string Suggestion = AArch64MnemonicSpellCheck(
6398 ((AArch64Operand &)*Operands[0]).getToken(),
6399 ComputeAvailableFeatures(STI->getFeatureBits()));
6400 return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
6401 }
6402 case Match_InvalidGPR64shifted8:
6403 return Error(Loc, "register must be x0..x30 or xzr, without shift");
6404 case Match_InvalidGPR64shifted16:
6405 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
6406 case Match_InvalidGPR64shifted32:
6407 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
6408 case Match_InvalidGPR64shifted64:
6409 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
6410 case Match_InvalidGPR64shifted128:
6411 return Error(
6412 Loc, "register must be x0..x30 or xzr, with required shift 'lsl #4'");
6413 case Match_InvalidGPR64NoXZRshifted8:
6414 return Error(Loc, "register must be x0..x30 without shift");
6415 case Match_InvalidGPR64NoXZRshifted16:
6416 return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
6417 case Match_InvalidGPR64NoXZRshifted32:
6418 return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
6419 case Match_InvalidGPR64NoXZRshifted64:
6420 return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
6421 case Match_InvalidGPR64NoXZRshifted128:
6422 return Error(Loc, "register must be x0..x30 with required shift 'lsl #4'");
6423 case Match_InvalidZPR32UXTW8:
6424 case Match_InvalidZPR32SXTW8:
6425 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
6426 case Match_InvalidZPR32UXTW16:
6427 case Match_InvalidZPR32SXTW16:
6428 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
6429 case Match_InvalidZPR32UXTW32:
6430 case Match_InvalidZPR32SXTW32:
6431 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
6432 case Match_InvalidZPR32UXTW64:
6433 case Match_InvalidZPR32SXTW64:
6434 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
6435 case Match_InvalidZPR64UXTW8:
6436 case Match_InvalidZPR64SXTW8:
6437 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
6438 case Match_InvalidZPR64UXTW16:
6439 case Match_InvalidZPR64SXTW16:
6440 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
6441 case Match_InvalidZPR64UXTW32:
6442 case Match_InvalidZPR64SXTW32:
6443 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
6444 case Match_InvalidZPR64UXTW64:
6445 case Match_InvalidZPR64SXTW64:
6446 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
6447 case Match_InvalidZPR32LSL8:
6448 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
6449 case Match_InvalidZPR32LSL16:
6450 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
6451 case Match_InvalidZPR32LSL32:
6452 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
6453 case Match_InvalidZPR32LSL64:
6454 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
6455 case Match_InvalidZPR64LSL8:
6456 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
6457 case Match_InvalidZPR64LSL16:
6458 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
6459 case Match_InvalidZPR64LSL32:
6460 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
6461 case Match_InvalidZPR64LSL64:
6462 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
6463 case Match_InvalidZPR0:
6464 return Error(Loc, "expected register without element width suffix");
6465 case Match_InvalidZPR8:
6466 case Match_InvalidZPR16:
6467 case Match_InvalidZPR32:
6468 case Match_InvalidZPR64:
6469 case Match_InvalidZPR128:
6470 return Error(Loc, "invalid element width");
6471 case Match_InvalidZPR_3b8:
6472 return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
6473 case Match_InvalidZPR_3b16:
6474 return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
6475 case Match_InvalidZPR_3b32:
6476 return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
6477 case Match_InvalidZPR_4b8:
6478 return Error(Loc,
6479 "Invalid restricted vector register, expected z0.b..z15.b");
6480 case Match_InvalidZPR_4b16:
6481 return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
6482 case Match_InvalidZPR_4b32:
6483 return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
6484 case Match_InvalidZPR_4b64:
6485 return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
6486 case Match_InvalidZPRMul2_Lo8:
6487 return Error(Loc, "Invalid restricted vector register, expected even "
6488 "register in z0.b..z14.b");
6489 case Match_InvalidZPRMul2_Hi8:
6490 return Error(Loc, "Invalid restricted vector register, expected even "
6491 "register in z16.b..z30.b");
6492 case Match_InvalidZPRMul2_Lo16:
6493 return Error(Loc, "Invalid restricted vector register, expected even "
6494 "register in z0.h..z14.h");
6495 case Match_InvalidZPRMul2_Hi16:
6496 return Error(Loc, "Invalid restricted vector register, expected even "
6497 "register in z16.h..z30.h");
6498 case Match_InvalidZPRMul2_Lo32:
6499 return Error(Loc, "Invalid restricted vector register, expected even "
6500 "register in z0.s..z14.s");
6501 case Match_InvalidZPRMul2_Hi32:
6502 return Error(Loc, "Invalid restricted vector register, expected even "
6503 "register in z16.s..z30.s");
6504 case Match_InvalidZPRMul2_Lo64:
6505 return Error(Loc, "Invalid restricted vector register, expected even "
6506 "register in z0.d..z14.d");
6507 case Match_InvalidZPRMul2_Hi64:
6508 return Error(Loc, "Invalid restricted vector register, expected even "
6509 "register in z16.d..z30.d");
6510 case Match_InvalidZPR_K0:
6511 return Error(Loc, "invalid restricted vector register, expected register "
6512 "in z20..z23 or z28..z31");
6513 case Match_InvalidSVEPattern:
6514 return Error(Loc, "invalid predicate pattern");
6515 case Match_InvalidSVEPPRorPNRAnyReg:
6516 case Match_InvalidSVEPPRorPNRBReg:
6517 case Match_InvalidSVEPredicateAnyReg:
6518 case Match_InvalidSVEPredicateBReg:
6519 case Match_InvalidSVEPredicateHReg:
6520 case Match_InvalidSVEPredicateSReg:
6521 case Match_InvalidSVEPredicateDReg:
6522 return Error(Loc, "invalid predicate register.");
6523 case Match_InvalidSVEPredicate3bAnyReg:
6524 return Error(Loc, "invalid restricted predicate register, expected p0..p7 (without element suffix)");
6525 case Match_InvalidSVEPNPredicateB_p8to15Reg:
6526 case Match_InvalidSVEPNPredicateH_p8to15Reg:
6527 case Match_InvalidSVEPNPredicateS_p8to15Reg:
6528 case Match_InvalidSVEPNPredicateD_p8to15Reg:
6529 return Error(Loc, "Invalid predicate register, expected PN in range "
6530 "pn8..pn15 with element suffix.");
6531 case Match_InvalidSVEPNPredicateAny_p8to15Reg:
6532 return Error(Loc, "invalid restricted predicate-as-counter register "
6533 "expected pn8..pn15");
6534 case Match_InvalidSVEPNPredicateBReg:
6535 case Match_InvalidSVEPNPredicateHReg:
6536 case Match_InvalidSVEPNPredicateSReg:
6537 case Match_InvalidSVEPNPredicateDReg:
6538 return Error(Loc, "Invalid predicate register, expected PN in range "
6539 "pn0..pn15 with element suffix.");
6540 case Match_InvalidSVEVecLenSpecifier:
6541 return Error(Loc, "Invalid vector length specifier, expected VLx2 or VLx4");
6542 case Match_InvalidSVEPredicateListMul2x8:
6543 case Match_InvalidSVEPredicateListMul2x16:
6544 case Match_InvalidSVEPredicateListMul2x32:
6545 case Match_InvalidSVEPredicateListMul2x64:
6546 return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
6547 "predicate registers, where the first vector is a multiple of 2 "
6548 "and with correct element type");
6549 case Match_InvalidSVEExactFPImmOperandHalfOne:
6550 return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
6551 case Match_InvalidSVEExactFPImmOperandHalfTwo:
6552 return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
6553 case Match_InvalidSVEExactFPImmOperandZeroOne:
6554 return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
6555 case Match_InvalidMatrixTileVectorH8:
6556 case Match_InvalidMatrixTileVectorV8:
6557 return Error(Loc, "invalid matrix operand, expected za0h.b or za0v.b");
6558 case Match_InvalidMatrixTileVectorH16:
6559 case Match_InvalidMatrixTileVectorV16:
6560 return Error(Loc,
6561 "invalid matrix operand, expected za[0-1]h.h or za[0-1]v.h");
6562 case Match_InvalidMatrixTileVectorH32:
6563 case Match_InvalidMatrixTileVectorV32:
6564 return Error(Loc,
6565 "invalid matrix operand, expected za[0-3]h.s or za[0-3]v.s");
6566 case Match_InvalidMatrixTileVectorH64:
6567 case Match_InvalidMatrixTileVectorV64:
6568 return Error(Loc,
6569 "invalid matrix operand, expected za[0-7]h.d or za[0-7]v.d");
6570 case Match_InvalidMatrixTileVectorH128:
6571 case Match_InvalidMatrixTileVectorV128:
6572 return Error(Loc,
6573 "invalid matrix operand, expected za[0-15]h.q or za[0-15]v.q");
6574 case Match_InvalidMatrixTile16:
6575 return Error(Loc, "invalid matrix operand, expected za[0-1].h");
6576 case Match_InvalidMatrixTile32:
6577 return Error(Loc, "invalid matrix operand, expected za[0-3].s");
6578 case Match_InvalidMatrixTile64:
6579 return Error(Loc, "invalid matrix operand, expected za[0-7].d");
6580 case Match_InvalidMatrix:
6581 return Error(Loc, "invalid matrix operand, expected za");
6582 case Match_InvalidMatrix8:
6583 return Error(Loc, "invalid matrix operand, expected suffix .b");
6584 case Match_InvalidMatrix16:
6585 return Error(Loc, "invalid matrix operand, expected suffix .h");
6586 case Match_InvalidMatrix32:
6587 return Error(Loc, "invalid matrix operand, expected suffix .s");
6588 case Match_InvalidMatrix64:
6589 return Error(Loc, "invalid matrix operand, expected suffix .d");
6590 case Match_InvalidMatrixIndexGPR32_12_15:
6591 return Error(Loc, "operand must be a register in range [w12, w15]");
6592 case Match_InvalidMatrixIndexGPR32_8_11:
6593 return Error(Loc, "operand must be a register in range [w8, w11]");
6594 case Match_InvalidSVEVectorList2x8Mul2:
6595 case Match_InvalidSVEVectorList2x16Mul2:
6596 case Match_InvalidSVEVectorList2x32Mul2:
6597 case Match_InvalidSVEVectorList2x64Mul2:
6598 case Match_InvalidSVEVectorList2x128Mul2:
6599 return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
6600 "SVE vectors, where the first vector is a multiple of 2 "
6601 "and with matching element types");
6602 case Match_InvalidSVEVectorList2x8Mul2_Lo:
6603 case Match_InvalidSVEVectorList2x16Mul2_Lo:
6604 case Match_InvalidSVEVectorList2x32Mul2_Lo:
6605 case Match_InvalidSVEVectorList2x64Mul2_Lo:
6606 return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
6607 "SVE vectors in the range z0-z14, where the first vector "
6608 "is a multiple of 2 "
6609 "and with matching element types");
6610 case Match_InvalidSVEVectorList2x8Mul2_Hi:
6611 case Match_InvalidSVEVectorList2x16Mul2_Hi:
6612 case Match_InvalidSVEVectorList2x32Mul2_Hi:
6613 case Match_InvalidSVEVectorList2x64Mul2_Hi:
6614 return Error(Loc,
6615 "Invalid vector list, expected list with 2 consecutive "
6616 "SVE vectors in the range z16-z30, where the first vector "
6617 "is a multiple of 2 "
6618 "and with matching element types");
6619 case Match_InvalidSVEVectorList4x8Mul4:
6620 case Match_InvalidSVEVectorList4x16Mul4:
6621 case Match_InvalidSVEVectorList4x32Mul4:
6622 case Match_InvalidSVEVectorList4x64Mul4:
6623 case Match_InvalidSVEVectorList4x128Mul4:
6624 return Error(Loc, "Invalid vector list, expected list with 4 consecutive "
6625 "SVE vectors, where the first vector is a multiple of 4 "
6626 "and with matching element types");
6627 case Match_InvalidLookupTable:
6628 return Error(Loc, "Invalid lookup table, expected zt0");
6629 case Match_InvalidSVEVectorListStrided2x8:
6630 case Match_InvalidSVEVectorListStrided2x16:
6631 case Match_InvalidSVEVectorListStrided2x32:
6632 case Match_InvalidSVEVectorListStrided2x64:
6633 return Error(
6634 Loc,
6635 "Invalid vector list, expected list with each SVE vector in the list "
6636 "8 registers apart, and the first register in the range [z0, z7] or "
6637 "[z16, z23] and with correct element type");
6638 case Match_InvalidSVEVectorListStrided4x8:
6639 case Match_InvalidSVEVectorListStrided4x16:
6640 case Match_InvalidSVEVectorListStrided4x32:
6641 case Match_InvalidSVEVectorListStrided4x64:
6642 return Error(
6643 Loc,
6644 "Invalid vector list, expected list with each SVE vector in the list "
6645 "4 registers apart, and the first register in the range [z0, z3] or "
6646 "[z16, z19] and with correct element type");
6647 case Match_AddSubLSLImm3ShiftLarge:
6648 return Error(Loc,
6649 "expected 'lsl' with optional integer in range [0, 7]");
6650 default:
6651 llvm_unreachable("unexpected error code!");
6652 }
6653}
6654
6655static const char *getSubtargetFeatureName(uint64_t Val);
6656
6657bool AArch64AsmParser::matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
6658 OperandVector &Operands,
6659 MCStreamer &Out,
6661 bool MatchingInlineAsm) {
6662 assert(!Operands.empty() && "Unexpected empty operand list!");
6663 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
6664 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
6665
6666 StringRef Tok = Op.getToken();
6667 unsigned NumOperands = Operands.size();
6668
6669 if (NumOperands == 4 && Tok == "lsl") {
6670 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6671 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6672 if (Op2.isScalarReg() && Op3.isImm()) {
6673 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6674 if (Op3CE) {
6675 uint64_t Op3Val = Op3CE->getValue();
6676 uint64_t NewOp3Val = 0;
6677 uint64_t NewOp4Val = 0;
6678 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
6679 Op2.getReg())) {
6680 NewOp3Val = (32 - Op3Val) & 0x1f;
6681 NewOp4Val = 31 - Op3Val;
6682 } else {
6683 NewOp3Val = (64 - Op3Val) & 0x3f;
6684 NewOp4Val = 63 - Op3Val;
6685 }
6686
6687 const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
6688 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
6689
6690 Operands[0] =
6691 AArch64Operand::CreateToken("ubfm", Op.getStartLoc(), getContext());
6692 Operands.push_back(AArch64Operand::CreateImm(
6693 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
6694 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
6695 Op3.getEndLoc(), getContext());
6696 }
6697 }
6698 } else if (NumOperands == 4 && Tok == "bfc") {
6699 // FIXME: Horrible hack to handle BFC->BFM alias.
6700 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6701 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
6702 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
6703
6704 if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
6705 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
6706 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
6707
6708 if (LSBCE && WidthCE) {
6709 uint64_t LSB = LSBCE->getValue();
6710 uint64_t Width = WidthCE->getValue();
6711
6712 uint64_t RegWidth = 0;
6713 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6714 Op1.getReg()))
6715 RegWidth = 64;
6716 else
6717 RegWidth = 32;
6718
6719 if (LSB >= RegWidth)
6720 return Error(LSBOp.getStartLoc(),
6721 "expected integer in range [0, 31]");
6722 if (Width < 1 || Width > RegWidth)
6723 return Error(WidthOp.getStartLoc(),
6724 "expected integer in range [1, 32]");
6725
6726 uint64_t ImmR = 0;
6727 if (RegWidth == 32)
6728 ImmR = (32 - LSB) & 0x1f;
6729 else
6730 ImmR = (64 - LSB) & 0x3f;
6731
6732 uint64_t ImmS = Width - 1;
6733
6734 if (ImmR != 0 && ImmS >= ImmR)
6735 return Error(WidthOp.getStartLoc(),
6736 "requested insert overflows register");
6737
6738 const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
6739 const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
6740 Operands[0] =
6741 AArch64Operand::CreateToken("bfm", Op.getStartLoc(), getContext());
6742 Operands[2] = AArch64Operand::CreateReg(
6743 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
6744 SMLoc(), SMLoc(), getContext());
6745 Operands[3] = AArch64Operand::CreateImm(
6746 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
6747 Operands.emplace_back(
6748 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
6749 WidthOp.getEndLoc(), getContext()));
6750 }
6751 }
6752 } else if (NumOperands == 5) {
6753 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
6754 // UBFIZ -> UBFM aliases.
6755 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
6756 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6757 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6758 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6759
6760 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6761 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6762 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
6763
6764 if (Op3CE && Op4CE) {
6765 uint64_t Op3Val = Op3CE->getValue();
6766 uint64_t Op4Val = Op4CE->getValue();
6767
6768 uint64_t RegWidth = 0;
6769 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6770 Op1.getReg()))
6771 RegWidth = 64;
6772 else
6773 RegWidth = 32;
6774
6775 if (Op3Val >= RegWidth)
6776 return Error(Op3.getStartLoc(),
6777 "expected integer in range [0, 31]");
6778 if (Op4Val < 1 || Op4Val > RegWidth)
6779 return Error(Op4.getStartLoc(),
6780 "expected integer in range [1, 32]");
6781
6782 uint64_t NewOp3Val = 0;
6783 if (RegWidth == 32)
6784 NewOp3Val = (32 - Op3Val) & 0x1f;
6785 else
6786 NewOp3Val = (64 - Op3Val) & 0x3f;
6787
6788 uint64_t NewOp4Val = Op4Val - 1;
6789
6790 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
6791 return Error(Op4.getStartLoc(),
6792 "requested insert overflows register");
6793
6794 const MCExpr *NewOp3 =
6795 MCConstantExpr::create(NewOp3Val, getContext());
6796 const MCExpr *NewOp4 =
6797 MCConstantExpr::create(NewOp4Val, getContext());
6798 Operands[3] = AArch64Operand::CreateImm(
6799 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
6800 Operands[4] = AArch64Operand::CreateImm(
6801 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
6802 if (Tok == "bfi")
6803 Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
6804 getContext());
6805 else if (Tok == "sbfiz")
6806 Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
6807 getContext());
6808 else if (Tok == "ubfiz")
6809 Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
6810 getContext());
6811 else
6812 llvm_unreachable("No valid mnemonic for alias?");
6813 }
6814 }
6815
6816 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
6817 // UBFX -> UBFM aliases.
6818 } else if (NumOperands == 5 &&
6819 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
6820 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6821 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6822 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6823
6824 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6825 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6826 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
6827
6828 if (Op3CE && Op4CE) {
6829 uint64_t Op3Val = Op3CE->getValue();
6830 uint64_t Op4Val = Op4CE->getValue();
6831
6832 uint64_t RegWidth = 0;
6833 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6834 Op1.getReg()))
6835 RegWidth = 64;
6836 else
6837 RegWidth = 32;
6838
6839 if (Op3Val >= RegWidth)
6840 return Error(Op3.getStartLoc(),
6841 "expected integer in range [0, 31]");
6842 if (Op4Val < 1 || Op4Val > RegWidth)
6843 return Error(Op4.getStartLoc(),
6844 "expected integer in range [1, 32]");
6845
6846 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
6847
6848 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
6849 return Error(Op4.getStartLoc(),
6850 "requested extract overflows register");
6851
6852 const MCExpr *NewOp4 =
6853 MCConstantExpr::create(NewOp4Val, getContext());
6854 Operands[4] = AArch64Operand::CreateImm(
6855 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
6856 if (Tok == "bfxil")
6857 Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
6858 getContext());
6859 else if (Tok == "sbfx")
6860 Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
6861 getContext());
6862 else if (Tok == "ubfx")
6863 Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
6864 getContext());
6865 else
6866 llvm_unreachable("No valid mnemonic for alias?");
6867 }
6868 }
6869 }
6870 }
6871
6872 // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
6873 // instruction for FP registers correctly in some rare circumstances. Convert
6874 // it to a safe instruction and warn (because silently changing someone's
6875 // assembly is rude).
6876 if (getSTI().hasFeature(AArch64::FeatureZCZeroingFPWorkaround) &&
6877 NumOperands == 4 && Tok == "movi") {
6878 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6879 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6880 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6881 if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
6882 (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
6883 StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
6884 if (Suffix.lower() == ".2d" &&
6885 cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
6886 Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
6887 " correctly on this CPU, converting to equivalent movi.16b");
6888 // Switch the suffix to .16b.
6889 unsigned Idx = Op1.isToken() ? 1 : 2;
6890 Operands[Idx] =
6891 AArch64Operand::CreateToken(".16b", IDLoc, getContext());
6892 }
6893 }
6894 }
6895
6896 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
6897 // InstAlias can't quite handle this since the reg classes aren't
6898 // subclasses.
6899 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
6900 // The source register can be Wn here, but the matcher expects a
6901 // GPR64. Twiddle it here if necessary.
6902 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6903 if (Op.isScalarReg()) {
6904 MCRegister Reg = getXRegFromWReg(Op.getReg());
6905 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6906 Op.getStartLoc(), Op.getEndLoc(),
6907 getContext());
6908 }
6909 }
6910 // FIXME: Likewise for sxt[bh] with a Xd dst operand
6911 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
6912 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6913 if (Op.isScalarReg() &&
6914 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6915 Op.getReg())) {
6916 // The source register can be Wn here, but the matcher expects a
6917 // GPR64. Twiddle it here if necessary.
6918 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6919 if (Op.isScalarReg()) {
6920 MCRegister Reg = getXRegFromWReg(Op.getReg());
6921 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6922 Op.getStartLoc(),
6923 Op.getEndLoc(), getContext());
6924 }
6925 }
6926 }
6927 // FIXME: Likewise for uxt[bh] with a Xd dst operand
6928 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
6929 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6930 if (Op.isScalarReg() &&
6931 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6932 Op.getReg())) {
6933 // The source register can be Wn here, but the matcher expects a
6934 // GPR32. Twiddle it here if necessary.
6935 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6936 if (Op.isScalarReg()) {
6937 MCRegister Reg = getWRegFromXReg(Op.getReg());
6938 Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6939 Op.getStartLoc(),
6940 Op.getEndLoc(), getContext());
6941 }
6942 }
6943 }
6944
6945 MCInst Inst;
6946 FeatureBitset MissingFeatures;
6947 // First try to match against the secondary set of tables containing the
6948 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
6949 unsigned MatchResult =
6950 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6951 MatchingInlineAsm, 1);
6952
6953 // If that fails, try against the alternate table containing long-form NEON:
6954 // "fadd v0.2s, v1.2s, v2.2s"
6955 if (MatchResult != Match_Success) {
6956 // But first, save the short-form match result: we can use it in case the
6957 // long-form match also fails.
6958 auto ShortFormNEONErrorInfo = ErrorInfo;
6959 auto ShortFormNEONMatchResult = MatchResult;
6960 auto ShortFormNEONMissingFeatures = MissingFeatures;
6961
6962 MatchResult =
6963 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6964 MatchingInlineAsm, 0);
6965
6966 // Now, both matches failed, and the long-form match failed on the mnemonic
6967 // suffix token operand. The short-form match failure is probably more
6968 // relevant: use it instead.
6969 if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
6970 Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
6971 ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
6972 MatchResult = ShortFormNEONMatchResult;
6973 ErrorInfo = ShortFormNEONErrorInfo;
6974 MissingFeatures = ShortFormNEONMissingFeatures;
6975 }
6976 }
6977
6978 switch (MatchResult) {
6979 case Match_Success: {
6980 // Perform range checking and other semantic validations
6981 SmallVector<SMLoc, 8> OperandLocs;
6982 NumOperands = Operands.size();
6983 for (unsigned i = 1; i < NumOperands; ++i)
6984 OperandLocs.push_back(Operands[i]->getStartLoc());
6985 if (validateInstruction(Inst, IDLoc, OperandLocs))
6986 return true;
6987
6988 Inst.setLoc(IDLoc);
6989 Out.emitInstruction(Inst, getSTI());
6990 return false;
6991 }
6992 case Match_MissingFeature: {
6993 assert(MissingFeatures.any() && "Unknown missing feature!");
6994 // Special case the error message for the very common case where only
6995 // a single subtarget feature is missing (neon, e.g.).
6996 std::string Msg = "instruction requires:";
6997 for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
6998 if (MissingFeatures[i]) {
6999 Msg += " ";
7000 Msg += getSubtargetFeatureName(i);
7001 }
7002 }
7003 return Error(IDLoc, Msg);
7004 }
7005 case Match_MnemonicFail:
7006 return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
7007 case Match_InvalidOperand: {
7008 SMLoc ErrorLoc = IDLoc;
7009
7010 if (ErrorInfo != ~0ULL) {
7011 if (ErrorInfo >= Operands.size())
7012 return Error(IDLoc, "too few operands for instruction",
7013 SMRange(IDLoc, getTok().getLoc()));
7014
7015 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
7016 if (ErrorLoc == SMLoc())
7017 ErrorLoc = IDLoc;
7018 }
7019 // If the match failed on a suffix token operand, tweak the diagnostic
7020 // accordingly.
7021 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
7022 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
7023 MatchResult = Match_InvalidSuffix;
7024
7025 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
7026 }
7027 case Match_InvalidTiedOperand:
7028 case Match_InvalidMemoryIndexed1:
7029 case Match_InvalidMemoryIndexed2:
7030 case Match_InvalidMemoryIndexed4:
7031 case Match_InvalidMemoryIndexed8:
7032 case Match_InvalidMemoryIndexed16:
7033 case Match_InvalidCondCode:
7034 case Match_AddSubLSLImm3ShiftLarge:
7035 case Match_AddSubRegExtendSmall:
7036 case Match_AddSubRegExtendLarge:
7037 case Match_AddSubSecondSource:
7038 case Match_LogicalSecondSource:
7039 case Match_AddSubRegShift32:
7040 case Match_AddSubRegShift64:
7041 case Match_InvalidMovImm32Shift:
7042 case Match_InvalidMovImm64Shift:
7043 case Match_InvalidFPImm:
7044 case Match_InvalidMemoryWExtend8:
7045 case Match_InvalidMemoryWExtend16:
7046 case Match_InvalidMemoryWExtend32:
7047 case Match_InvalidMemoryWExtend64:
7048 case Match_InvalidMemoryWExtend128:
7049 case Match_InvalidMemoryXExtend8:
7050 case Match_InvalidMemoryXExtend16:
7051 case Match_InvalidMemoryXExtend32:
7052 case Match_InvalidMemoryXExtend64:
7053 case Match_InvalidMemoryXExtend128:
7054 case Match_InvalidMemoryIndexed1SImm4:
7055 case Match_InvalidMemoryIndexed2SImm4:
7056 case Match_InvalidMemoryIndexed3SImm4:
7057 case Match_InvalidMemoryIndexed4SImm4:
7058 case Match_InvalidMemoryIndexed1SImm6:
7059 case Match_InvalidMemoryIndexed16SImm4:
7060 case Match_InvalidMemoryIndexed32SImm4:
7061 case Match_InvalidMemoryIndexed4SImm7:
7062 case Match_InvalidMemoryIndexed8SImm7:
7063 case Match_InvalidMemoryIndexed16SImm7:
7064 case Match_InvalidMemoryIndexed8UImm5:
7065 case Match_InvalidMemoryIndexed8UImm3:
7066 case Match_InvalidMemoryIndexed4UImm5:
7067 case Match_InvalidMemoryIndexed2UImm5:
7068 case Match_InvalidMemoryIndexed1UImm6:
7069 case Match_InvalidMemoryIndexed2UImm6:
7070 case Match_InvalidMemoryIndexed4UImm6:
7071 case Match_InvalidMemoryIndexed8UImm6:
7072 case Match_InvalidMemoryIndexed16UImm6:
7073 case Match_InvalidMemoryIndexedSImm6:
7074 case Match_InvalidMemoryIndexedSImm5:
7075 case Match_InvalidMemoryIndexedSImm8:
7076 case Match_InvalidMemoryIndexedSImm9:
7077 case Match_InvalidMemoryIndexed16SImm9:
7078 case Match_InvalidMemoryIndexed8SImm10:
7079 case Match_InvalidImm0_0:
7080 case Match_InvalidImm0_1:
7081 case Match_InvalidImm0_3:
7082 case Match_InvalidImm0_7:
7083 case Match_InvalidImm0_15:
7084 case Match_InvalidImm0_31:
7085 case Match_InvalidImm0_63:
7086 case Match_InvalidImm0_127:
7087 case Match_InvalidImm0_255:
7088 case Match_InvalidImm0_65535:
7089 case Match_InvalidImm1_8:
7090 case Match_InvalidImm1_16:
7091 case Match_InvalidImm1_32:
7092 case Match_InvalidImm1_64:
7093 case Match_InvalidImmM1_62:
7094 case Match_InvalidMemoryIndexedRange2UImm0:
7095 case Match_InvalidMemoryIndexedRange2UImm1:
7096 case Match_InvalidMemoryIndexedRange2UImm2:
7097 case Match_InvalidMemoryIndexedRange2UImm3:
7098 case Match_InvalidMemoryIndexedRange4UImm0:
7099 case Match_InvalidMemoryIndexedRange4UImm1:
7100 case Match_InvalidMemoryIndexedRange4UImm2:
7101 case Match_InvalidSVEAddSubImm8:
7102 case Match_InvalidSVEAddSubImm16:
7103 case Match_InvalidSVEAddSubImm32:
7104 case Match_InvalidSVEAddSubImm64:
7105 case Match_InvalidSVECpyImm8:
7106 case Match_InvalidSVECpyImm16:
7107 case Match_InvalidSVECpyImm32:
7108 case Match_InvalidSVECpyImm64:
7109 case Match_InvalidIndexRange0_0:
7110 case Match_InvalidIndexRange1_1:
7111 case Match_InvalidIndexRange0_15:
7112 case Match_InvalidIndexRange0_7:
7113 case Match_InvalidIndexRange0_3:
7114 case Match_InvalidIndexRange0_1:
7115 case Match_InvalidSVEIndexRange0_63:
7116 case Match_InvalidSVEIndexRange0_31:
7117 case Match_InvalidSVEIndexRange0_15:
7118 case Match_InvalidSVEIndexRange0_7:
7119 case Match_InvalidSVEIndexRange0_3:
7120 case Match_InvalidLabel:
7121 case Match_InvalidComplexRotationEven:
7122 case Match_InvalidComplexRotationOdd:
7123 case Match_InvalidGPR64shifted8:
7124 case Match_InvalidGPR64shifted16:
7125 case Match_InvalidGPR64shifted32:
7126 case Match_InvalidGPR64shifted64:
7127 case Match_InvalidGPR64shifted128:
7128 case Match_InvalidGPR64NoXZRshifted8:
7129 case Match_InvalidGPR64NoXZRshifted16:
7130 case Match_InvalidGPR64NoXZRshifted32:
7131 case Match_InvalidGPR64NoXZRshifted64:
7132 case Match_InvalidGPR64NoXZRshifted128:
7133 case Match_InvalidZPR32UXTW8:
7134 case Match_InvalidZPR32UXTW16:
7135 case Match_InvalidZPR32UXTW32:
7136 case Match_InvalidZPR32UXTW64:
7137 case Match_InvalidZPR32SXTW8:
7138 case Match_InvalidZPR32SXTW16:
7139 case Match_InvalidZPR32SXTW32:
7140 case Match_InvalidZPR32SXTW64:
7141 case Match_InvalidZPR64UXTW8:
7142 case Match_InvalidZPR64SXTW8:
7143 case Match_InvalidZPR64UXTW16:
7144 case Match_InvalidZPR64SXTW16:
7145 case Match_InvalidZPR64UXTW32:
7146 case Match_InvalidZPR64SXTW32:
7147 case Match_InvalidZPR64UXTW64:
7148 case Match_InvalidZPR64SXTW64:
7149 case Match_InvalidZPR32LSL8:
7150 case Match_InvalidZPR32LSL16:
7151 case Match_InvalidZPR32LSL32:
7152 case Match_InvalidZPR32LSL64:
7153 case Match_InvalidZPR64LSL8:
7154 case Match_InvalidZPR64LSL16:
7155 case Match_InvalidZPR64LSL32:
7156 case Match_InvalidZPR64LSL64:
7157 case Match_InvalidZPR0:
7158 case Match_InvalidZPR8:
7159 case Match_InvalidZPR16:
7160 case Match_InvalidZPR32:
7161 case Match_InvalidZPR64:
7162 case Match_InvalidZPR128:
7163 case Match_InvalidZPR_3b8:
7164 case Match_InvalidZPR_3b16:
7165 case Match_InvalidZPR_3b32:
7166 case Match_InvalidZPR_4b8:
7167 case Match_InvalidZPR_4b16:
7168 case Match_InvalidZPR_4b32:
7169 case Match_InvalidZPR_4b64:
7170 case Match_InvalidSVEPPRorPNRAnyReg:
7171 case Match_InvalidSVEPPRorPNRBReg:
7172 case Match_InvalidSVEPredicateAnyReg:
7173 case Match_InvalidSVEPattern:
7174 case Match_InvalidSVEVecLenSpecifier:
7175 case Match_InvalidSVEPredicateBReg:
7176 case Match_InvalidSVEPredicateHReg:
7177 case Match_InvalidSVEPredicateSReg:
7178 case Match_InvalidSVEPredicateDReg:
7179 case Match_InvalidSVEPredicate3bAnyReg:
7180 case Match_InvalidSVEPNPredicateB_p8to15Reg:
7181 case Match_InvalidSVEPNPredicateH_p8to15Reg:
7182 case Match_InvalidSVEPNPredicateS_p8to15Reg:
7183 case Match_InvalidSVEPNPredicateD_p8to15Reg:
7184 case Match_InvalidSVEPNPredicateAny_p8to15Reg:
7185 case Match_InvalidSVEPNPredicateBReg:
7186 case Match_InvalidSVEPNPredicateHReg:
7187 case Match_InvalidSVEPNPredicateSReg:
7188 case Match_InvalidSVEPNPredicateDReg:
7189 case Match_InvalidSVEPredicateListMul2x8:
7190 case Match_InvalidSVEPredicateListMul2x16:
7191 case Match_InvalidSVEPredicateListMul2x32:
7192 case Match_InvalidSVEPredicateListMul2x64:
7193 case Match_InvalidSVEExactFPImmOperandHalfOne:
7194 case Match_InvalidSVEExactFPImmOperandHalfTwo:
7195 case Match_InvalidSVEExactFPImmOperandZeroOne:
7196 case Match_InvalidMatrixTile16:
7197 case Match_InvalidMatrixTile32:
7198 case Match_InvalidMatrixTile64:
7199 case Match_InvalidMatrix:
7200 case Match_InvalidMatrix8:
7201 case Match_InvalidMatrix16:
7202 case Match_InvalidMatrix32:
7203 case Match_InvalidMatrix64:
7204 case Match_InvalidMatrixTileVectorH8:
7205 case Match_InvalidMatrixTileVectorH16:
7206 case Match_InvalidMatrixTileVectorH32:
7207 case Match_InvalidMatrixTileVectorH64:
7208 case Match_InvalidMatrixTileVectorH128:
7209 case Match_InvalidMatrixTileVectorV8:
7210 case Match_InvalidMatrixTileVectorV16:
7211 case Match_InvalidMatrixTileVectorV32:
7212 case Match_InvalidMatrixTileVectorV64:
7213 case Match_InvalidMatrixTileVectorV128:
7214 case Match_InvalidSVCR:
7215 case Match_InvalidMatrixIndexGPR32_12_15:
7216 case Match_InvalidMatrixIndexGPR32_8_11:
7217 case Match_InvalidLookupTable:
7218 case Match_InvalidZPRMul2_Lo8:
7219 case Match_InvalidZPRMul2_Hi8:
7220 case Match_InvalidZPRMul2_Lo16:
7221 case Match_InvalidZPRMul2_Hi16:
7222 case Match_InvalidZPRMul2_Lo32:
7223 case Match_InvalidZPRMul2_Hi32:
7224 case Match_InvalidZPRMul2_Lo64:
7225 case Match_InvalidZPRMul2_Hi64:
7226 case Match_InvalidZPR_K0:
7227 case Match_InvalidSVEVectorList2x8Mul2:
7228 case Match_InvalidSVEVectorList2x16Mul2:
7229 case Match_InvalidSVEVectorList2x32Mul2:
7230 case Match_InvalidSVEVectorList2x64Mul2:
7231 case Match_InvalidSVEVectorList2x128Mul2:
7232 case Match_InvalidSVEVectorList4x8Mul4:
7233 case Match_InvalidSVEVectorList4x16Mul4:
7234 case Match_InvalidSVEVectorList4x32Mul4:
7235 case Match_InvalidSVEVectorList4x64Mul4:
7236 case Match_InvalidSVEVectorList4x128Mul4:
7237 case Match_InvalidSVEVectorList2x8Mul2_Lo:
7238 case Match_InvalidSVEVectorList2x16Mul2_Lo:
7239 case Match_InvalidSVEVectorList2x32Mul2_Lo:
7240 case Match_InvalidSVEVectorList2x64Mul2_Lo:
7241 case Match_InvalidSVEVectorList2x8Mul2_Hi:
7242 case Match_InvalidSVEVectorList2x16Mul2_Hi:
7243 case Match_InvalidSVEVectorList2x32Mul2_Hi:
7244 case Match_InvalidSVEVectorList2x64Mul2_Hi:
7245 case Match_InvalidSVEVectorListStrided2x8:
7246 case Match_InvalidSVEVectorListStrided2x16:
7247 case Match_InvalidSVEVectorListStrided2x32:
7248 case Match_InvalidSVEVectorListStrided2x64:
7249 case Match_InvalidSVEVectorListStrided4x8:
7250 case Match_InvalidSVEVectorListStrided4x16:
7251 case Match_InvalidSVEVectorListStrided4x32:
7252 case Match_InvalidSVEVectorListStrided4x64:
7253 case Match_MSR:
7254 case Match_MRS: {
7255 if (ErrorInfo >= Operands.size())
7256 return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
7257 // Any time we get here, there's nothing fancy to do. Just get the
7258 // operand SMLoc and display the diagnostic.
7259 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
7260 if (ErrorLoc == SMLoc())
7261 ErrorLoc = IDLoc;
7262 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
7263 }
7264 }
7265
7266 llvm_unreachable("Implement any new match types added!");
7267}
7268
/// ParseDirective parses the arm specific directives
///
/// Returns false once a target-specific directive has been recognized (the
/// individual parse helpers report their own diagnostics), and true to hand
/// the directive back to the generic assembly parser.
bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
  const MCContext::Environment Format = getContext().getObjectFileType();
  bool IsMachO = Format == MCContext::IsMachO;
  bool IsCOFF = Format == MCContext::IsCOFF;
  bool IsELF = Format == MCContext::IsELF;

  // Directive names are matched case-insensitively.
  auto IDVal = DirectiveID.getIdentifier().lower();
  SMLoc Loc = DirectiveID.getLoc();
  // Directives accepted for every object-file format.
  if (IDVal == ".arch")
    parseDirectiveArch(Loc);
  else if (IDVal == ".cpu")
    parseDirectiveCPU(Loc);
  else if (IDVal == ".tlsdesccall")
    parseDirectiveTLSDescCall(Loc);
  else if (IDVal == ".ltorg" || IDVal == ".pool")
    parseDirectiveLtorg(Loc);
  else if (IDVal == ".unreq")
    parseDirectiveUnreq(Loc);
  else if (IDVal == ".inst")
    parseDirectiveInst(Loc);
  else if (IDVal == ".cfi_negate_ra_state")
    parseDirectiveCFINegateRAState();
  else if (IDVal == ".cfi_negate_ra_state_with_pc")
    parseDirectiveCFINegateRAStateWithPC();
  else if (IDVal == ".cfi_b_key_frame")
    parseDirectiveCFIBKeyFrame();
  else if (IDVal == ".cfi_mte_tagged_frame")
    parseDirectiveCFIMTETaggedFrame();
  else if (IDVal == ".arch_extension")
    parseDirectiveArchExtension(Loc);
  else if (IDVal == ".variant_pcs")
    parseDirectiveVariantPCS(Loc);
  // MachO-only: linker-optimization-hint directives.
  else if (IsMachO) {
    if (IDVal == MCLOHDirectiveName())
      parseDirectiveLOH(IDVal, Loc);
    else
      return true;
  // COFF-only: Windows SEH unwind-info directives.
  } else if (IsCOFF) {
    if (IDVal == ".seh_stackalloc")
      parseDirectiveSEHAllocStack(Loc);
    else if (IDVal == ".seh_endprologue")
      parseDirectiveSEHPrologEnd(Loc);
    else if (IDVal == ".seh_save_r19r20_x")
      parseDirectiveSEHSaveR19R20X(Loc);
    else if (IDVal == ".seh_save_fplr")
      parseDirectiveSEHSaveFPLR(Loc);
    else if (IDVal == ".seh_save_fplr_x")
      parseDirectiveSEHSaveFPLRX(Loc);
    else if (IDVal == ".seh_save_reg")
      parseDirectiveSEHSaveReg(Loc);
    else if (IDVal == ".seh_save_reg_x")
      parseDirectiveSEHSaveRegX(Loc);
    else if (IDVal == ".seh_save_regp")
      parseDirectiveSEHSaveRegP(Loc);
    else if (IDVal == ".seh_save_regp_x")
      parseDirectiveSEHSaveRegPX(Loc);
    else if (IDVal == ".seh_save_lrpair")
      parseDirectiveSEHSaveLRPair(Loc);
    else if (IDVal == ".seh_save_freg")
      parseDirectiveSEHSaveFReg(Loc);
    else if (IDVal == ".seh_save_freg_x")
      parseDirectiveSEHSaveFRegX(Loc);
    else if (IDVal == ".seh_save_fregp")
      parseDirectiveSEHSaveFRegP(Loc);
    else if (IDVal == ".seh_save_fregp_x")
      parseDirectiveSEHSaveFRegPX(Loc);
    else if (IDVal == ".seh_set_fp")
      parseDirectiveSEHSetFP(Loc);
    else if (IDVal == ".seh_add_fp")
      parseDirectiveSEHAddFP(Loc);
    else if (IDVal == ".seh_nop")
      parseDirectiveSEHNop(Loc);
    else if (IDVal == ".seh_save_next")
      parseDirectiveSEHSaveNext(Loc);
    else if (IDVal == ".seh_startepilogue")
      parseDirectiveSEHEpilogStart(Loc);
    else if (IDVal == ".seh_endepilogue")
      parseDirectiveSEHEpilogEnd(Loc);
    else if (IDVal == ".seh_trap_frame")
      parseDirectiveSEHTrapFrame(Loc);
    else if (IDVal == ".seh_pushframe")
      parseDirectiveSEHMachineFrame(Loc);
    else if (IDVal == ".seh_context")
      parseDirectiveSEHContext(Loc);
    else if (IDVal == ".seh_ec_context")
      parseDirectiveSEHECContext(Loc);
    else if (IDVal == ".seh_clear_unwound_to_call")
      parseDirectiveSEHClearUnwoundToCall(Loc);
    else if (IDVal == ".seh_pac_sign_lr")
      parseDirectiveSEHPACSignLR(Loc);
    // The _p/_x/_px suffixes select paired / pre-indexed-writeback variants.
    else if (IDVal == ".seh_save_any_reg")
      parseDirectiveSEHSaveAnyReg(Loc, false, false);
    else if (IDVal == ".seh_save_any_reg_p")
      parseDirectiveSEHSaveAnyReg(Loc, true, false);
    else if (IDVal == ".seh_save_any_reg_x")
      parseDirectiveSEHSaveAnyReg(Loc, false, true);
    else if (IDVal == ".seh_save_any_reg_px")
      parseDirectiveSEHSaveAnyReg(Loc, true, true);
    else if (IDVal == ".seh_allocz")
      parseDirectiveSEHAllocZ(Loc);
    else if (IDVal == ".seh_save_zreg")
      parseDirectiveSEHSaveZReg(Loc);
    else if (IDVal == ".seh_save_preg")
      parseDirectiveSEHSavePReg(Loc);
    else
      return true;
  // ELF-only: AArch64 build-attribute directives.
  } else if (IsELF) {
    if (IDVal == ".aeabi_subsection")
      parseDirectiveAeabiSubSectionHeader(Loc);
    else if (IDVal == ".aeabi_attribute")
      parseDirectiveAeabiAArch64Attr(Loc);
    else
      return true;
  } else
    return true;
  return false;
}
7387
7388static void ExpandCryptoAEK(const AArch64::ArchInfo &ArchInfo,
7389 SmallVector<StringRef, 4> &RequestedExtensions) {
7390 const bool NoCrypto = llvm::is_contained(RequestedExtensions, "nocrypto");
7391 const bool Crypto = llvm::is_contained(RequestedExtensions, "crypto");
7392
7393 if (!NoCrypto && Crypto) {
7394 // Map 'generic' (and others) to sha2 and aes, because
7395 // that was the traditional meaning of crypto.
7396 if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
7397 ArchInfo == AArch64::ARMV8_3A) {
7398 RequestedExtensions.push_back("sha2");
7399 RequestedExtensions.push_back("aes");
7400 }
7401 if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
7402 ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
7403 ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
7404 ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
7405 ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
7406 ArchInfo == AArch64::ARMV9_4A || ArchInfo == AArch64::ARMV8R) {
7407 RequestedExtensions.push_back("sm4");
7408 RequestedExtensions.push_back("sha3");
7409 RequestedExtensions.push_back("sha2");
7410 RequestedExtensions.push_back("aes");
7411 }
7412 } else if (NoCrypto) {
7413 // Map 'generic' (and others) to sha2 and aes, because
7414 // that was the traditional meaning of crypto.
7415 if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
7416 ArchInfo == AArch64::ARMV8_3A) {
7417 RequestedExtensions.push_back("nosha2");
7418 RequestedExtensions.push_back("noaes");
7419 }
7420 if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
7421 ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
7422 ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
7423 ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
7424 ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
7425 ArchInfo == AArch64::ARMV9_4A) {
7426 RequestedExtensions.push_back("nosm4");
7427 RequestedExtensions.push_back("nosha3");
7428 RequestedExtensions.push_back("nosha2");
7429 RequestedExtensions.push_back("noaes");
7430 }
7431 }
7432}
7433
7435 return SMLoc::getFromPointer(L.getPointer() + Offset);
7436}
7437
7438/// parseDirectiveArch
7439/// ::= .arch token
7440bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
7441 SMLoc CurLoc = getLoc();
7442
7443 StringRef Name = getParser().parseStringToEndOfStatement().trim();
7444 StringRef Arch, ExtensionString;
7445 std::tie(Arch, ExtensionString) = Name.split('+');
7446
7447 const AArch64::ArchInfo *ArchInfo = AArch64::parseArch(Arch);
7448 if (!ArchInfo)
7449 return Error(CurLoc, "unknown arch name");
7450
7451 if (parseToken(AsmToken::EndOfStatement))
7452 return true;
7453
7454 // Get the architecture and extension features.
7455 std::vector<StringRef> AArch64Features;
7456 AArch64Features.push_back(ArchInfo->ArchFeature);
7457 AArch64::getExtensionFeatures(ArchInfo->DefaultExts, AArch64Features);
7458
7459 MCSubtargetInfo &STI = copySTI();
7460 std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
7461 STI.setDefaultFeatures("generic", /*TuneCPU*/ "generic",
7462 join(ArchFeatures.begin(), ArchFeatures.end(), ","));
7463
7464 SmallVector<StringRef, 4> RequestedExtensions;
7465 if (!ExtensionString.empty())
7466 ExtensionString.split(RequestedExtensions, '+');
7467
7468 ExpandCryptoAEK(*ArchInfo, RequestedExtensions);
7469 CurLoc = incrementLoc(CurLoc, Arch.size());
7470
7471 for (auto Name : RequestedExtensions) {
7472 // Advance source location past '+'.
7473 CurLoc = incrementLoc(CurLoc, 1);
7474
7475 bool EnableFeature = !Name.consume_front_insensitive("no");
7476
7477 auto It = llvm::find_if(ExtensionMap, [&Name](const auto &Extension) {
7478 return Extension.Name == Name;
7479 });
7480
7481 if (It == std::end(ExtensionMap))
7482 return Error(CurLoc, "unsupported architectural extension: " + Name);
7483
7484 if (EnableFeature)
7485 STI.SetFeatureBitsTransitively(It->Features);
7486 else
7487 STI.ClearFeatureBitsTransitively(It->Features);
7488 CurLoc = incrementLoc(CurLoc, Name.size());
7489 }
7490 FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
7491 setAvailableFeatures(Features);
7492
7493 getTargetStreamer().emitDirectiveArch(Name);
7494 return false;
7495}
7496
7497/// parseDirectiveArchExtension
7498/// ::= .arch_extension [no]feature
7499bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) {
7500 SMLoc ExtLoc = getLoc();
7501
7502 StringRef FullName = getParser().parseStringToEndOfStatement().trim();
7503
7504 if (parseEOL())
7505 return true;
7506
7507 bool EnableFeature = true;
7508 StringRef Name = FullName;
7509 if (Name.starts_with_insensitive("no")) {
7510 EnableFeature = false;
7511 Name = Name.substr(2);
7512 }
7513
7514 auto It = llvm::find_if(ExtensionMap, [&Name](const auto &Extension) {
7515 return Extension.Name == Name;
7516 });
7517
7518 if (It == std::end(ExtensionMap))
7519 return Error(ExtLoc, "unsupported architectural extension: " + Name);
7520
7521 MCSubtargetInfo &STI = copySTI();
7522 if (EnableFeature)
7523 STI.SetFeatureBitsTransitively(It->Features);
7524 else
7525 STI.ClearFeatureBitsTransitively(It->Features);
7526 FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
7527 setAvailableFeatures(Features);
7528
7529 getTargetStreamer().emitDirectiveArchExtension(FullName);
7530 return false;
7531}
7532
7533/// parseDirectiveCPU
7534/// ::= .cpu id
7535bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
7536 SMLoc CurLoc = getLoc();
7537
7538 StringRef CPU, ExtensionString;
7539 std::tie(CPU, ExtensionString) =
7540 getParser().parseStringToEndOfStatement().trim().split('+');
7541
7542 if (parseToken(AsmToken::EndOfStatement))
7543 return true;
7544
7545 SmallVector<StringRef, 4> RequestedExtensions;
7546 if (!ExtensionString.empty())
7547 ExtensionString.split(RequestedExtensions, '+');
7548
7549 const llvm::AArch64::ArchInfo *CpuArch = llvm::AArch64::getArchForCpu(CPU);
7550 if (!CpuArch) {
7551 Error(CurLoc, "unknown CPU name");
7552 return false;
7553 }
7554 ExpandCryptoAEK(*CpuArch, RequestedExtensions);
7555
7556 MCSubtargetInfo &STI = copySTI();
7557 STI.setDefaultFeatures(CPU, /*TuneCPU*/ CPU, "");
7558 CurLoc = incrementLoc(CurLoc, CPU.size());
7559
7560 for (auto Name : RequestedExtensions) {
7561 // Advance source location past '+'.
7562 CurLoc = incrementLoc(CurLoc, 1);
7563
7564 bool EnableFeature = !Name.consume_front_insensitive("no");
7565
7566 auto It = llvm::find_if(ExtensionMap, [&Name](const auto &Extension) {
7567 return Extension.Name == Name;
7568 });
7569
7570 if (It == std::end(ExtensionMap))
7571 return Error(CurLoc, "unsupported architectural extension: " + Name);
7572
7573 if (EnableFeature)
7574 STI.SetFeatureBitsTransitively(It->Features);
7575 else
7576 STI.ClearFeatureBitsTransitively(It->Features);
7577 CurLoc = incrementLoc(CurLoc, Name.size());
7578 }
7579 FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
7580 setAvailableFeatures(Features);
7581 return false;
7582}
7583
7584/// parseDirectiveInst
7585/// ::= .inst opcode [, ...]
7586bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
7587 if (getLexer().is(AsmToken::EndOfStatement))
7588 return Error(Loc, "expected expression following '.inst' directive");
7589
7590 auto parseOp = [&]() -> bool {
7591 SMLoc L = getLoc();
7592 const MCExpr *Expr = nullptr;
7593 if (check(getParser().parseExpression(Expr), L, "expected expression"))
7594 return true;
7595 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
7596 if (check(!Value, L, "expected constant expression"))
7597 return true;
7598 getTargetStreamer().emitInst(Value->getValue());
7599 return false;
7600 };
7601
7602 return parseMany(parseOp);
7603}
7604
// parseDirectiveTLSDescCall:
//   ::= .tlsdesccall symbol
// Emits the TLSDESCCALL marker pseudo-instruction at the current position.
bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
  StringRef Name;
  if (check(getParser().parseIdentifier(Name), L, "expected symbol") ||
      parseToken(AsmToken::EndOfStatement))
    return true;

  MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
  // NOTE(review): Expr is created but not visibly consumed below; the line
  // attaching it as an operand appears to have been elided from this
  // listing — confirm against the upstream sources.
  const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());

  MCInst Inst;
  Inst.setOpcode(AArch64::TLSDESCCALL);

  getParser().getStreamer().emitInstruction(Inst, getSTI());
  return false;
}
7624
7625/// ::= .loh <lohName | lohId> label1, ..., labelN
7626/// The number of arguments depends on the loh identifier.
7627bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
7629 if (getTok().isNot(AsmToken::Identifier)) {
7630 if (getTok().isNot(AsmToken::Integer))
7631 return TokError("expected an identifier or a number in directive");
7632 // We successfully get a numeric value for the identifier.
7633 // Check if it is valid.
7634 int64_t Id = getTok().getIntVal();
7635 if (Id <= -1U && !isValidMCLOHType(Id))
7636 return TokError("invalid numeric identifier in directive");
7637 Kind = (MCLOHType)Id;
7638 } else {
7639 StringRef Name = getTok().getIdentifier();
7640 // We successfully parse an identifier.
7641 // Check if it is a recognized one.
7642 int Id = MCLOHNameToId(Name);
7643
7644 if (Id == -1)
7645 return TokError("invalid identifier in directive");
7646 Kind = (MCLOHType)Id;
7647 }
7648 // Consume the identifier.
7649 Lex();
7650 // Get the number of arguments of this LOH.
7651 int NbArgs = MCLOHIdToNbArgs(Kind);
7652
7653 assert(NbArgs != -1 && "Invalid number of arguments");
7654
7656 for (int Idx = 0; Idx < NbArgs; ++Idx) {
7657 StringRef Name;
7658 if (getParser().parseIdentifier(Name))
7659 return TokError("expected identifier in directive");
7660 Args.push_back(getContext().getOrCreateSymbol(Name));
7661
7662 if (Idx + 1 == NbArgs)
7663 break;
7664 if (parseComma())
7665 return true;
7666 }
7667 if (parseEOL())
7668 return true;
7669
7670 getStreamer().emitLOHDirective(Kind, Args);
7671 return false;
7672}
7673
7674/// parseDirectiveLtorg
7675/// ::= .ltorg | .pool
7676bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
7677 if (parseEOL())
7678 return true;
7679 getTargetStreamer().emitCurrentConstantPool();
7680 return false;
7681}
7682
7683/// parseDirectiveReq
7684/// ::= name .req registername
7685bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
7686 Lex(); // Eat the '.req' token.
7687 SMLoc SRegLoc = getLoc();
7688 RegKind RegisterKind = RegKind::Scalar;
7689 MCRegister RegNum;
7690 ParseStatus ParseRes = tryParseScalarRegister(RegNum);
7691
7692 if (!ParseRes.isSuccess()) {
7693 StringRef Kind;
7694 RegisterKind = RegKind::NeonVector;
7695 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector);
7696
7697 if (ParseRes.isFailure())
7698 return true;
7699
7700 if (ParseRes.isSuccess() && !Kind.empty())
7701 return Error(SRegLoc, "vector register without type specifier expected");
7702 }
7703
7704 if (!ParseRes.isSuccess()) {
7705 StringRef Kind;
7706 RegisterKind = RegKind::SVEDataVector;
7707 ParseRes =
7708 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
7709
7710 if (ParseRes.isFailure())
7711 return true;
7712
7713 if (ParseRes.isSuccess() && !Kind.empty())
7714 return Error(SRegLoc,
7715 "sve vector register without type specifier expected");
7716 }
7717
7718 if (!ParseRes.isSuccess()) {
7719 StringRef Kind;
7720 RegisterKind = RegKind::SVEPredicateVector;
7721 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
7722
7723 if (ParseRes.isFailure())
7724 return true;
7725
7726 if (ParseRes.isSuccess() && !Kind.empty())
7727 return Error(SRegLoc,
7728 "sve predicate register without type specifier expected");
7729 }
7730
7731 if (!ParseRes.isSuccess())
7732 return Error(SRegLoc, "register name or alias expected");
7733
7734 // Shouldn't be anything else.
7735 if (parseEOL())
7736 return true;
7737
7738 auto pair = std::make_pair(RegisterKind, RegNum);
7739 if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
7740 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
7741
7742 return false;
7743}
7744
7745/// parseDirectiveUneq
7746/// ::= .unreq registername
7747bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
7748 if (getTok().isNot(AsmToken::Identifier))
7749 return TokError("unexpected input in .unreq directive.");
7750 RegisterReqs.erase(getTok().getIdentifier().lower());
7751 Lex(); // Eat the identifier.
7752 return parseToken(AsmToken::EndOfStatement);
7753}
7754
7755bool AArch64AsmParser::parseDirectiveCFINegateRAState() {
7756 if (parseEOL())
7757 return true;
7758 getStreamer().emitCFINegateRAState();
7759 return false;
7760}
7761
7762bool AArch64AsmParser::parseDirectiveCFINegateRAStateWithPC() {
7763 if (parseEOL())
7764 return true;
7765 getStreamer().emitCFINegateRAStateWithPC();
7766 return false;
7767}
7768
7769/// parseDirectiveCFIBKeyFrame
7770/// ::= .cfi_b_key
7771bool AArch64AsmParser::parseDirectiveCFIBKeyFrame() {
7772 if (parseEOL())
7773 return true;
7774 getStreamer().emitCFIBKeyFrame();
7775 return false;
7776}
7777
7778/// parseDirectiveCFIMTETaggedFrame
7779/// ::= .cfi_mte_tagged_frame
7780bool AArch64AsmParser::parseDirectiveCFIMTETaggedFrame() {
7781 if (parseEOL())
7782 return true;
7783 getStreamer().emitCFIMTETaggedFrame();
7784 return false;
7785}
7786
7787/// parseDirectiveVariantPCS
7788/// ::= .variant_pcs symbolname
7789bool AArch64AsmParser::parseDirectiveVariantPCS(SMLoc L) {
7790 StringRef Name;
7791 if (getParser().parseIdentifier(Name))
7792 return TokError("expected symbol name");
7793 if (parseEOL())
7794 return true;
7795 getTargetStreamer().emitDirectiveVariantPCS(
7796 getContext().getOrCreateSymbol(Name));
7797 return false;
7798}
7799
7800/// parseDirectiveSEHAllocStack
7801/// ::= .seh_stackalloc
7802bool AArch64AsmParser::parseDirectiveSEHAllocStack(SMLoc L) {
7803 int64_t Size;
7804 if (parseImmExpr(Size))
7805 return true;
7806 getTargetStreamer().emitARM64WinCFIAllocStack(Size);
7807 return false;
7808}
7809
/// parseDirectiveSEHPrologEnd
/// ::= .seh_endprologue
bool AArch64AsmParser::parseDirectiveSEHPrologEnd(SMLoc L) {
  // No operands: just record the end of the prologue in the unwind info.
  getTargetStreamer().emitARM64WinCFIPrologEnd();
  return false;
}
7816
7817/// parseDirectiveSEHSaveR19R20X
7818/// ::= .seh_save_r19r20_x
7819bool AArch64AsmParser::parseDirectiveSEHSaveR19R20X(SMLoc L) {
7820 int64_t Offset;
7821 if (parseImmExpr(Offset))
7822 return true;
7823 getTargetStreamer().emitARM64WinCFISaveR19R20X(Offset);
7824 return false;
7825}
7826
7827/// parseDirectiveSEHSaveFPLR
7828/// ::= .seh_save_fplr
7829bool AArch64AsmParser::parseDirectiveSEHSaveFPLR(SMLoc L) {
7830 int64_t Offset;
7831 if (parseImmExpr(Offset))
7832 return true;
7833 getTargetStreamer().emitARM64WinCFISaveFPLR(Offset);
7834 return false;
7835}
7836
7837/// parseDirectiveSEHSaveFPLRX
7838/// ::= .seh_save_fplr_x
7839bool AArch64AsmParser::parseDirectiveSEHSaveFPLRX(SMLoc L) {
7840 int64_t Offset;
7841 if (parseImmExpr(Offset))
7842 return true;
7843 getTargetStreamer().emitARM64WinCFISaveFPLRX(Offset);
7844 return false;
7845}
7846
7847/// parseDirectiveSEHSaveReg
7848/// ::= .seh_save_reg
7849bool AArch64AsmParser::parseDirectiveSEHSaveReg(SMLoc L) {
7850 unsigned Reg;
7851 int64_t Offset;
7852 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7853 parseComma() || parseImmExpr(Offset))
7854 return true;
7855 getTargetStreamer().emitARM64WinCFISaveReg(Reg, Offset);
7856 return false;
7857}
7858
7859/// parseDirectiveSEHSaveRegX
7860/// ::= .seh_save_reg_x
7861bool AArch64AsmParser::parseDirectiveSEHSaveRegX(SMLoc L) {
7862 unsigned Reg;
7863 int64_t Offset;
7864 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7865 parseComma() || parseImmExpr(Offset))
7866 return true;
7867 getTargetStreamer().emitARM64WinCFISaveRegX(Reg, Offset);
7868 return false;
7869}
7870
7871/// parseDirectiveSEHSaveRegP
7872/// ::= .seh_save_regp
7873bool AArch64AsmParser::parseDirectiveSEHSaveRegP(SMLoc L) {
7874 unsigned Reg;
7875 int64_t Offset;
7876 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
7877 parseComma() || parseImmExpr(Offset))
7878 return true;
7879 getTargetStreamer().emitARM64WinCFISaveRegP(Reg, Offset);
7880 return false;
7881}
7882
7883/// parseDirectiveSEHSaveRegPX
7884/// ::= .seh_save_regp_x
7885bool AArch64AsmParser::parseDirectiveSEHSaveRegPX(SMLoc L) {
7886 unsigned Reg;
7887 int64_t Offset;
7888 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
7889 parseComma() || parseImmExpr(Offset))
7890 return true;
7891 getTargetStreamer().emitARM64WinCFISaveRegPX(Reg, Offset);
7892 return false;
7893}
7894
7895/// parseDirectiveSEHSaveLRPair
7896/// ::= .seh_save_lrpair
7897bool AArch64AsmParser::parseDirectiveSEHSaveLRPair(SMLoc L) {
7898 unsigned Reg;
7899 int64_t Offset;
7900 L = getLoc();
7901 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7902 parseComma() || parseImmExpr(Offset))
7903 return true;
7904 if (check(((Reg - 19) % 2 != 0), L,
7905 "expected register with even offset from x19"))
7906 return true;
7907 getTargetStreamer().emitARM64WinCFISaveLRPair(Reg, Offset);
7908 return false;
7909}
7910
7911/// parseDirectiveSEHSaveFReg
7912/// ::= .seh_save_freg
7913bool AArch64AsmParser::parseDirectiveSEHSaveFReg(SMLoc L) {
7914 unsigned Reg;
7915 int64_t Offset;
7916 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
7917 parseComma() || parseImmExpr(Offset))
7918 return true;
7919 getTargetStreamer().emitARM64WinCFISaveFReg(Reg, Offset);
7920 return false;
7921}
7922
7923/// parseDirectiveSEHSaveFRegX
7924/// ::= .seh_save_freg_x
7925bool AArch64AsmParser::parseDirectiveSEHSaveFRegX(SMLoc L) {
7926 unsigned Reg;
7927 int64_t Offset;
7928 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
7929 parseComma() || parseImmExpr(Offset))
7930 return true;
7931 getTargetStreamer().emitARM64WinCFISaveFRegX(Reg, Offset);
7932 return false;
7933}
7934
7935/// parseDirectiveSEHSaveFRegP
7936/// ::= .seh_save_fregp
7937bool AArch64AsmParser::parseDirectiveSEHSaveFRegP(SMLoc L) {
7938 unsigned Reg;
7939 int64_t Offset;
7940 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
7941 parseComma() || parseImmExpr(Offset))
7942 return true;
7943 getTargetStreamer().emitARM64WinCFISaveFRegP(Reg, Offset);
7944 return false;
7945}
7946
7947/// parseDirectiveSEHSaveFRegPX
7948/// ::= .seh_save_fregp_x
7949bool AArch64AsmParser::parseDirectiveSEHSaveFRegPX(SMLoc L) {
7950 unsigned Reg;
7951 int64_t Offset;
7952 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
7953 parseComma() || parseImmExpr(Offset))
7954 return true;
7955 getTargetStreamer().emitARM64WinCFISaveFRegPX(Reg, Offset);
7956 return false;
7957}
7958
/// parseDirectiveSEHSetFP
/// ::= .seh_set_fp
bool AArch64AsmParser::parseDirectiveSEHSetFP(SMLoc L) {
  // No operands: just record the opcode in the unwind info.
  getTargetStreamer().emitARM64WinCFISetFP();
  return false;
}
7965
7966/// parseDirectiveSEHAddFP
7967/// ::= .seh_add_fp
7968bool AArch64AsmParser::parseDirectiveSEHAddFP(SMLoc L) {
7969 int64_t Size;
7970 if (parseImmExpr(Size))
7971 return true;
7972 getTargetStreamer().emitARM64WinCFIAddFP(Size);
7973 return false;
7974}
7975
/// parseDirectiveSEHNop
/// ::= .seh_nop
bool AArch64AsmParser::parseDirectiveSEHNop(SMLoc L) {
  // No operands: just record the opcode in the unwind info.
  getTargetStreamer().emitARM64WinCFINop();
  return false;
}
7982
/// parseDirectiveSEHSaveNext
/// ::= .seh_save_next
bool AArch64AsmParser::parseDirectiveSEHSaveNext(SMLoc L) {
  // No operands: just record the opcode in the unwind info.
  getTargetStreamer().emitARM64WinCFISaveNext();
  return false;
}
7989
/// parseDirectiveSEHEpilogStart
/// ::= .seh_startepilogue
bool AArch64AsmParser::parseDirectiveSEHEpilogStart(SMLoc L) {
  // No operands: just record the opcode in the unwind info.
  getTargetStreamer().emitARM64WinCFIEpilogStart();
  return false;
}
7996
/// parseDirectiveSEHEpilogEnd
/// ::= .seh_endepilogue
bool AArch64AsmParser::parseDirectiveSEHEpilogEnd(SMLoc L) {
  // No operands: just record the opcode in the unwind info.
  getTargetStreamer().emitARM64WinCFIEpilogEnd();
  return false;
}
8003
/// parseDirectiveSEHTrapFrame
/// ::= .seh_trap_frame
bool AArch64AsmParser::parseDirectiveSEHTrapFrame(SMLoc L) {
  // No operands: just record the opcode in the unwind info.
  getTargetStreamer().emitARM64WinCFITrapFrame();
  return false;
}
8010
/// parseDirectiveSEHMachineFrame
/// ::= .seh_pushframe
bool AArch64AsmParser::parseDirectiveSEHMachineFrame(SMLoc L) {
  // No operands: just record the opcode in the unwind info.
  getTargetStreamer().emitARM64WinCFIMachineFrame();
  return false;
}
8017
/// parseDirectiveSEHContext
/// ::= .seh_context
bool AArch64AsmParser::parseDirectiveSEHContext(SMLoc L) {
  // No operands: just record the opcode in the unwind info.
  getTargetStreamer().emitARM64WinCFIContext();
  return false;
}
8024
/// parseDirectiveSEHECContext
/// ::= .seh_ec_context
bool AArch64AsmParser::parseDirectiveSEHECContext(SMLoc L) {
  // No operands: just record the opcode in the unwind info.
  getTargetStreamer().emitARM64WinCFIECContext();
  return false;
}
8031
/// parseDirectiveSEHClearUnwoundToCall
/// ::= .seh_clear_unwound_to_call
bool AArch64AsmParser::parseDirectiveSEHClearUnwoundToCall(SMLoc L) {
  // No operands: just record the opcode in the unwind info.
  getTargetStreamer().emitARM64WinCFIClearUnwoundToCall();
  return false;
}
8038
/// parseDirectiveSEHPACSignLR
/// ::= .seh_pac_sign_lr
bool AArch64AsmParser::parseDirectiveSEHPACSignLR(SMLoc L) {
  // No operands: just record the opcode in the unwind info.
  getTargetStreamer().emitARM64WinCFIPACSignLR();
  return false;
}
8045
/// parseDirectiveSEHSaveAnyReg
/// ::= .seh_save_any_reg
/// ::= .seh_save_any_reg_p
/// ::= .seh_save_any_reg_x
/// ::= .seh_save_any_reg_px
///
/// Operands: <reg>, <offset>. \p Paired selects the pair variants and
/// \p Writeback the pre-indexed variants; the streamer callback is chosen by
/// register class (integer / D / Q) and those two flags.
bool AArch64AsmParser::parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired,
                                                   bool Writeback) {
  MCRegister Reg;
  SMLoc Start, End;
  int64_t Offset;
  if (check(parseRegister(Reg, Start, End), getLoc(), "expected register") ||
      parseComma() || parseImmExpr(Offset))
    return true;

  // Integer registers: x0-x28, fp (encoded 29) and lr (encoded 30).
  if (Reg == AArch64::FP || Reg == AArch64::LR ||
      (Reg >= AArch64::X0 && Reg <= AArch64::X28)) {
    // Offsets must be non-negative and 16-byte aligned for paired or
    // writeback forms, otherwise 8-byte aligned.
    if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
      return Error(L, "invalid save_any_reg offset");
    unsigned EncodedReg;
    if (Reg == AArch64::FP)
      EncodedReg = 29;
    else if (Reg == AArch64::LR)
      EncodedReg = 30;
    else
      EncodedReg = Reg - AArch64::X0;
    if (Paired) {
      // lr (the last integer register) has no pair partner.
      if (Reg == AArch64::LR)
        return Error(Start, "lr cannot be paired with another register");
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegIPX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegIP(EncodedReg, Offset);
    } else {
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegIX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegI(EncodedReg, Offset);
    }
  // 64-bit floating-point registers d0-d31.
  } else if (Reg >= AArch64::D0 && Reg <= AArch64::D31) {
    unsigned EncodedReg = Reg - AArch64::D0;
    if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
      return Error(L, "invalid save_any_reg offset");
    if (Paired) {
      if (Reg == AArch64::D31)
        return Error(Start, "d31 cannot be paired with another register");
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegDPX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegDP(EncodedReg, Offset);
    } else {
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegDX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegD(EncodedReg, Offset);
    }
  // 128-bit vector registers q0-q31: always 16-byte aligned offsets.
  } else if (Reg >= AArch64::Q0 && Reg <= AArch64::Q31) {
    unsigned EncodedReg = Reg - AArch64::Q0;
    if (Offset < 0 || Offset % 16)
      return Error(L, "invalid save_any_reg offset");
    if (Paired) {
      if (Reg == AArch64::Q31)
        return Error(Start, "q31 cannot be paired with another register");
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegQPX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegQP(EncodedReg, Offset);
    } else {
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegQX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegQ(EncodedReg, Offset);
    }
  } else {
    return Error(Start, "save_any_reg register must be x, q or d register");
  }
  return false;
}
8123
8124/// parseDirectiveAllocZ
8125/// ::= .seh_allocz
8126bool AArch64AsmParser::parseDirectiveSEHAllocZ(SMLoc L) {
8127 int64_t Offset;
8128 if (parseImmExpr(Offset))
8129 return true;
8130 getTargetStreamer().emitARM64WinCFIAllocZ(Offset);
8131 return false;
8132}
8133
/// parseDirectiveSEHSaveZReg
/// ::= .seh_save_zreg
///
/// Operands: <zreg>, <offset>.
bool AArch64AsmParser::parseDirectiveSEHSaveZReg(SMLoc L) {
  MCRegister RegNum;
  StringRef Kind;
  int64_t Offset;
  ParseStatus Res =
      tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
  if (!Res.isSuccess())
    return true;
  // Only z8-z23 are accepted.
  if (check(RegNum < AArch64::Z8 || RegNum > AArch64::Z23, L,
            "expected register in range z8 to z23"))
    return true;
  if (parseComma() || parseImmExpr(Offset))
    return true;
  // The unwind opcode encodes the register as an index relative to z0.
  getTargetStreamer().emitARM64WinCFISaveZReg(RegNum - AArch64::Z0, Offset);
  return false;
}
8152
/// parseDirectiveSEHSavePReg
/// ::= .seh_save_preg
///
/// Operands: <preg>, <offset>.
bool AArch64AsmParser::parseDirectiveSEHSavePReg(SMLoc L) {
  MCRegister RegNum;
  StringRef Kind;
  int64_t Offset;
  ParseStatus Res =
      tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
  if (!Res.isSuccess())
    return true;
  // Only p4-p15 are accepted.
  if (check(RegNum < AArch64::P4 || RegNum > AArch64::P15, L,
            "expected register in range p4 to p15"))
    return true;
  if (parseComma() || parseImmExpr(Offset))
    return true;
  // The unwind opcode encodes the register as an index relative to p0.
  getTargetStreamer().emitARM64WinCFISavePReg(RegNum - AArch64::P0, Offset);
  return false;
}
8171
8172bool AArch64AsmParser::parseDirectiveAeabiSubSectionHeader(SMLoc L) {
8173 // Handle parsing of .aeabi_subsection directives
8174 // - On first declaration of a subsection, expect exactly three identifiers
8175 // after `.aeabi_subsection`: the subsection name and two parameters.
8176 // - When switching to an existing subsection, it is valid to provide only
8177 // the subsection name, or the name together with the two parameters.
8178 MCAsmParser &Parser = getParser();
8179
8180 // Consume the name (subsection name)
8181 StringRef SubsectionName;
8182 AArch64BuildAttributes::VendorID SubsectionNameID;
8183 if (Parser.getTok().is(AsmToken::Identifier)) {
8184 SubsectionName = Parser.getTok().getIdentifier();
8185 SubsectionNameID = AArch64BuildAttributes::getVendorID(SubsectionName);
8186 } else {
8187 Error(Parser.getTok().getLoc(), "subsection name not found");
8188 return true;
8189 }
8190 Parser.Lex();
8191
8192 std::unique_ptr<MCELFStreamer::AttributeSubSection> SubsectionExists =
8193 getTargetStreamer().getAttributesSubsectionByName(SubsectionName);
8194 // Check whether only the subsection name was provided.
8195 // If so, the user is trying to switch to a subsection that should have been
8196 // declared before.
8198 if (SubsectionExists) {
8199 getTargetStreamer().emitAttributesSubsection(
8200 SubsectionName,
8202 SubsectionExists->IsOptional),
8204 SubsectionExists->ParameterType));
8205 return false;
8206 }
8207 // If subsection does not exists, report error.
8208 else {
8209 Error(Parser.getTok().getLoc(),
8210 "Could not switch to subsection '" + SubsectionName +
8211 "' using subsection name, subsection has not been defined");
8212 return true;
8213 }
8214 }
8215
8216 // Otherwise, expecting 2 more parameters: consume a comma
8217 // parseComma() return *false* on success, and call Lex(), no need to call
8218 // Lex() again.
8219 if (Parser.parseComma()) {
8220 return true;
8221 }
8222
8223 // Consume the first parameter (optionality parameter)
8225 // options: optional/required
8226 if (Parser.getTok().is(AsmToken::Identifier)) {
8227 StringRef Optionality = Parser.getTok().getIdentifier();
8228 IsOptional = AArch64BuildAttributes::getOptionalID(Optionality);
8230 Error(Parser.getTok().getLoc(),
8232 return true;
8233 }
8234 if (SubsectionExists) {
8235 if (IsOptional != SubsectionExists->IsOptional) {
8236 Error(Parser.getTok().getLoc(),
8237 "optionality mismatch! subsection '" + SubsectionName +
8238 "' already exists with optionality defined as '" +
8240 SubsectionExists->IsOptional) +
8241 "' and not '" +
8242 AArch64BuildAttributes::getOptionalStr(IsOptional) + "'");
8243 return true;
8244 }
8245 }
8246 } else {
8247 Error(Parser.getTok().getLoc(),
8248 "optionality parameter not found, expected required|optional");
8249 return true;
8250 }
8251 // Check for possible IsOptional unaccepted values for known subsections
8252 if (AArch64BuildAttributes::AEABI_FEATURE_AND_BITS == SubsectionNameID) {
8253 if (AArch64BuildAttributes::REQUIRED == IsOptional) {
8254 Error(Parser.getTok().getLoc(),
8255 "aeabi_feature_and_bits must be marked as optional");
8256 return true;
8257 }
8258 }
8259 if (AArch64BuildAttributes::AEABI_PAUTHABI == SubsectionNameID) {
8260 if (AArch64BuildAttributes::OPTIONAL == IsOptional) {
8261 Error(Parser.getTok().getLoc(),
8262 "aeabi_pauthabi must be marked as required");
8263 return true;
8264 }
8265 }
8266 Parser.Lex();
8267 // consume a comma
8268 if (Parser.parseComma()) {
8269 return true;
8270 }
8271
8272 // Consume the second parameter (type parameter)
8274 if (Parser.getTok().is(AsmToken::Identifier)) {
8275 StringRef Name = Parser.getTok().getIdentifier();
8278 Error(Parser.getTok().getLoc(),
8280 return true;
8281 }
8282 if (SubsectionExists) {
8283 if (Type != SubsectionExists->ParameterType) {
8284 Error(Parser.getTok().getLoc(),
8285 "type mismatch! subsection '" + SubsectionName +
8286 "' already exists with type defined as '" +
8288 SubsectionExists->ParameterType) +
8289 "' and not '" + AArch64BuildAttributes::getTypeStr(Type) +
8290 "'");
8291 return true;
8292 }
8293 }
8294 } else {
8295 Error(Parser.getTok().getLoc(),
8296 "type parameter not found, expected uleb128|ntbs");
8297 return true;
8298 }
8299 // Check for possible unaccepted 'type' values for known subsections
8300 if (AArch64BuildAttributes::AEABI_FEATURE_AND_BITS == SubsectionNameID ||
8301 AArch64BuildAttributes::AEABI_PAUTHABI == SubsectionNameID) {
8303 Error(Parser.getTok().getLoc(),
8304 SubsectionName + " must be marked as ULEB128");
8305 return true;
8306 }
8307 }
8308 Parser.Lex();
8309
8310 // Parsing finished, check for trailing tokens.
8312 Error(Parser.getTok().getLoc(), "unexpected token for AArch64 build "
8313 "attributes subsection header directive");
8314 return true;
8315 }
8316
8317 getTargetStreamer().emitAttributesSubsection(SubsectionName, IsOptional, Type);
8318
8319 return false;
8320}
8321
8322bool AArch64AsmParser::parseDirectiveAeabiAArch64Attr(SMLoc L) {
8323 // Expecting 2 Tokens: after '.aeabi_attribute', e.g.:
8324 // .aeabi_attribute (1)Tag_Feature_BTI, (2)[uleb128|ntbs]
8325 // separated by a comma.
8326 MCAsmParser &Parser = getParser();
8327
8328 std::unique_ptr<MCELFStreamer::AttributeSubSection> ActiveSubsection =
8329 getTargetStreamer().getActiveAttributesSubsection();
8330 if (nullptr == ActiveSubsection) {
8331 Error(Parser.getTok().getLoc(),
8332 "no active subsection, build attribute can not be added");
8333 return true;
8334 }
8335 StringRef ActiveSubsectionName = ActiveSubsection->VendorName;
8336 unsigned ActiveSubsectionType = ActiveSubsection->ParameterType;
8337
8338 unsigned ActiveSubsectionID = AArch64BuildAttributes::VENDOR_UNKNOWN;
8340 AArch64BuildAttributes::AEABI_PAUTHABI) == ActiveSubsectionName)
8341 ActiveSubsectionID = AArch64BuildAttributes::AEABI_PAUTHABI;
8344 ActiveSubsectionName)
8346
8347 StringRef TagStr = "";
8348 unsigned Tag;
8349 if (Parser.getTok().is(AsmToken::Integer)) {
8350 Tag = getTok().getIntVal();
8351 } else if (Parser.getTok().is(AsmToken::Identifier)) {
8352 TagStr = Parser.getTok().getIdentifier();
8353 switch (ActiveSubsectionID) {
8355 // Tag was provided as an unrecognized string instead of an unsigned
8356 // integer
8357 Error(Parser.getTok().getLoc(), "unrecognized Tag: '" + TagStr +
8358 "' \nExcept for public subsections, "
8359 "tags have to be an unsigned int.");
8360 return true;
8361 break;
8365 Error(Parser.getTok().getLoc(), "unknown AArch64 build attribute '" +
8366 TagStr + "' for subsection '" +
8367 ActiveSubsectionName + "'");
8368 return true;
8369 }
8370 break;
8374 Error(Parser.getTok().getLoc(), "unknown AArch64 build attribute '" +
8375 TagStr + "' for subsection '" +
8376 ActiveSubsectionName + "'");
8377 return true;
8378 }
8379 break;
8380 }
8381 } else {
8382 Error(Parser.getTok().getLoc(), "AArch64 build attributes tag not found");
8383 return true;
8384 }
8385 Parser.Lex();
8386 // consume a comma
8387 // parseComma() return *false* on success, and call Lex(), no need to call
8388 // Lex() again.
8389 if (Parser.parseComma()) {
8390 return true;
8391 }
8392
8393 // Consume the second parameter (attribute value)
8394 unsigned ValueInt = unsigned(-1);
8395 std::string ValueStr = "";
8396 if (Parser.getTok().is(AsmToken::Integer)) {
8397 if (AArch64BuildAttributes::NTBS == ActiveSubsectionType) {
8398 Error(
8399 Parser.getTok().getLoc(),
8400 "active subsection type is NTBS (string), found ULEB128 (unsigned)");
8401 return true;
8402 }
8403 ValueInt = getTok().getIntVal();
8404 } else if (Parser.getTok().is(AsmToken::Identifier)) {
8405 if (AArch64BuildAttributes::ULEB128 == ActiveSubsectionType) {
8406 Error(
8407 Parser.getTok().getLoc(),
8408 "active subsection type is ULEB128 (unsigned), found NTBS (string)");
8409 return true;
8410 }
8411 ValueStr = Parser.getTok().getIdentifier();
8412 } else if (Parser.getTok().is(AsmToken::String)) {
8413 if (AArch64BuildAttributes::ULEB128 == ActiveSubsectionType) {
8414 Error(
8415 Parser.getTok().getLoc(),
8416 "active subsection type is ULEB128 (unsigned), found NTBS (string)");
8417 return true;
8418 }
8419 ValueStr = Parser.getTok().getString();
8420 } else {
8421 Error(Parser.getTok().getLoc(), "AArch64 build attributes value not found");
8422 return true;
8423 }
8424 // Check for possible unaccepted values for known tags
8425 // (AEABI_FEATURE_AND_BITS)
8426 if (ActiveSubsectionID == AArch64BuildAttributes::AEABI_FEATURE_AND_BITS) {
8427 if (0 != ValueInt && 1 != ValueInt) {
8428 Error(Parser.getTok().getLoc(),
8429 "unknown AArch64 build attributes Value for Tag '" + TagStr +
8430 "' options are 0|1");
8431 return true;
8432 }
8433 }
8434 Parser.Lex();
8435
8436 // Parsing finished. Check for trailing tokens.
8438 Error(Parser.getTok().getLoc(),
8439 "unexpected token for AArch64 build attributes tag and value "
8440 "attribute directive");
8441 return true;
8442 }
8443
8444 if (unsigned(-1) != ValueInt) {
8445 getTargetStreamer().emitAttribute(ActiveSubsectionName, Tag, ValueInt, "");
8446 }
8447 if ("" != ValueStr) {
8448 getTargetStreamer().emitAttribute(ActiveSubsectionName, Tag, unsigned(-1),
8449 ValueStr);
8450 }
8451 return false;
8452}
8453
8454bool AArch64AsmParser::parseExprWithSpecifier(const MCExpr *&Res, SMLoc &E) {
8455 SMLoc Loc = getLoc();
8456 if (getLexer().getKind() != AsmToken::Identifier)
8457 return TokError("expected '%' relocation specifier");
8458 StringRef Identifier = getParser().getTok().getIdentifier();
8459 auto Spec = AArch64::parsePercentSpecifierName(Identifier);
8460 if (!Spec)
8461 return TokError("invalid relocation specifier");
8462
8463 getParser().Lex(); // Eat the identifier
8464 if (parseToken(AsmToken::LParen, "expected '('"))
8465 return true;
8466
8467 const MCExpr *SubExpr;
8468 if (getParser().parseParenExpression(SubExpr, E))
8469 return true;
8470
8471 Res = MCSpecifierExpr::create(SubExpr, Spec, getContext(), Loc);
8472 return false;
8473}
8474
/// Parse an expression in a data directive, handling AArch64-specific
/// relocation-specifier syntax: a leading '%spec(expr)' form, or a trailing
/// '@specifier' after a symbol (e.g. sym@AUTH(...), or sym@got on Mach-O).
/// Returns true on error, false with the parsed expression in \p Res.
bool AArch64AsmParser::parseDataExpr(const MCExpr *&Res) {
  SMLoc EndLoc;
  // '%' introduces the percent-specifier form; hand off entirely.
  if (parseOptionalToken(AsmToken::Percent))
    return parseExprWithSpecifier(Res, EndLoc);

  if (getParser().parseExpression(Res))
    return true;
  MCAsmParser &Parser = getParser();
  // No '@' suffix: the plain expression is the result.
  if (!parseOptionalToken(AsmToken::At))
    return false;
  if (getLexer().getKind() != AsmToken::Identifier)
    return Error(getLoc(), "expected relocation specifier");

  // Specifier names are matched case-insensitively.
  std::string Identifier = Parser.getTok().getIdentifier().lower();
  SMLoc Loc = getLoc();
  Lex();
  // '@AUTH(key, disc[, addr])' — pointer-authentication signing.
  if (Identifier == "auth")
    return parseAuthExpr(Res, EndLoc);

  auto Spec = AArch64::S_None;
  // Only Mach-O supports '@got' here; other specifiers are rejected below.
  if (STI->getTargetTriple().isOSBinFormatMachO()) {
    if (Identifier == "got")
      Spec = AArch64::S_MACHO_GOT;
  }
  if (Spec == AArch64::S_None)
    return Error(Loc, "invalid relocation specifier");
  // The specifier is attached by rebuilding the symbol reference; anything
  // other than a bare symbol cannot carry it.
  if (auto *SRE = dyn_cast<MCSymbolRefExpr>(Res))
    Res = MCSymbolRefExpr::create(&SRE->getSymbol(), Spec, getContext(),
                                  SRE->getLoc());
  else
    return Error(Loc, "@ specifier only allowed after a symbol");

  // Allow a following '+ expr' / '- expr' addend chain after the specifier.
  for (;;) {
    std::optional<MCBinaryExpr::Opcode> Opcode;
    if (parseOptionalToken(AsmToken::Plus))
      Opcode = MCBinaryExpr::Add;
    else if (parseOptionalToken(AsmToken::Minus))
      Opcode = MCBinaryExpr::Sub;
    else
      break;
    const MCExpr *Term;
    if (getParser().parsePrimaryExpr(Term, EndLoc, nullptr))
      return true;
    Res = MCBinaryExpr::create(*Opcode, Res, Term, getContext(), Res->getLoc());
  }
  return false;
}
8522
8523/// parseAuthExpr
8524/// ::= _sym@AUTH(ib,123[,addr])
8525/// ::= (_sym + 5)@AUTH(ib,123[,addr])
8526/// ::= (_sym - 5)@AUTH(ib,123[,addr])
8527bool AArch64AsmParser::parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc) {
8528 MCAsmParser &Parser = getParser();
8529 MCContext &Ctx = getContext();
8530 AsmToken Tok = Parser.getTok();
8531
8532 // At this point, we encountered "<id>@AUTH". There is no fallback anymore.
8533 if (parseToken(AsmToken::LParen, "expected '('"))
8534 return true;
8535
8536 if (Parser.getTok().isNot(AsmToken::Identifier))
8537 return TokError("expected key name");
8538
8539 StringRef KeyStr = Parser.getTok().getIdentifier();
8540 auto KeyIDOrNone = AArch64StringToPACKeyID(KeyStr);
8541 if (!KeyIDOrNone)
8542 return TokError("invalid key '" + KeyStr + "'");
8543 Parser.Lex();
8544
8545 if (parseToken(AsmToken::Comma, "expected ','"))
8546 return true;
8547
8548 if (Parser.getTok().isNot(AsmToken::Integer))
8549 return TokError("expected integer discriminator");
8550 int64_t Discriminator = Parser.getTok().getIntVal();
8551
8552 if (!isUInt<16>(Discriminator))
8553 return TokError("integer discriminator " + Twine(Discriminator) +
8554 " out of range [0, 0xFFFF]");
8555 Parser.Lex();
8556
8557 bool UseAddressDiversity = false;
8558 if (Parser.getTok().is(AsmToken::Comma)) {
8559 Parser.Lex();
8560 if (Parser.getTok().isNot(AsmToken::Identifier) ||
8561 Parser.getTok().getIdentifier() != "addr")
8562 return TokError("expected 'addr'");
8563 UseAddressDiversity = true;
8564 Parser.Lex();
8565 }
8566
8567 EndLoc = Parser.getTok().getEndLoc();
8568 if (parseToken(AsmToken::RParen, "expected ')'"))
8569 return true;
8570
8571 Res = AArch64AuthMCExpr::create(Res, Discriminator, *KeyIDOrNone,
8572 UseAddressDiversity, Ctx, Res->getLoc());
8573 return false;
8574}
8575
/// Decompose \p Expr into its specifier, symbol-reference kind, and constant
/// addend. Returns true if the expression is a valid "symbol [+ addend]"
/// form (outputs filled in), false otherwise. ELFSpec carries an explicit
/// MCSpecifierExpr wrapper (e.g. ":lo12:"); DarwinSpec carries the
/// symbol-reference kind used by Darwin syntax.
bool AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
                                         AArch64::Specifier &ELFSpec,
                                         AArch64::Specifier &DarwinSpec,
                                         int64_t &Addend) {
  ELFSpec = AArch64::S_INVALID;
  DarwinSpec = AArch64::S_None;
  Addend = 0;

  // Peel off an explicit specifier wrapper, if present, and classify the
  // expression underneath it.
  if (auto *AE = dyn_cast<MCSpecifierExpr>(Expr)) {
    ELFSpec = AE->getSpecifier();
    Expr = AE->getSubExpr();
  }

  const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
  if (SE) {
    // It's a simple symbol reference with no addend.
    DarwinSpec = AArch64::Specifier(SE->getKind());
    return true;
  }

  // Check that it looks like a symbol + an addend
  MCValue Res;
  bool Relocatable = Expr->evaluateAsRelocatable(Res, nullptr);
  // A subtracted symbol (A - B) is not a simple symbol+addend form.
  if (!Relocatable || Res.getSubSym())
    return false;

  // Treat expressions with an ELFSpec (like ":abs_g1:3", or
  // ":abs_g1:x" where x is constant) as symbolic even if there is no symbol.
  if (!Res.getAddSym() && ELFSpec == AArch64::S_INVALID)
    return false;

  if (Res.getAddSym())
    DarwinSpec = AArch64::Specifier(Res.getSpecifier());
  Addend = Res.getConstant();

  // It's some symbol reference + a constant addend, but really
  // shouldn't use both Darwin and ELF syntax.
  return ELFSpec == AArch64::S_INVALID || DarwinSpec == AArch64::S_None;
}
8615
8616/// Force static initialization.
8617extern "C" LLVM_ABI LLVM_EXTERNAL_VISIBILITY void
8625
8626#define GET_REGISTER_MATCHER
8627#define GET_SUBTARGET_FEATURE_NAME
8628#define GET_MATCHER_IMPLEMENTATION
8629#define GET_MNEMONIC_SPELL_CHECKER
8630#include "AArch64GenAsmMatcher.inc"
8631
8632// Define this matcher function after the auto-generated include so we
8633// have the match class enum definitions.
8634unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
8635 unsigned Kind) {
8636 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
8637
8638 auto MatchesOpImmediate = [&](int64_t ExpectedVal) -> MatchResultTy {
8639 if (!Op.isImm())
8640 return Match_InvalidOperand;
8641 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
8642 if (!CE)
8643 return Match_InvalidOperand;
8644 if (CE->getValue() == ExpectedVal)
8645 return Match_Success;
8646 return Match_InvalidOperand;
8647 };
8648
8649 switch (Kind) {
8650 default:
8651 return Match_InvalidOperand;
8652 case MCK_MPR:
8653 // If the Kind is a token for the MPR register class which has the "za"
8654 // register (SME accumulator array), check if the asm is a literal "za"
8655 // token. This is for the "smstart za" alias that defines the register
8656 // as a literal token.
8657 if (Op.isTokenEqual("za"))
8658 return Match_Success;
8659 return Match_InvalidOperand;
8660
8661 // If the kind is a token for a literal immediate, check if our asm operand
8662 // matches. This is for InstAliases which have a fixed-value immediate in
8663 // the asm string, such as hints which are parsed into a specific
8664 // instruction definition.
8665#define MATCH_HASH(N) \
8666 case MCK__HASH_##N: \
8667 return MatchesOpImmediate(N);
8668 MATCH_HASH(0)
8669 MATCH_HASH(1)
8670 MATCH_HASH(2)
8671 MATCH_HASH(3)
8672 MATCH_HASH(4)
8673 MATCH_HASH(6)
8674 MATCH_HASH(7)
8675 MATCH_HASH(8)
8676 MATCH_HASH(10)
8677 MATCH_HASH(12)
8678 MATCH_HASH(14)
8679 MATCH_HASH(16)
8680 MATCH_HASH(24)
8681 MATCH_HASH(25)
8682 MATCH_HASH(26)
8683 MATCH_HASH(27)
8684 MATCH_HASH(28)
8685 MATCH_HASH(29)
8686 MATCH_HASH(30)
8687 MATCH_HASH(31)
8688 MATCH_HASH(32)
8689 MATCH_HASH(40)
8690 MATCH_HASH(48)
8691 MATCH_HASH(64)
8692#undef MATCH_HASH
8693#define MATCH_HASH_MINUS(N) \
8694 case MCK__HASH__MINUS_##N: \
8695 return MatchesOpImmediate(-N);
8699#undef MATCH_HASH_MINUS
8700 }
8701}
8702
/// Parse a consecutive even/odd GPR pair operand (e.g. "x0, x1" or "w2, w3"
/// for CASP-style instructions). Both registers must be the same width, the
/// first even-encoded and the second its immediate successor; the pair is
/// pushed as a single sequence-pair super-register operand.
ParseStatus AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {

  SMLoc S = getLoc();

  if (getTok().isNot(AsmToken::Identifier))
    return Error(S, "expected register");

  MCRegister FirstReg;
  ParseStatus Res = tryParseScalarRegister(FirstReg);
  if (!Res.isSuccess())
    return Error(S, "expected first even register of a consecutive same-size "
                    "even/odd register pair");

  const MCRegisterClass &WRegClass =
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
  const MCRegisterClass &XRegClass =
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID];

  // The first register fixes the width (w vs x) the second must match.
  bool isXReg = XRegClass.contains(FirstReg),
       isWReg = WRegClass.contains(FirstReg);
  if (!isXReg && !isWReg)
    return Error(S, "expected first even register of a consecutive same-size "
                    "even/odd register pair");

  const MCRegisterInfo *RI = getContext().getRegisterInfo();
  unsigned FirstEncoding = RI->getEncodingValue(FirstReg);

  // The leading register of the pair must have an even encoding.
  if (FirstEncoding & 0x1)
    return Error(S, "expected first even register of a consecutive same-size "
                    "even/odd register pair");

  if (getTok().isNot(AsmToken::Comma))
    return Error(getLoc(), "expected comma");
  // Eat the comma
  Lex();

  SMLoc E = getLoc();
  MCRegister SecondReg;
  Res = tryParseScalarRegister(SecondReg);
  if (!Res.isSuccess())
    return Error(E, "expected second odd register of a consecutive same-size "
                    "even/odd register pair");

  // Second register must be the odd successor of the first, same width.
  if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
      (isXReg && !XRegClass.contains(SecondReg)) ||
      (isWReg && !WRegClass.contains(SecondReg)))
    return Error(E, "expected second odd register of a consecutive same-size "
                    "even/odd register pair");

  // Fold the two registers into the covering sequence-pair register.
  MCRegister Pair;
  if (isXReg) {
    Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
           &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
  } else {
    Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
           &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
  }

  Operands.push_back(AArch64Operand::CreateReg(Pair, RegKind::Scalar, S,
                                               getLoc(), getContext()));

  return ParseStatus::Success;
}
8766
8767template <bool ParseShiftExtend, bool ParseSuffix>
8768ParseStatus AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
8769 const SMLoc S = getLoc();
8770 // Check for a SVE vector register specifier first.
8771 MCRegister RegNum;
8772 StringRef Kind;
8773
8774 ParseStatus Res =
8775 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
8776
8777 if (!Res.isSuccess())
8778 return Res;
8779
8780 if (ParseSuffix && Kind.empty())
8781 return ParseStatus::NoMatch;
8782
8783 const auto &KindRes = parseVectorKind(Kind, RegKind::SVEDataVector);
8784 if (!KindRes)
8785 return ParseStatus::NoMatch;
8786
8787 unsigned ElementWidth = KindRes->second;
8788
8789 // No shift/extend is the default.
8790 if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
8791 Operands.push_back(AArch64Operand::CreateVectorReg(
8792 RegNum, RegKind::SVEDataVector, ElementWidth, S, S, getContext()));
8793
8794 ParseStatus Res = tryParseVectorIndex(Operands);
8795 if (Res.isFailure())
8796 return ParseStatus::Failure;
8797 return ParseStatus::Success;
8798 }
8799
8800 // Eat the comma
8801 Lex();
8802
8803 // Match the shift
8805 Res = tryParseOptionalShiftExtend(ExtOpnd);
8806 if (!Res.isSuccess())
8807 return Res;
8808
8809 auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
8810 Operands.push_back(AArch64Operand::CreateVectorReg(
8811 RegNum, RegKind::SVEDataVector, ElementWidth, S, Ext->getEndLoc(),
8812 getContext(), Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
8813 Ext->hasShiftExtendAmount()));
8814
8815 return ParseStatus::Success;
8816}
8817
8818ParseStatus AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) {
8819 MCAsmParser &Parser = getParser();
8820
8821 SMLoc SS = getLoc();
8822 const AsmToken &TokE = getTok();
8823 bool IsHash = TokE.is(AsmToken::Hash);
8824
8825 if (!IsHash && TokE.isNot(AsmToken::Identifier))
8826 return ParseStatus::NoMatch;
8827
8828 int64_t Pattern;
8829 if (IsHash) {
8830 Lex(); // Eat hash
8831
8832 // Parse the immediate operand.
8833 const MCExpr *ImmVal;
8834 SS = getLoc();
8835 if (Parser.parseExpression(ImmVal))
8836 return ParseStatus::Failure;
8837
8838 auto *MCE = dyn_cast<MCConstantExpr>(ImmVal);
8839 if (!MCE)
8840 return TokError("invalid operand for instruction");
8841
8842 Pattern = MCE->getValue();
8843 } else {
8844 // Parse the pattern
8845 auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(TokE.getString());
8846 if (!Pat)
8847 return ParseStatus::NoMatch;
8848
8849 Lex();
8850 Pattern = Pat->Encoding;
8851 assert(Pattern >= 0 && Pattern < 32);
8852 }
8853
8854 Operands.push_back(
8855 AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
8856 SS, getLoc(), getContext()));
8857
8858 return ParseStatus::Success;
8859}
8860
8861ParseStatus
8862AArch64AsmParser::tryParseSVEVecLenSpecifier(OperandVector &Operands) {
8863 int64_t Pattern;
8864 SMLoc SS = getLoc();
8865 const AsmToken &TokE = getTok();
8866 // Parse the pattern
8867 auto Pat = AArch64SVEVecLenSpecifier::lookupSVEVECLENSPECIFIERByName(
8868 TokE.getString());
8869 if (!Pat)
8870 return ParseStatus::NoMatch;
8871
8872 Lex();
8873 Pattern = Pat->Encoding;
8874 assert(Pattern >= 0 && Pattern <= 1 && "Pattern does not exist");
8875
8876 Operands.push_back(
8877 AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
8878 SS, getLoc(), getContext()));
8879
8880 return ParseStatus::Success;
8881}
8882
8883ParseStatus AArch64AsmParser::tryParseGPR64x8(OperandVector &Operands) {
8884 SMLoc SS = getLoc();
8885
8886 MCRegister XReg;
8887 if (!tryParseScalarRegister(XReg).isSuccess())
8888 return ParseStatus::NoMatch;
8889
8890 MCContext &ctx = getContext();
8891 const MCRegisterInfo *RI = ctx.getRegisterInfo();
8892 MCRegister X8Reg = RI->getMatchingSuperReg(
8893 XReg, AArch64::x8sub_0,
8894 &AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID]);
8895 if (!X8Reg)
8896 return Error(SS,
8897 "expected an even-numbered x-register in the range [x0,x22]");
8898
8899 Operands.push_back(
8900 AArch64Operand::CreateReg(X8Reg, RegKind::Scalar, SS, getLoc(), ctx));
8901 return ParseStatus::Success;
8902}
8903
8904ParseStatus AArch64AsmParser::tryParseImmRange(OperandVector &Operands) {
8905 SMLoc S = getLoc();
8906
8907 if (getTok().isNot(AsmToken::Integer))
8908 return ParseStatus::NoMatch;
8909
8910 if (getLexer().peekTok().isNot(AsmToken::Colon))
8911 return ParseStatus::NoMatch;
8912
8913 const MCExpr *ImmF;
8914 if (getParser().parseExpression(ImmF))
8915 return ParseStatus::NoMatch;
8916
8917 if (getTok().isNot(AsmToken::Colon))
8918 return ParseStatus::NoMatch;
8919
8920 Lex(); // Eat ':'
8921 if (getTok().isNot(AsmToken::Integer))
8922 return ParseStatus::NoMatch;
8923
8924 SMLoc E = getTok().getLoc();
8925 const MCExpr *ImmL;
8926 if (getParser().parseExpression(ImmL))
8927 return ParseStatus::NoMatch;
8928
8929 unsigned ImmFVal = cast<MCConstantExpr>(ImmF)->getValue();
8930 unsigned ImmLVal = cast<MCConstantExpr>(ImmL)->getValue();
8931
8932 Operands.push_back(
8933 AArch64Operand::CreateImmRange(ImmFVal, ImmLVal, S, E, getContext()));
8934 return ParseStatus::Success;
8935}
8936
8937template <int Adj>
8938ParseStatus AArch64AsmParser::tryParseAdjImm0_63(OperandVector &Operands) {
8939 SMLoc S = getLoc();
8940
8941 parseOptionalToken(AsmToken::Hash);
8942 bool IsNegative = parseOptionalToken(AsmToken::Minus);
8943
8944 if (getTok().isNot(AsmToken::Integer))
8945 return ParseStatus::NoMatch;
8946
8947 const MCExpr *Ex;
8948 if (getParser().parseExpression(Ex))
8949 return ParseStatus::NoMatch;
8950
8951 int64_t Imm = dyn_cast<MCConstantExpr>(Ex)->getValue();
8952 if (IsNegative)
8953 Imm = -Imm;
8954
8955 // We want an adjusted immediate in the range [0, 63]. If we don't have one,
8956 // return a value, which is certain to trigger a error message about invalid
8957 // immediate range instead of a non-descriptive invalid operand error.
8958 static_assert(Adj == 1 || Adj == -1, "Unsafe immediate adjustment");
8959 if (Imm == INT64_MIN || Imm == INT64_MAX || Imm + Adj < 0 || Imm + Adj > 63)
8960 Imm = -2;
8961 else
8962 Imm += Adj;
8963
8964 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
8965 Operands.push_back(AArch64Operand::CreateImm(
8967
8968 return ParseStatus::Success;
8969}
static bool isGPR64(unsigned Reg, unsigned SubReg, const MachineRegisterInfo *MRI)
#define MATCH_HASH_MINUS(N)
static unsigned matchSVEDataVectorRegName(StringRef Name)
static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind)
static void ExpandCryptoAEK(const AArch64::ArchInfo &ArchInfo, SmallVector< StringRef, 4 > &RequestedExtensions)
static unsigned matchSVEPredicateAsCounterRegName(StringRef Name)
static MCRegister MatchRegisterName(StringRef Name)
static bool isMatchingOrAlias(MCRegister ZReg, MCRegister Reg)
LLVM_ABI LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAArch64AsmParser()
Force static initialization.
static const char * getSubtargetFeatureName(uint64_t Val)
static unsigned MatchNeonVectorRegName(StringRef Name)
}
static std::optional< std::pair< int, int > > parseVectorKind(StringRef Suffix, RegKind VectorKind)
Returns an optional pair of (elements, element-width) if Suffix is a valid vector kind.
static unsigned matchMatrixRegName(StringRef Name)
static unsigned matchMatrixTileListRegName(StringRef Name)
static std::string AArch64MnemonicSpellCheck(StringRef S, const FeatureBitset &FBS, unsigned VariantID=0)
static SMLoc incrementLoc(SMLoc L, int Offset)
#define MATCH_HASH(N)
static const struct Extension ExtensionMap[]
static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str)
static unsigned matchSVEPredicateVectorRegName(StringRef Name)
static AArch64CC::CondCode parseCondCode(ArrayRef< MachineOperand > Cond)
static SDValue getCondCode(SelectionDAG &DAG, AArch64CC::CondCode CC)
Like SelectionDAG::getCondCode(), but for AArch64 condition codes.
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file defines the StringMap class.
static bool isNot(const MachineRegisterInfo &MRI, const MachineInstr &MI)
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
#define X(NUM, ENUM, NAME)
Definition ELF.h:849
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
#define LLVM_ABI
Definition Compiler.h:213
#define LLVM_EXTERNAL_VISIBILITY
Definition Compiler.h:132
@ Default
Value * getPointer(Value *Ptr)
static LVOptions Options
Definition LVOptions.cpp:25
Live Register Matrix
loop data Loop Data Prefetch
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Register Reg
#define T
static MCRegister getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
static bool isReg(const MCInst &MI, unsigned OpNo)
const SmallVectorImpl< MachineOperand > & Cond
This file contains some templates that are useful if you are working with the STL at all.
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition Value.cpp:487
This file defines the SmallSet class.
This file defines the SmallVector class.
This file contains some functions that are useful when dealing with strings.
This file implements the StringSwitch template, which mimics a switch() statement whose cases are str...
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static const AArch64AuthMCExpr * create(const MCExpr *Expr, uint16_t Discriminator, AArch64PACKey::ID Key, bool HasAddressDiversity, MCContext &Ctx, SMLoc Loc=SMLoc())
static const char * getRegisterName(MCRegister Reg, unsigned AltIdx=AArch64::NoRegAltName)
APInt bitcastToAPInt() const
Definition APFloat.h:1408
bool isSignedIntN(unsigned N) const
Check if this APInt has an N-bits signed integer value.
Definition APInt.h:436
bool isIntN(unsigned N) const
Check if this APInt has an N-bits unsigned integer value.
Definition APInt.h:433
int64_t getSExtValue() const
Get sign extended value.
Definition APInt.h:1577
const AsmToken peekTok(bool ShouldSkipSpace=true)
Look ahead at the next token to be lexed.
Definition AsmLexer.h:121
void UnLex(AsmToken const &Token)
Definition AsmLexer.h:106
LLVM_ABI SMLoc getLoc() const
Definition AsmLexer.cpp:31
int64_t getIntVal() const
Definition MCAsmMacro.h:108
bool isNot(TokenKind K) const
Definition MCAsmMacro.h:76
StringRef getString() const
Get the string for the current token, this includes all characters (for example, the quotes on string...
Definition MCAsmMacro.h:103
bool is(TokenKind K) const
Definition MCAsmMacro.h:75
LLVM_ABI SMLoc getEndLoc() const
Definition AsmLexer.cpp:33
StringRef getIdentifier() const
Get the identifier string for the current token, which should be an identifier or a string.
Definition MCAsmMacro.h:92
Base class for user error types.
Definition Error.h:354
Container class for subtarget features.
constexpr size_t size() const
This class is intended to be used as a base class for asm properties and features specific to the tar...
Definition MCAsmInfo.h:64
void printExpr(raw_ostream &, const MCExpr &) const
virtual void Initialize(MCAsmParser &Parser)
Initialize the extension for parsing using the given Parser.
virtual bool parseExpression(const MCExpr *&Res, SMLoc &EndLoc)=0
Parse an arbitrary expression.
AsmLexer & getLexer()
const AsmToken & getTok() const
Get the current AsmToken from the stream.
virtual const AsmToken & Lex()=0
Get the next AsmToken in the stream, possibly handling file inclusion first.
virtual void addAliasForDirective(StringRef Directive, StringRef Alias)=0
static LLVM_ABI const MCBinaryExpr * create(Opcode Op, const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.cpp:201
@ Sub
Subtraction.
Definition MCExpr.h:324
@ Add
Addition.
Definition MCExpr.h:302
int64_t getValue() const
Definition MCExpr.h:171
static LLVM_ABI const MCConstantExpr * create(int64_t Value, MCContext &Ctx, bool PrintInHex=false, unsigned SizeInBytes=0)
Definition MCExpr.cpp:212
const MCRegisterInfo * getRegisterInfo() const
Definition MCContext.h:414
LLVM_ABI bool evaluateAsRelocatable(MCValue &Res, const MCAssembler *Asm) const
Try to evaluate the expression to a relocatable value, i.e.
Definition MCExpr.cpp:450
SMLoc getLoc() const
Definition MCExpr.h:86
unsigned getNumOperands() const
Definition MCInst.h:212
void setLoc(SMLoc loc)
Definition MCInst.h:207
unsigned getOpcode() const
Definition MCInst.h:202
void addOperand(const MCOperand Op)
Definition MCInst.h:215
void setOpcode(unsigned Op)
Definition MCInst.h:201
const MCOperand & getOperand(unsigned i) const
Definition MCInst.h:210
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specified operand constraint if it is present.
static MCOperand createExpr(const MCExpr *Val)
Definition MCInst.h:166
int64_t getImm() const
Definition MCInst.h:84
static MCOperand createReg(MCRegister Reg)
Definition MCInst.h:138
static MCOperand createImm(int64_t Val)
Definition MCInst.h:145
bool isImm() const
Definition MCInst.h:66
bool isReg() const
Definition MCInst.h:65
MCRegister getReg() const
Returns the register number.
Definition MCInst.h:73
const MCExpr * getExpr() const
Definition MCInst.h:118
bool isExpr() const
Definition MCInst.h:69
MCParsedAsmOperand - This abstract class represents a source-level assembly instruction operand.
virtual MCRegister getReg() const =0
MCRegister getRegister(unsigned i) const
getRegister - Return the specified register in the class.
bool contains(MCRegister Reg) const
contains - Return true if the specified register is included in this register class.
MCRegister getMatchingSuperReg(MCRegister Reg, unsigned SubIdx, const MCRegisterClass *RC) const
Return a super-register of the specified register Reg so its sub-register of index SubIdx is Reg.
const char * getName(MCRegister RegNo) const
Return the human-readable symbolic target-specific name for the specified physical register.
uint16_t getEncodingValue(MCRegister Reg) const
Returns the encoding for Reg.
bool isSubRegisterEq(MCRegister RegA, MCRegister RegB) const
Returns true if RegB is a sub-register of RegA or if RegB == RegA.
const MCRegisterClass & getRegClass(unsigned i) const
Returns the register class associated with the enumeration value.
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:41
constexpr unsigned id() const
Definition MCRegister.h:82
static const MCSpecifierExpr * create(const MCExpr *Expr, Spec S, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.cpp:743
Streaming machine code generation interface.
Definition MCStreamer.h:221
virtual void emitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI)
Emit the given Instruction into the current section.
MCTargetStreamer * getTargetStreamer()
Definition MCStreamer.h:332
const Triple & getTargetTriple() const
const FeatureBitset & getFeatureBits() const
void setDefaultFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS)
Set the features to the default for the given CPU and TuneCPU, with ano appended feature string.
const FeatureBitset & ClearFeatureBitsTransitively(const FeatureBitset &FB)
const FeatureBitset & SetFeatureBitsTransitively(const FeatureBitset &FB)
Set/clear additional feature bits, including all other bits they imply.
VariantKind getKind() const
Definition MCExpr.h:232
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.h:214
MCTargetAsmParser - Generic interface to target specific assembly parsers.
virtual bool areEqualRegs(const MCParsedAsmOperand &Op1, const MCParsedAsmOperand &Op2) const
Returns whether two operands are registers and are equal.
const MCSymbol * getAddSym() const
Definition MCValue.h:49
int64_t getConstant() const
Definition MCValue.h:44
uint32_t getSpecifier() const
Definition MCValue.h:46
const MCSymbol * getSubSym() const
Definition MCValue.h:51
Ternary parse status returned by various parse* methods.
constexpr bool isFailure() const
static constexpr StatusTy Failure
constexpr bool isSuccess() const
static constexpr StatusTy Success
static constexpr StatusTy NoMatch
constexpr bool isNoMatch() const
constexpr unsigned id() const
Definition Register.h:100
Represents a location in source code.
Definition SMLoc.h:22
static SMLoc getFromPointer(const char *Ptr)
Definition SMLoc.h:35
constexpr const char * getPointer() const
Definition SMLoc.h:33
void insert_range(Range &&R)
Definition SmallSet.h:196
bool contains(const T &V) const
Check if the SmallSet contains the given element.
Definition SmallSet.h:229
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition SmallSet.h:184
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
iterator end()
Definition StringMap.h:224
iterator find(StringRef Key)
Definition StringMap.h:237
void erase(iterator I)
Definition StringMap.h:427
bool insert(MapEntryTy *KeyValue)
insert - Insert the specified key/value pair into the map.
Definition StringMap.h:321
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition StringRef.h:730
static constexpr size_t npos
Definition StringRef.h:57
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition StringRef.h:490
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:258
constexpr bool empty() const
empty - Check if the string is empty.
Definition StringRef.h:140
StringRef drop_front(size_t N=1) const
Return a StringRef equal to 'this' but with the first N elements dropped.
Definition StringRef.h:629
LLVM_ABI std::string upper() const
Convert the given ASCII string to uppercase.
constexpr size_t size() const
size - Get the string size.
Definition StringRef.h:143
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition StringRef.h:137
StringRef take_back(size_t N=1) const
Return a StringRef equal to 'this' but with only the last N elements remaining.
Definition StringRef.h:609
StringRef trim(char Char) const
Return string with consecutive Char characters starting from the left and right removed.
Definition StringRef.h:844
LLVM_ABI std::string lower() const
bool equals_insensitive(StringRef RHS) const
Check for string equality, ignoring case.
Definition StringRef.h:169
A switch()-like statement whose cases are string literals.
StringSwitch & Case(StringLiteral S, T Value)
bool isOSBinFormatMachO() const
Tests whether the environment is MachO.
Definition Triple.h:823
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
#define INT64_MIN
Definition DataTypes.h:74
#define INT64_MAX
Definition DataTypes.h:71
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
SubsectionType getTypeID(StringRef Type)
StringRef getVendorName(unsigned const Vendor)
StringRef getOptionalStr(unsigned Optional)
VendorID
AArch64 build attributes vendors IDs (a.k.a subsection name)
SubsectionOptional getOptionalID(StringRef Optional)
FeatureAndBitsTags getFeatureAndBitsTagsID(StringRef FeatureAndBitsTag)
VendorID getVendorID(StringRef const Vendor)
PauthABITags getPauthABITagsID(StringRef PauthABITag)
StringRef getTypeStr(unsigned Type)
static CondCode getInvertedCondCode(CondCode Code)
const PHint * lookupPHintByName(StringRef)
uint32_t parseGenericRegister(StringRef Name)
static bool isMOVNMovAlias(uint64_t Value, int Shift, int RegWidth)
static unsigned getShiftValue(unsigned Imm)
getShiftValue - Extract the shift value.
static bool isLogicalImmediate(uint64_t imm, unsigned regSize)
isLogicalImmediate - Return true if the immediate is valid for a logical immediate instruction of the...
static bool isSVEAddSubImm(int64_t Imm)
Returns true if Imm is valid for ADD/SUB.
static unsigned getArithExtendImm(AArch64_AM::ShiftExtendType ET, unsigned Imm)
getArithExtendImm - Encode the extend type and shift amount for an arithmetic instruction: imm: 3-bit...
static float getFPImmFloat(unsigned Imm)
static uint8_t encodeAdvSIMDModImmType10(uint64_t Imm)
static bool isMOVZMovAlias(uint64_t Value, int Shift, int RegWidth)
static uint64_t encodeLogicalImmediate(uint64_t imm, unsigned regSize)
encodeLogicalImmediate - Return the encoded immediate value for a logical immediate instruction of th...
static const char * getShiftExtendName(AArch64_AM::ShiftExtendType ST)
getShiftName - Get the string encoding for the shift type.
static bool isSVECpyImm(int64_t Imm)
Returns true if Imm is valid for CPY/DUP.
static int getFP64Imm(const APInt &Imm)
getFP64Imm - Return an 8-bit floating-point version of the 64-bit floating-point value.
static bool isAdvSIMDModImmType10(uint64_t Imm)
static unsigned getShifterImm(AArch64_AM::ShiftExtendType ST, unsigned Imm)
getShifterImm - Encode the shift type and amount: imm: 6-bit shift amount shifter: 000 ==> lsl 001 ==...
Specifier parsePercentSpecifierName(StringRef)
LLVM_ABI const ArchInfo * parseArch(StringRef Arch)
LLVM_ABI const ArchInfo * getArchForCpu(StringRef CPU)
LLVM_ABI bool getExtensionFeatures(const AArch64::ExtensionBitset &Extensions, std::vector< StringRef > &Features)
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
bool isPredicated(const MCInst &MI, const MCInstrInfo *MCII)
@ Entry
Definition COFF.h:862
@ Tail
Attemps to make calls as fast as possible while guaranteeing that tail call optimization can always b...
Definition CallingConv.h:76
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
float getFPImm(unsigned Imm)
@ CE
Windows NT (Windows on ARM)
Definition MCAsmInfo.h:48
constexpr double e
NodeAddr< CodeNode * > Code
Definition RDFGraph.h:388
Context & getContext() const
Definition BasicBlock.h:99
This is an optimization pass for GlobalISel generic memory operations.
static std::optional< AArch64PACKey::ID > AArch64StringToPACKeyID(StringRef Name)
Return numeric key ID for 2-letter identifier string.
bool errorToBool(Error Err)
Helper for converting an Error to a bool.
Definition Error.h:1113
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
static int MCLOHNameToId(StringRef Name)
Printable print(const GCNRegPressure &RP, const GCNSubtarget *ST=nullptr, unsigned DynamicVGPRBlockSize=0)
static bool isMem(const MachineInstr &MI, unsigned Op)
LLVM_ABI std::pair< StringRef, StringRef > getToken(StringRef Source, StringRef Delimiters=" \t\n\v\f\r")
getToken - This function extracts one token from source, ignoring any leading characters that appear ...
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
Target & getTheAArch64beTarget()
static StringRef MCLOHDirectiveName()
std::string utostr(uint64_t X, bool isNeg=false)
static bool isValidMCLOHType(unsigned Kind)
Op::Description Desc
Target & getTheAArch64leTarget()
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition bit.h:202
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
SmallVectorImpl< std::unique_ptr< MCParsedAsmOperand > > OperandVector
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition MathExtras.h:331
Target & getTheAArch64_32Target()
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
Definition MathExtras.h:189
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
Target & getTheARM64_32Target()
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
Definition ModRef.h:74
static int MCLOHIdToNbArgs(MCLOHType Kind)
std::string join(IteratorT Begin, IteratorT End, StringRef Separator)
Joins the strings in the range [Begin, End), adding Separator between the elements.
static MCRegister getXRegFromWReg(MCRegister Reg)
MCLOHType
Linker Optimization Hint Type.
FunctionAddr VTableAddr Next
Definition InstrProf.h:141
Target & getTheARM64Target()
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
static MCRegister getWRegFromXReg(MCRegister Reg)
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1772
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1947
#define N
const FeatureBitset Features
const char * Name
AArch64::ExtensionBitset DefaultExts
RegisterMCAsmParser - Helper template for registering a target specific assembly parser,...
bool haveFeatures(FeatureBitset ActiveFeatures) const
FeatureBitset getRequiredFeatures() const
const char * Name