// Doxygen extraction header (artifact): LLVM 22.0.0git — X86AsmParser.cpp.
1//===-- X86AsmParser.cpp - Parse X86 assembly to MCInst instructions ------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
17#include "X86Operand.h"
18#include "X86RegisterInfo.h"
19#include "llvm-c/Visibility.h"
20#include "llvm/ADT/STLExtras.h"
23#include "llvm/ADT/StringRef.h"
25#include "llvm/ADT/Twine.h"
26#include "llvm/MC/MCContext.h"
27#include "llvm/MC/MCExpr.h"
28#include "llvm/MC/MCInst.h"
29#include "llvm/MC/MCInstrInfo.h"
34#include "llvm/MC/MCRegister.h"
36#include "llvm/MC/MCSection.h"
37#include "llvm/MC/MCStreamer.h"
39#include "llvm/MC/MCSymbol.h"
45#include <algorithm>
46#include <cstdint>
47#include <memory>
48
49using namespace llvm;
50
52 "x86-experimental-lvi-inline-asm-hardening",
53 cl::desc("Harden inline assembly code that may be vulnerable to Load Value"
54 " Injection (LVI). This feature is experimental."), cl::Hidden);
55
56static bool checkScale(unsigned Scale, StringRef &ErrMsg) {
57 if (Scale != 1 && Scale != 2 && Scale != 4 && Scale != 8) {
58 ErrMsg = "scale factor in address must be 1, 2, 4 or 8";
59 return true;
60 }
61 return false;
62}
63
64namespace {
65
66// Including the generated SSE2AVX compression tables.
67#define GET_X86_SSE2AVX_TABLE
68#include "X86GenInstrMapping.inc"
69
// Operator-precedence table for the Intel-expression infix calculator.
// Indexed by InfixCalculatorTok, so the entry order MUST stay in sync with
// that enum.  Larger value = binds tighter; operands (IC_IMM, IC_REGISTER)
// carry precedence 0.
static const char OpPrecedence[] = {
  0, // IC_OR
  1, // IC_XOR
  2, // IC_AND
  4, // IC_LSHIFT
  4, // IC_RSHIFT
  5, // IC_PLUS
  5, // IC_MINUS
  6, // IC_MULTIPLY
  6, // IC_DIVIDE
  6, // IC_MOD
  7, // IC_NOT
  8, // IC_NEG
  9, // IC_RPAREN
  10, // IC_LPAREN
  0, // IC_IMM
  0, // IC_REGISTER
  3, // IC_EQ
  3, // IC_NE
  3, // IC_LT
  3, // IC_LE
  3, // IC_GT
  3 // IC_GE
};
94
95class X86AsmParser : public MCTargetAsmParser {
  // Per-instruction parse info; presumably supplied by ParseInstruction for
  // inline-asm rewriting — confirm against the (out-of-view) definition.
  ParseInstructionInfo *InstInfo;
  // When set, 16-bit code is matched as 32-bit (see MatchInstruction below).
  bool Code16GCC;
  // Nonzero when a data-size prefix has been explicitly forced.
  unsigned ForcedDataPrefix = 0;

  // Instruction encoding explicitly forced by the user (REX/REX2/VEX/EVEX
  // variants); Default means no explicit request.
  enum OpcodePrefix {
    OpcodePrefix_Default,
    OpcodePrefix_REX,
    OpcodePrefix_REX2,
    OpcodePrefix_VEX,
    OpcodePrefix_VEX2,
    OpcodePrefix_VEX3,
    OpcodePrefix_EVEX,
  };

  OpcodePrefix ForcedOpcodePrefix = OpcodePrefix_Default;

  // Displacement width explicitly forced by the user (8-bit vs 32-bit).
  enum DispEncoding {
    DispEncoding_Default,
    DispEncoding_Disp8,
    DispEncoding_Disp32,
  };

  DispEncoding ForcedDispEncoding = DispEncoding_Default;

  // Does this instruction use apx extended register?
  bool UseApxExtendedReg = false;
  // Is this instruction explicitly required not to update flags?
  bool ForcedNoFlag = false;
124
125private:
126 SMLoc consumeToken() {
127 MCAsmParser &Parser = getParser();
128 SMLoc Result = Parser.getTok().getLoc();
129 Parser.Lex();
130 return Result;
131 }
132
133 bool tokenIsStartOfStatement(AsmToken::TokenKind Token) override {
134 return Token == AsmToken::LCurly;
135 }
136
137 X86TargetStreamer &getTargetStreamer() {
138 assert(getParser().getStreamer().getTargetStreamer() &&
139 "do not have a target streamer");
140 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
141 return static_cast<X86TargetStreamer &>(TS);
142 }
143
144 unsigned MatchInstruction(const OperandVector &Operands, MCInst &Inst,
145 uint64_t &ErrorInfo, FeatureBitset &MissingFeatures,
146 bool matchingInlineAsm, unsigned VariantID = 0) {
147 // In Code16GCC mode, match as 32-bit.
148 if (Code16GCC)
149 SwitchMode(X86::Is32Bit);
150 unsigned rv = MatchInstructionImpl(Operands, Inst, ErrorInfo,
151 MissingFeatures, matchingInlineAsm,
152 VariantID);
153 if (Code16GCC)
154 SwitchMode(X86::Is16Bit);
155 return rv;
156 }
157
  // Token kinds for the infix calculator.  The enumerator order MUST stay in
  // sync with the OpPrecedence[] table above, which is indexed by this enum.
  enum InfixCalculatorTok {
    IC_OR = 0,
    IC_XOR,
    IC_AND,
    IC_LSHIFT,
    IC_RSHIFT,
    IC_PLUS,
    IC_MINUS,
    IC_MULTIPLY,
    IC_DIVIDE,
    IC_MOD,
    IC_NOT,
    IC_NEG,
    IC_RPAREN,
    IC_LPAREN,
    IC_IMM,
    IC_REGISTER,
    IC_EQ,
    IC_NE,
    IC_LT,
    IC_LE,
    IC_GT,
    IC_GE
  };

  // Intel-dialect operators (LENGTH/SIZE/TYPE) recognized in MS inline asm.
  enum IntelOperatorKind {
    IOK_INVALID = 0,
    IOK_LENGTH,
    IOK_SIZE,
    IOK_TYPE,
  };

  // MASM operators (LENGTHOF/SIZEOF/TYPE) recognized in MASM mode.
  enum MasmOperatorKind {
    MOK_INVALID = 0,
    MOK_LENGTHOF,
    MOK_SIZEOF,
    MOK_TYPE,
  };
196
  // Shunting-yard style evaluator for Intel-syntax constant expressions.
  // Operands and operators are pushed as they are parsed; pushOperator()
  // converts to postfix on the fly using OpPrecedence[], and execute()
  // evaluates the resulting postfix sequence to a single int64_t.
  class InfixCalculator {
    typedef std::pair< InfixCalculatorTok, int64_t > ICToken;
    // Pending operators not yet moved to the postfix stream.
    SmallVector<InfixCalculatorTok, 4> InfixOperatorStack;
    // Postfix token stream built up so far.
    SmallVector<ICToken, 4> PostfixStack;

    bool isUnaryOperator(InfixCalculatorTok Op) const {
      return Op == IC_NEG || Op == IC_NOT;
    }

  public:
    // Pop the most recently pushed operand value.  Returns -1 if the top of
    // the stack is not an operand; callers feed the result to checkScale,
    // which rejects -1.
    int64_t popOperand() {
      assert (!PostfixStack.empty() && "Poped an empty stack!");
      ICToken Op = PostfixStack.pop_back_val();
      if (!(Op.first == IC_IMM || Op.first == IC_REGISTER))
        return -1; // The invalid Scale value will be caught later by checkScale
      return Op.second;
    }
    // Push an operand (immediate value or register placeholder).
    void pushOperand(InfixCalculatorTok Op, int64_t Val = 0) {
      assert ((Op == IC_IMM || Op == IC_REGISTER) &&
              "Unexpected operand!");
      PostfixStack.push_back(std::make_pair(Op, Val));
    }

    void popOperator() { InfixOperatorStack.pop_back(); }
    // Push an operator, first flushing higher-precedence operators from the
    // operator stack to the postfix stream (standard shunting-yard step).
    void pushOperator(InfixCalculatorTok Op) {
      // Push the new operator if the stack is empty.
      if (InfixOperatorStack.empty()) {
        InfixOperatorStack.push_back(Op);
        return;
      }

      // Push the new operator if it has a higher precedence than the operator
      // on the top of the stack or the operator on the top of the stack is a
      // left parentheses.
      unsigned Idx = InfixOperatorStack.size() - 1;
      InfixCalculatorTok StackOp = InfixOperatorStack[Idx];
      if (OpPrecedence[Op] > OpPrecedence[StackOp] || StackOp == IC_LPAREN) {
        InfixOperatorStack.push_back(Op);
        return;
      }

      // The operator on the top of the stack has higher precedence than the
      // new operator.
      unsigned ParenCount = 0;
      while (true) {
        // Nothing to process.
        if (InfixOperatorStack.empty())
          break;

        Idx = InfixOperatorStack.size() - 1;
        StackOp = InfixOperatorStack[Idx];
        if (!(OpPrecedence[StackOp] >= OpPrecedence[Op] || ParenCount))
          break;

        // If we have an even parentheses count and we see a left parentheses,
        // then stop processing.
        if (!ParenCount && StackOp == IC_LPAREN)
          break;

        if (StackOp == IC_RPAREN) {
          ++ParenCount;
          InfixOperatorStack.pop_back();
        } else if (StackOp == IC_LPAREN) {
          --ParenCount;
          InfixOperatorStack.pop_back();
        } else {
          InfixOperatorStack.pop_back();
          PostfixStack.push_back(std::make_pair(StackOp, 0));
        }
      }
      // Push the new operator.
      InfixOperatorStack.push_back(Op);
    }

    // Evaluate the accumulated expression and return its value.
    // Comparison operators yield MASM-style -1 (all bits set) for true, 0 for
    // false.
    int64_t execute() {
      // Push any remaining operators onto the postfix stack.
      while (!InfixOperatorStack.empty()) {
        InfixCalculatorTok StackOp = InfixOperatorStack.pop_back_val();
        if (StackOp != IC_LPAREN && StackOp != IC_RPAREN)
          PostfixStack.push_back(std::make_pair(StackOp, 0));
      }

      if (PostfixStack.empty())
        return 0;

      // Standard postfix evaluation over an operand stack.
      SmallVector<ICToken, 16> OperandStack;
      for (const ICToken &Op : PostfixStack) {
        if (Op.first == IC_IMM || Op.first == IC_REGISTER) {
          OperandStack.push_back(Op);
        } else if (isUnaryOperator(Op.first)) {
          assert (OperandStack.size() > 0 && "Too few operands.");
          ICToken Operand = OperandStack.pop_back_val();
          assert (Operand.first == IC_IMM &&
                  "Unary operation with a register!");
          switch (Op.first) {
          default:
            report_fatal_error("Unexpected operator!");
            break;
          case IC_NEG:
            OperandStack.push_back(std::make_pair(IC_IMM, -Operand.second));
            break;
          case IC_NOT:
            OperandStack.push_back(std::make_pair(IC_IMM, ~Operand.second));
            break;
          }
        } else {
          assert (OperandStack.size() > 1 && "Too few operands.");
          int64_t Val;
          ICToken Op2 = OperandStack.pop_back_val();
          ICToken Op1 = OperandStack.pop_back_val();
          switch (Op.first) {
          default:
            report_fatal_error("Unexpected operator!");
            break;
          case IC_PLUS:
            Val = Op1.second + Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            break;
          case IC_MINUS:
            Val = Op1.second - Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            break;
          case IC_MULTIPLY:
            assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
                    "Multiply operation with an immediate and a register!");
            Val = Op1.second * Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            break;
          case IC_DIVIDE:
            assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
                    "Divide operation with an immediate and a register!");
            assert (Op2.second != 0 && "Division by zero!");
            Val = Op1.second / Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            break;
          case IC_MOD:
            assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
                    "Modulo operation with an immediate and a register!");
            Val = Op1.second % Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            break;
          case IC_OR:
            assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
                    "Or operation with an immediate and a register!");
            Val = Op1.second | Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            break;
          case IC_XOR:
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Xor operation with an immediate and a register!");
            Val = Op1.second ^ Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            break;
          case IC_AND:
            assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
                    "And operation with an immediate and a register!");
            Val = Op1.second & Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            break;
          case IC_LSHIFT:
            assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
                    "Left shift operation with an immediate and a register!");
            Val = Op1.second << Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            break;
          case IC_RSHIFT:
            assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
                    "Right shift operation with an immediate and a register!");
            Val = Op1.second >> Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            break;
          case IC_EQ:
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Equals operation with an immediate and a register!");
            Val = (Op1.second == Op2.second) ? -1 : 0;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            break;
          case IC_NE:
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Not-equals operation with an immediate and a register!");
            Val = (Op1.second != Op2.second) ? -1 : 0;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            break;
          case IC_LT:
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Less-than operation with an immediate and a register!");
            Val = (Op1.second < Op2.second) ? -1 : 0;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            break;
          case IC_LE:
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Less-than-or-equal operation with an immediate and a "
                   "register!");
            Val = (Op1.second <= Op2.second) ? -1 : 0;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            break;
          case IC_GT:
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Greater-than operation with an immediate and a register!");
            Val = (Op1.second > Op2.second) ? -1 : 0;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            break;
          case IC_GE:
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Greater-than-or-equal operation with an immediate and a "
                   "register!");
            Val = (Op1.second >= Op2.second) ? -1 : 0;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            break;
          }
        }
      }
      assert (OperandStack.size() == 1 && "Expected a single result.");
      return OperandStack.pop_back_val().second;
    }
  };
413
  // States of the Intel-expression parser below: one state per kind of token
  // most recently consumed.  IES_ERROR is the sink state for any token that
  // is invalid in the current state.
  enum IntelExprState {
    IES_INIT,
    IES_OR,
    IES_XOR,
    IES_AND,
    IES_EQ,
    IES_NE,
    IES_LT,
    IES_LE,
    IES_GT,
    IES_GE,
    IES_LSHIFT,
    IES_RSHIFT,
    IES_PLUS,
    IES_MINUS,
    IES_OFFSET,
    IES_CAST,
    IES_NOT,
    IES_MULTIPLY,
    IES_DIVIDE,
    IES_MOD,
    IES_LBRAC,
    IES_RBRAC,
    IES_LPAREN,
    IES_RPAREN,
    IES_REGISTER,
    IES_INTEGER,
    IES_ERROR
  };
443
444 class IntelExprStateMachine {
445 IntelExprState State = IES_INIT, PrevState = IES_ERROR;
446 MCRegister BaseReg, IndexReg, TmpReg;
447 unsigned Scale = 0;
448 int64_t Imm = 0;
449 const MCExpr *Sym = nullptr;
450 StringRef SymName;
451 InfixCalculator IC;
452 InlineAsmIdentifierInfo Info;
453 short BracCount = 0;
454 bool MemExpr = false;
455 bool BracketUsed = false;
456 bool OffsetOperator = false;
457 bool AttachToOperandIdx = false;
458 bool IsPIC = false;
459 SMLoc OffsetOperatorLoc;
460 AsmTypeInfo CurType;
461
462 bool setSymRef(const MCExpr *Val, StringRef ID, StringRef &ErrMsg) {
463 if (Sym) {
464 ErrMsg = "cannot use more than one symbol in memory operand";
465 return true;
466 }
467 Sym = Val;
468 SymName = ID;
469 return false;
470 }
471
472 public:
473 IntelExprStateMachine() = default;
474
475 void addImm(int64_t imm) { Imm += imm; }
476 short getBracCount() const { return BracCount; }
477 bool isMemExpr() const { return MemExpr; }
478 bool isBracketUsed() const { return BracketUsed; }
479 bool isOffsetOperator() const { return OffsetOperator; }
480 SMLoc getOffsetLoc() const { return OffsetOperatorLoc; }
481 MCRegister getBaseReg() const { return BaseReg; }
482 MCRegister getIndexReg() const { return IndexReg; }
483 unsigned getScale() const { return Scale; }
484 const MCExpr *getSym() const { return Sym; }
485 StringRef getSymName() const { return SymName; }
486 StringRef getType() const { return CurType.Name; }
487 unsigned getSize() const { return CurType.Size; }
488 unsigned getElementSize() const { return CurType.ElementSize; }
489 unsigned getLength() const { return CurType.Length; }
490 int64_t getImm() { return Imm + IC.execute(); }
491 bool isValidEndState() const {
492 return State == IES_RBRAC || State == IES_RPAREN ||
493 State == IES_INTEGER || State == IES_REGISTER ||
494 State == IES_OFFSET;
495 }
496
497 // Is the intel expression appended after an operand index.
498 // [OperandIdx][Intel Expression]
499 // This is neccessary for checking if it is an independent
500 // intel expression at back end when parse inline asm.
501 void setAppendAfterOperand() { AttachToOperandIdx = true; }
502
503 bool isPIC() const { return IsPIC; }
504 void setPIC() { IsPIC = true; }
505
506 bool hadError() const { return State == IES_ERROR; }
507 const InlineAsmIdentifierInfo &getIdentifierInfo() const { return Info; }
508
509 bool regsUseUpError(StringRef &ErrMsg) {
510 // This case mostly happen in inline asm, e.g. Arr[BaseReg + IndexReg]
511 // can not intruduce additional register in inline asm in PIC model.
512 if (IsPIC && AttachToOperandIdx)
513 ErrMsg = "Don't use 2 or more regs for mem offset in PIC model!";
514 else
515 ErrMsg = "BaseReg/IndexReg already set!";
516 return true;
517 }
518
519 void onOr() {
520 IntelExprState CurrState = State;
521 switch (State) {
522 default:
523 State = IES_ERROR;
524 break;
525 case IES_INTEGER:
526 case IES_RPAREN:
527 case IES_REGISTER:
528 State = IES_OR;
529 IC.pushOperator(IC_OR);
530 break;
531 }
532 PrevState = CurrState;
533 }
534 void onXor() {
535 IntelExprState CurrState = State;
536 switch (State) {
537 default:
538 State = IES_ERROR;
539 break;
540 case IES_INTEGER:
541 case IES_RPAREN:
542 case IES_REGISTER:
543 State = IES_XOR;
544 IC.pushOperator(IC_XOR);
545 break;
546 }
547 PrevState = CurrState;
548 }
549 void onAnd() {
550 IntelExprState CurrState = State;
551 switch (State) {
552 default:
553 State = IES_ERROR;
554 break;
555 case IES_INTEGER:
556 case IES_RPAREN:
557 case IES_REGISTER:
558 State = IES_AND;
559 IC.pushOperator(IC_AND);
560 break;
561 }
562 PrevState = CurrState;
563 }
564 void onEq() {
565 IntelExprState CurrState = State;
566 switch (State) {
567 default:
568 State = IES_ERROR;
569 break;
570 case IES_INTEGER:
571 case IES_RPAREN:
572 case IES_REGISTER:
573 State = IES_EQ;
574 IC.pushOperator(IC_EQ);
575 break;
576 }
577 PrevState = CurrState;
578 }
579 void onNE() {
580 IntelExprState CurrState = State;
581 switch (State) {
582 default:
583 State = IES_ERROR;
584 break;
585 case IES_INTEGER:
586 case IES_RPAREN:
587 case IES_REGISTER:
588 State = IES_NE;
589 IC.pushOperator(IC_NE);
590 break;
591 }
592 PrevState = CurrState;
593 }
594 void onLT() {
595 IntelExprState CurrState = State;
596 switch (State) {
597 default:
598 State = IES_ERROR;
599 break;
600 case IES_INTEGER:
601 case IES_RPAREN:
602 case IES_REGISTER:
603 State = IES_LT;
604 IC.pushOperator(IC_LT);
605 break;
606 }
607 PrevState = CurrState;
608 }
609 void onLE() {
610 IntelExprState CurrState = State;
611 switch (State) {
612 default:
613 State = IES_ERROR;
614 break;
615 case IES_INTEGER:
616 case IES_RPAREN:
617 case IES_REGISTER:
618 State = IES_LE;
619 IC.pushOperator(IC_LE);
620 break;
621 }
622 PrevState = CurrState;
623 }
624 void onGT() {
625 IntelExprState CurrState = State;
626 switch (State) {
627 default:
628 State = IES_ERROR;
629 break;
630 case IES_INTEGER:
631 case IES_RPAREN:
632 case IES_REGISTER:
633 State = IES_GT;
634 IC.pushOperator(IC_GT);
635 break;
636 }
637 PrevState = CurrState;
638 }
639 void onGE() {
640 IntelExprState CurrState = State;
641 switch (State) {
642 default:
643 State = IES_ERROR;
644 break;
645 case IES_INTEGER:
646 case IES_RPAREN:
647 case IES_REGISTER:
648 State = IES_GE;
649 IC.pushOperator(IC_GE);
650 break;
651 }
652 PrevState = CurrState;
653 }
654 void onLShift() {
655 IntelExprState CurrState = State;
656 switch (State) {
657 default:
658 State = IES_ERROR;
659 break;
660 case IES_INTEGER:
661 case IES_RPAREN:
662 case IES_REGISTER:
663 State = IES_LSHIFT;
664 IC.pushOperator(IC_LSHIFT);
665 break;
666 }
667 PrevState = CurrState;
668 }
669 void onRShift() {
670 IntelExprState CurrState = State;
671 switch (State) {
672 default:
673 State = IES_ERROR;
674 break;
675 case IES_INTEGER:
676 case IES_RPAREN:
677 case IES_REGISTER:
678 State = IES_RSHIFT;
679 IC.pushOperator(IC_RSHIFT);
680 break;
681 }
682 PrevState = CurrState;
683 }
684 bool onPlus(StringRef &ErrMsg) {
685 IntelExprState CurrState = State;
686 switch (State) {
687 default:
688 State = IES_ERROR;
689 break;
690 case IES_INTEGER:
691 case IES_RPAREN:
692 case IES_REGISTER:
693 case IES_OFFSET:
694 State = IES_PLUS;
695 IC.pushOperator(IC_PLUS);
696 if (CurrState == IES_REGISTER && PrevState != IES_MULTIPLY) {
697 // If we already have a BaseReg, then assume this is the IndexReg with
698 // no explicit scale.
699 if (!BaseReg) {
700 BaseReg = TmpReg;
701 } else {
702 if (IndexReg)
703 return regsUseUpError(ErrMsg);
704 IndexReg = TmpReg;
705 Scale = 0;
706 }
707 }
708 break;
709 }
710 PrevState = CurrState;
711 return false;
712 }
713 bool onMinus(StringRef &ErrMsg) {
714 IntelExprState CurrState = State;
715 switch (State) {
716 default:
717 State = IES_ERROR;
718 break;
719 case IES_OR:
720 case IES_XOR:
721 case IES_AND:
722 case IES_EQ:
723 case IES_NE:
724 case IES_LT:
725 case IES_LE:
726 case IES_GT:
727 case IES_GE:
728 case IES_LSHIFT:
729 case IES_RSHIFT:
730 case IES_PLUS:
731 case IES_NOT:
732 case IES_MULTIPLY:
733 case IES_DIVIDE:
734 case IES_MOD:
735 case IES_LPAREN:
736 case IES_RPAREN:
737 case IES_LBRAC:
738 case IES_RBRAC:
739 case IES_INTEGER:
740 case IES_REGISTER:
741 case IES_INIT:
742 case IES_OFFSET:
743 State = IES_MINUS;
744 // push minus operator if it is not a negate operator
745 if (CurrState == IES_REGISTER || CurrState == IES_RPAREN ||
746 CurrState == IES_INTEGER || CurrState == IES_RBRAC ||
747 CurrState == IES_OFFSET)
748 IC.pushOperator(IC_MINUS);
749 else if (PrevState == IES_REGISTER && CurrState == IES_MULTIPLY) {
750 // We have negate operator for Scale: it's illegal
751 ErrMsg = "Scale can't be negative";
752 return true;
753 } else
754 IC.pushOperator(IC_NEG);
755 if (CurrState == IES_REGISTER && PrevState != IES_MULTIPLY) {
756 // If we already have a BaseReg, then assume this is the IndexReg with
757 // no explicit scale.
758 if (!BaseReg) {
759 BaseReg = TmpReg;
760 } else {
761 if (IndexReg)
762 return regsUseUpError(ErrMsg);
763 IndexReg = TmpReg;
764 Scale = 0;
765 }
766 }
767 break;
768 }
769 PrevState = CurrState;
770 return false;
771 }
772 void onNot() {
773 IntelExprState CurrState = State;
774 switch (State) {
775 default:
776 State = IES_ERROR;
777 break;
778 case IES_OR:
779 case IES_XOR:
780 case IES_AND:
781 case IES_EQ:
782 case IES_NE:
783 case IES_LT:
784 case IES_LE:
785 case IES_GT:
786 case IES_GE:
787 case IES_LSHIFT:
788 case IES_RSHIFT:
789 case IES_PLUS:
790 case IES_MINUS:
791 case IES_NOT:
792 case IES_MULTIPLY:
793 case IES_DIVIDE:
794 case IES_MOD:
795 case IES_LPAREN:
796 case IES_LBRAC:
797 case IES_INIT:
798 State = IES_NOT;
799 IC.pushOperator(IC_NOT);
800 break;
801 }
802 PrevState = CurrState;
803 }
804 bool onRegister(MCRegister Reg, StringRef &ErrMsg) {
805 IntelExprState CurrState = State;
806 switch (State) {
807 default:
808 State = IES_ERROR;
809 break;
810 case IES_PLUS:
811 case IES_LPAREN:
812 case IES_LBRAC:
813 State = IES_REGISTER;
814 TmpReg = Reg;
815 IC.pushOperand(IC_REGISTER);
816 break;
817 case IES_MULTIPLY:
818 // Index Register - Scale * Register
819 if (PrevState == IES_INTEGER) {
820 if (IndexReg)
821 return regsUseUpError(ErrMsg);
822 State = IES_REGISTER;
823 IndexReg = Reg;
824 // Get the scale and replace the 'Scale * Register' with '0'.
825 Scale = IC.popOperand();
826 if (checkScale(Scale, ErrMsg))
827 return true;
828 IC.pushOperand(IC_IMM);
829 IC.popOperator();
830 } else {
831 State = IES_ERROR;
832 }
833 break;
834 }
835 PrevState = CurrState;
836 return false;
837 }
838 bool onIdentifierExpr(const MCExpr *SymRef, StringRef SymRefName,
839 const InlineAsmIdentifierInfo &IDInfo,
840 const AsmTypeInfo &Type, bool ParsingMSInlineAsm,
841 StringRef &ErrMsg) {
842 // InlineAsm: Treat an enum value as an integer
843 if (ParsingMSInlineAsm)
845 return onInteger(IDInfo.Enum.EnumVal, ErrMsg);
846 // Treat a symbolic constant like an integer
847 if (auto *CE = dyn_cast<MCConstantExpr>(SymRef))
848 return onInteger(CE->getValue(), ErrMsg);
849 PrevState = State;
850 switch (State) {
851 default:
852 State = IES_ERROR;
853 break;
854 case IES_CAST:
855 case IES_PLUS:
856 case IES_MINUS:
857 case IES_NOT:
858 case IES_INIT:
859 case IES_LBRAC:
860 case IES_LPAREN:
861 if (setSymRef(SymRef, SymRefName, ErrMsg))
862 return true;
863 MemExpr = true;
864 State = IES_INTEGER;
865 IC.pushOperand(IC_IMM);
866 if (ParsingMSInlineAsm)
867 Info = IDInfo;
868 setTypeInfo(Type);
869 break;
870 }
871 return false;
872 }
873 bool onInteger(int64_t TmpInt, StringRef &ErrMsg) {
874 IntelExprState CurrState = State;
875 switch (State) {
876 default:
877 State = IES_ERROR;
878 break;
879 case IES_PLUS:
880 case IES_MINUS:
881 case IES_NOT:
882 case IES_OR:
883 case IES_XOR:
884 case IES_AND:
885 case IES_EQ:
886 case IES_NE:
887 case IES_LT:
888 case IES_LE:
889 case IES_GT:
890 case IES_GE:
891 case IES_LSHIFT:
892 case IES_RSHIFT:
893 case IES_DIVIDE:
894 case IES_MOD:
895 case IES_MULTIPLY:
896 case IES_LPAREN:
897 case IES_INIT:
898 case IES_LBRAC:
899 State = IES_INTEGER;
900 if (PrevState == IES_REGISTER && CurrState == IES_MULTIPLY) {
901 // Index Register - Register * Scale
902 if (IndexReg)
903 return regsUseUpError(ErrMsg);
904 IndexReg = TmpReg;
905 Scale = TmpInt;
906 if (checkScale(Scale, ErrMsg))
907 return true;
908 // Get the scale and replace the 'Register * Scale' with '0'.
909 IC.popOperator();
910 } else {
911 IC.pushOperand(IC_IMM, TmpInt);
912 }
913 break;
914 }
915 PrevState = CurrState;
916 return false;
917 }
918 void onStar() {
919 PrevState = State;
920 switch (State) {
921 default:
922 State = IES_ERROR;
923 break;
924 case IES_INTEGER:
925 case IES_REGISTER:
926 case IES_RPAREN:
927 State = IES_MULTIPLY;
928 IC.pushOperator(IC_MULTIPLY);
929 break;
930 }
931 }
932 void onDivide() {
933 PrevState = State;
934 switch (State) {
935 default:
936 State = IES_ERROR;
937 break;
938 case IES_INTEGER:
939 case IES_RPAREN:
940 State = IES_DIVIDE;
941 IC.pushOperator(IC_DIVIDE);
942 break;
943 }
944 }
945 void onMod() {
946 PrevState = State;
947 switch (State) {
948 default:
949 State = IES_ERROR;
950 break;
951 case IES_INTEGER:
952 case IES_RPAREN:
953 State = IES_MOD;
954 IC.pushOperator(IC_MOD);
955 break;
956 }
957 }
958 bool onLBrac() {
959 if (BracCount)
960 return true;
961 PrevState = State;
962 switch (State) {
963 default:
964 State = IES_ERROR;
965 break;
966 case IES_RBRAC:
967 case IES_INTEGER:
968 case IES_RPAREN:
969 State = IES_PLUS;
970 IC.pushOperator(IC_PLUS);
971 CurType.Length = 1;
972 CurType.Size = CurType.ElementSize;
973 break;
974 case IES_INIT:
975 case IES_CAST:
976 assert(!BracCount && "BracCount should be zero on parsing's start");
977 State = IES_LBRAC;
978 break;
979 }
980 MemExpr = true;
981 BracketUsed = true;
982 BracCount++;
983 return false;
984 }
985 bool onRBrac(StringRef &ErrMsg) {
986 IntelExprState CurrState = State;
987 switch (State) {
988 default:
989 State = IES_ERROR;
990 break;
991 case IES_INTEGER:
992 case IES_OFFSET:
993 case IES_REGISTER:
994 case IES_RPAREN:
995 if (BracCount-- != 1) {
996 ErrMsg = "unexpected bracket encountered";
997 return true;
998 }
999 State = IES_RBRAC;
1000 if (CurrState == IES_REGISTER && PrevState != IES_MULTIPLY) {
1001 // If we already have a BaseReg, then assume this is the IndexReg with
1002 // no explicit scale.
1003 if (!BaseReg) {
1004 BaseReg = TmpReg;
1005 } else {
1006 if (IndexReg)
1007 return regsUseUpError(ErrMsg);
1008 IndexReg = TmpReg;
1009 Scale = 0;
1010 }
1011 }
1012 break;
1013 }
1014 PrevState = CurrState;
1015 return false;
1016 }
1017 void onLParen() {
1018 IntelExprState CurrState = State;
1019 switch (State) {
1020 default:
1021 State = IES_ERROR;
1022 break;
1023 case IES_PLUS:
1024 case IES_MINUS:
1025 case IES_NOT:
1026 case IES_OR:
1027 case IES_XOR:
1028 case IES_AND:
1029 case IES_EQ:
1030 case IES_NE:
1031 case IES_LT:
1032 case IES_LE:
1033 case IES_GT:
1034 case IES_GE:
1035 case IES_LSHIFT:
1036 case IES_RSHIFT:
1037 case IES_MULTIPLY:
1038 case IES_DIVIDE:
1039 case IES_MOD:
1040 case IES_LPAREN:
1041 case IES_INIT:
1042 case IES_LBRAC:
1043 State = IES_LPAREN;
1044 IC.pushOperator(IC_LPAREN);
1045 break;
1046 }
1047 PrevState = CurrState;
1048 }
1049 bool onRParen(StringRef &ErrMsg) {
1050 IntelExprState CurrState = State;
1051 switch (State) {
1052 default:
1053 State = IES_ERROR;
1054 break;
1055 case IES_INTEGER:
1056 case IES_OFFSET:
1057 case IES_REGISTER:
1058 case IES_RBRAC:
1059 case IES_RPAREN:
1060 State = IES_RPAREN;
1061 // In the case of a multiply, onRegister has already set IndexReg
1062 // directly, with appropriate scale.
1063 // Otherwise if we just saw a register it has only been stored in
1064 // TmpReg, so we need to store it into the state machine.
1065 if (CurrState == IES_REGISTER && PrevState != IES_MULTIPLY) {
1066 // If we already have a BaseReg, then assume this is the IndexReg with
1067 // no explicit scale.
1068 if (!BaseReg) {
1069 BaseReg = TmpReg;
1070 } else {
1071 if (IndexReg)
1072 return regsUseUpError(ErrMsg);
1073 IndexReg = TmpReg;
1074 Scale = 0;
1075 }
1076 }
1077 IC.pushOperator(IC_RPAREN);
1078 break;
1079 }
1080 PrevState = CurrState;
1081 return false;
1082 }
1083 bool onOffset(const MCExpr *Val, SMLoc OffsetLoc, StringRef ID,
1084 const InlineAsmIdentifierInfo &IDInfo,
1085 bool ParsingMSInlineAsm, StringRef &ErrMsg) {
1086 PrevState = State;
1087 switch (State) {
1088 default:
1089 ErrMsg = "unexpected offset operator expression";
1090 return true;
1091 case IES_PLUS:
1092 case IES_INIT:
1093 case IES_LBRAC:
1094 if (setSymRef(Val, ID, ErrMsg))
1095 return true;
1096 OffsetOperator = true;
1097 OffsetOperatorLoc = OffsetLoc;
1098 State = IES_OFFSET;
1099 // As we cannot yet resolve the actual value (offset), we retain
1100 // the requested semantics by pushing a '0' to the operands stack
1101 IC.pushOperand(IC_IMM);
1102 if (ParsingMSInlineAsm) {
1103 Info = IDInfo;
1104 }
1105 break;
1106 }
1107 return false;
1108 }
1109 void onCast(AsmTypeInfo Info) {
1110 PrevState = State;
1111 switch (State) {
1112 default:
1113 State = IES_ERROR;
1114 break;
1115 case IES_LPAREN:
1116 setTypeInfo(Info);
1117 State = IES_CAST;
1118 break;
1119 }
1120 }
1121 void setTypeInfo(AsmTypeInfo Type) { CurType = Type; }
1122 };
1123
1124 bool Error(SMLoc L, const Twine &Msg, SMRange Range = {},
1125 bool MatchingInlineAsm = false) {
1126 MCAsmParser &Parser = getParser();
1127 if (MatchingInlineAsm) {
1128 return false;
1129 }
1130 return Parser.Error(L, Msg, Range);
1131 }
1132
1133 bool MatchRegisterByName(MCRegister &RegNo, StringRef RegName, SMLoc StartLoc,
1134 SMLoc EndLoc);
1135 bool ParseRegister(MCRegister &RegNo, SMLoc &StartLoc, SMLoc &EndLoc,
1136 bool RestoreOnFailure);
1137
1138 std::unique_ptr<X86Operand> DefaultMemSIOperand(SMLoc Loc);
1139 std::unique_ptr<X86Operand> DefaultMemDIOperand(SMLoc Loc);
1140 bool IsSIReg(MCRegister Reg);
1141 MCRegister GetSIDIForRegClass(unsigned RegClassID, bool IsSIReg);
1142 void
1143 AddDefaultSrcDestOperands(OperandVector &Operands,
1144 std::unique_ptr<llvm::MCParsedAsmOperand> &&Src,
1145 std::unique_ptr<llvm::MCParsedAsmOperand> &&Dst);
1146 bool VerifyAndAdjustOperands(OperandVector &OrigOperands,
1147 OperandVector &FinalOperands);
1148 bool parseOperand(OperandVector &Operands, StringRef Name);
1149 bool parseATTOperand(OperandVector &Operands);
1150 bool parseIntelOperand(OperandVector &Operands, StringRef Name);
1151 bool ParseIntelOffsetOperator(const MCExpr *&Val, StringRef &ID,
1152 InlineAsmIdentifierInfo &Info, SMLoc &End);
1153 bool ParseIntelDotOperator(IntelExprStateMachine &SM, SMLoc &End);
1154 unsigned IdentifyIntelInlineAsmOperator(StringRef Name);
1155 unsigned ParseIntelInlineAsmOperator(unsigned OpKind);
1156 unsigned IdentifyMasmOperator(StringRef Name);
1157 bool ParseMasmOperator(unsigned OpKind, int64_t &Val);
1158 bool ParseRoundingModeOp(SMLoc Start, OperandVector &Operands);
1159 bool parseCFlagsOp(OperandVector &Operands);
1160 bool ParseIntelNamedOperator(StringRef Name, IntelExprStateMachine &SM,
1161 bool &ParseError, SMLoc &End);
1162 bool ParseMasmNamedOperator(StringRef Name, IntelExprStateMachine &SM,
1163 bool &ParseError, SMLoc &End);
1164 void RewriteIntelExpression(IntelExprStateMachine &SM, SMLoc Start,
1165 SMLoc End);
1166 bool ParseIntelExpression(IntelExprStateMachine &SM, SMLoc &End);
1167 bool ParseIntelInlineAsmIdentifier(const MCExpr *&Val, StringRef &Identifier,
1168 InlineAsmIdentifierInfo &Info,
1169 bool IsUnevaluatedOperand, SMLoc &End,
1170 bool IsParsingOffsetOperator = false);
1171 void tryParseOperandIdx(AsmToken::TokenKind PrevTK,
1172 IntelExprStateMachine &SM);
1173
1174 bool CheckDispOverflow(MCRegister BaseReg, MCRegister IndexReg,
1175 const MCExpr *Disp, SMLoc Loc);
1176
1177 bool ParseMemOperand(MCRegister SegReg, const MCExpr *Disp, SMLoc StartLoc,
1178 SMLoc EndLoc, OperandVector &Operands);
1179
1180 X86::CondCode ParseConditionCode(StringRef CCode);
1181
1182 bool ParseIntelMemoryOperandSize(unsigned &Size, StringRef *SizeStr);
1183 bool CreateMemForMSInlineAsm(MCRegister SegReg, const MCExpr *Disp,
1184 MCRegister BaseReg, MCRegister IndexReg,
1185 unsigned Scale, bool NonAbsMem, SMLoc Start,
1186 SMLoc End, unsigned Size, StringRef Identifier,
1187 const InlineAsmIdentifierInfo &Info,
1188 OperandVector &Operands);
1189
1190 bool parseDirectiveArch();
1191 bool parseDirectiveNops(SMLoc L);
1192 bool parseDirectiveEven(SMLoc L);
1193 bool ParseDirectiveCode(StringRef IDVal, SMLoc L);
1194
1195 /// CodeView FPO data directives.
1196 bool parseDirectiveFPOProc(SMLoc L);
1197 bool parseDirectiveFPOSetFrame(SMLoc L);
1198 bool parseDirectiveFPOPushReg(SMLoc L);
1199 bool parseDirectiveFPOStackAlloc(SMLoc L);
1200 bool parseDirectiveFPOStackAlign(SMLoc L);
1201 bool parseDirectiveFPOEndPrologue(SMLoc L);
1202 bool parseDirectiveFPOEndProc(SMLoc L);
1203
1204 /// SEH directives.
1205 bool parseSEHRegisterNumber(unsigned RegClassID, MCRegister &RegNo);
1206 bool parseDirectiveSEHPushReg(SMLoc);
1207 bool parseDirectiveSEHSetFrame(SMLoc);
1208 bool parseDirectiveSEHSaveReg(SMLoc);
1209 bool parseDirectiveSEHSaveXMM(SMLoc);
1210 bool parseDirectiveSEHPushFrame(SMLoc);
1211
1212 unsigned checkTargetMatchPredicate(MCInst &Inst) override;
1213
1214 bool validateInstruction(MCInst &Inst, const OperandVector &Ops);
1215 bool processInstruction(MCInst &Inst, const OperandVector &Ops);
1216
1217 // Load Value Injection (LVI) Mitigations for machine code
1218 void emitWarningForSpecialLVIInstruction(SMLoc Loc);
1219 void applyLVICFIMitigation(MCInst &Inst, MCStreamer &Out);
1220 void applyLVILoadHardeningMitigation(MCInst &Inst, MCStreamer &Out);
1221
1222 /// Wrapper around MCStreamer::emitInstruction(). Possibly adds
1223 /// instrumentation around Inst.
1224 void emitInstruction(MCInst &Inst, OperandVector &Operands, MCStreamer &Out);
1225
1226 bool matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
1227 OperandVector &Operands, MCStreamer &Out,
1228 uint64_t &ErrorInfo,
1229 bool MatchingInlineAsm) override;
1230
1231 void MatchFPUWaitAlias(SMLoc IDLoc, X86Operand &Op, OperandVector &Operands,
1232 MCStreamer &Out, bool MatchingInlineAsm);
1233
1234 bool ErrorMissingFeature(SMLoc IDLoc, const FeatureBitset &MissingFeatures,
1235 bool MatchingInlineAsm);
1236
1237 bool matchAndEmitATTInstruction(SMLoc IDLoc, unsigned &Opcode, MCInst &Inst,
1238 OperandVector &Operands, MCStreamer &Out,
1239 uint64_t &ErrorInfo, bool MatchingInlineAsm);
1240
1241 bool matchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode, MCInst &Inst,
1242 OperandVector &Operands, MCStreamer &Out,
1243 uint64_t &ErrorInfo,
1244 bool MatchingInlineAsm);
1245
1246 bool omitRegisterFromClobberLists(MCRegister Reg) override;
1247
1248 /// Parses AVX512 specific operand primitives: masked registers ({%k<NUM>}, {z})
1249 /// and memory broadcasting ({1to<NUM>}) primitives, updating Operands vector if required.
1250 /// return false if no parsing errors occurred, true otherwise.
1251 bool HandleAVX512Operand(OperandVector &Operands);
1252
1253 bool ParseZ(std::unique_ptr<X86Operand> &Z, SMLoc StartLoc);
1254
// True when the current subtarget has the Is64Bit feature set.
1255 bool is64BitMode() const {
1256 // FIXME: Can tablegen auto-generate this?
1257 return getSTI().hasFeature(X86::Is64Bit);
1258 }
// True when the current subtarget has the Is32Bit feature set.
1259 bool is32BitMode() const {
1260 // FIXME: Can tablegen auto-generate this?
1261 return getSTI().hasFeature(X86::Is32Bit);
1262 }
// True when the current subtarget has the Is16Bit feature set.
1263 bool is16BitMode() const {
1264 // FIXME: Can tablegen auto-generate this?
1265 return getSTI().hasFeature(X86::Is16Bit);
1266 }
// Switch the parser to another code mode (one of X86::Is16Bit/Is32Bit/
// Is64Bit): in a mutable copy of the subtarget info, clear the currently-set
// mode bit and set the requested one in a single ToggleFeature call, then
// recompute the cached available-features mask from the new feature bits.
1267 void SwitchMode(unsigned mode) {
1268 MCSubtargetInfo &STI = copySTI();
1269 FeatureBitset AllModes({X86::Is64Bit, X86::Is32Bit, X86::Is16Bit});
1270 FeatureBitset OldMode = STI.getFeatureBits() & AllModes;
// OldMode.flip(mode) contains the old mode bit plus the new one, so a single
// toggle turns the old mode off and the new mode on.
1271 FeatureBitset FB = ComputeAvailableFeatures(
1272 STI.ToggleFeature(OldMode.flip(mode)));
1273 setAvailableFeatures(FB);
1274
// Postcondition: exactly the requested mode bit is set among the mode bits.
1275 assert(FeatureBitset({mode}) == (STI.getFeatureBits() & AllModes));
1276 }
1277
// Pointer width in bits (16/32/64) for the current code mode; exactly one
// mode feature is expected to be set, otherwise this is unreachable.
1278 unsigned getPointerWidth() {
1279 if (is16BitMode()) return 16;
1280 if (is32BitMode()) return 32;
1281 if (is64BitMode()) return 64;
1282 llvm_unreachable("invalid mode");
1283 }
1284
// A nonzero assembler dialect is treated as Intel syntax (dialect 0 is AT&T).
1285 bool isParsingIntelSyntax() {
1286 return getParser().getAssemblerDialect();
1287 }
1288
1289 /// @name Auto-generated Matcher Functions
1290 /// {
1291
1292#define GET_ASSEMBLER_HEADER
1293#include "X86GenAsmMatcher.inc"
1294
1295 /// }
1296
1297public:
1298 enum X86MatchResultTy {
1299 Match_Unsupported = FIRST_TARGET_MATCH_RESULT_TY,
1300#define GET_OPERAND_DIAGNOSTIC_TYPES
1301#include "X86GenAsmMatcher.inc"
1302 };
1303
// Construct the X86 target asm parser: alias the .word directive to .2byte
// (x86 .word is 16 bits) and seed the available-feature mask from the
// subtarget's feature bits.
1304 X86AsmParser(const MCSubtargetInfo &sti, MCAsmParser &Parser,
1305 const MCInstrInfo &mii, const MCTargetOptions &Options)
1306 : MCTargetAsmParser(Options, sti, mii), InstInfo(nullptr),
1307 Code16GCC(false) {
1308
1309 Parser.addAliasForDirective(".word", ".2byte");
1310
1311 // Initialize the set of available features.
1312 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
1313 }
1314
1315 bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc) override;
1316 ParseStatus tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
1317 SMLoc &EndLoc) override;
1318
1319 bool parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) override;
1320
1321 bool parseInstruction(ParseInstructionInfo &Info, StringRef Name,
1322 SMLoc NameLoc, OperandVector &Operands) override;
1323
1324 bool ParseDirective(AsmToken DirectiveID) override;
1325};
1326} // end anonymous namespace
1327
1328#define GET_REGISTER_MATCHER
1329#define GET_SUBTARGET_FEATURE_NAME
1330#include "X86GenAsmMatcher.inc"
1331
1333 MCRegister IndexReg, unsigned Scale,
1334 bool Is64BitMode,
1335 StringRef &ErrMsg) {
1336 // If we have both a base register and an index register make sure they are
1337 // both 64-bit or 32-bit registers.
1338 // To support VSIB, IndexReg can be 128-bit or 256-bit registers.
1339
1340 if (BaseReg &&
1341 !(BaseReg == X86::RIP || BaseReg == X86::EIP ||
1342 X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg) ||
1343 X86MCRegisterClasses[X86::GR32RegClassID].contains(BaseReg) ||
1344 X86MCRegisterClasses[X86::GR64RegClassID].contains(BaseReg))) {
1345 ErrMsg = "invalid base+index expression";
1346 return true;
1347 }
1348
1349 if (IndexReg &&
1350 !(IndexReg == X86::EIZ || IndexReg == X86::RIZ ||
1351 X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg) ||
1352 X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg) ||
1353 X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg) ||
1354 X86MCRegisterClasses[X86::VR128XRegClassID].contains(IndexReg) ||
1355 X86MCRegisterClasses[X86::VR256XRegClassID].contains(IndexReg) ||
1356 X86MCRegisterClasses[X86::VR512RegClassID].contains(IndexReg))) {
1357 ErrMsg = "invalid base+index expression";
1358 return true;
1359 }
1360
1361 if (((BaseReg == X86::RIP || BaseReg == X86::EIP) && IndexReg) ||
1362 IndexReg == X86::EIP || IndexReg == X86::RIP || IndexReg == X86::ESP ||
1363 IndexReg == X86::RSP) {
1364 ErrMsg = "invalid base+index expression";
1365 return true;
1366 }
1367
1368 // Check for use of invalid 16-bit registers. Only BX/BP/SI/DI are allowed,
1369 // and then only in non-64-bit modes.
1370 if (X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg) &&
1371 (Is64BitMode || (BaseReg != X86::BX && BaseReg != X86::BP &&
1372 BaseReg != X86::SI && BaseReg != X86::DI))) {
1373 ErrMsg = "invalid 16-bit base register";
1374 return true;
1375 }
1376
1377 if (!BaseReg &&
1378 X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg)) {
1379 ErrMsg = "16-bit memory operand may not include only index register";
1380 return true;
1381 }
1382
1383 if (BaseReg && IndexReg) {
1384 if (X86MCRegisterClasses[X86::GR64RegClassID].contains(BaseReg) &&
1385 (X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg) ||
1386 X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg) ||
1387 IndexReg == X86::EIZ)) {
1388 ErrMsg = "base register is 64-bit, but index register is not";
1389 return true;
1390 }
1391 if (X86MCRegisterClasses[X86::GR32RegClassID].contains(BaseReg) &&
1392 (X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg) ||
1393 X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg) ||
1394 IndexReg == X86::RIZ)) {
1395 ErrMsg = "base register is 32-bit, but index register is not";
1396 return true;
1397 }
1398 if (X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg)) {
1399 if (X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg) ||
1400 X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg)) {
1401 ErrMsg = "base register is 16-bit, but index register is not";
1402 return true;
1403 }
1404 if ((BaseReg != X86::BX && BaseReg != X86::BP) ||
1405 (IndexReg != X86::SI && IndexReg != X86::DI)) {
1406 ErrMsg = "invalid 16-bit base/index register combination";
1407 return true;
1408 }
1409 }
1410 }
1411
1412 // RIP/EIP-relative addressing is only supported in 64-bit mode.
1413 if (!Is64BitMode && (BaseReg == X86::RIP || BaseReg == X86::EIP)) {
1414 ErrMsg = "IP-relative addressing requires 64-bit mode";
1415 return true;
1416 }
1417
1418 return checkScale(Scale, ErrMsg);
1419}
1420
1421bool X86AsmParser::MatchRegisterByName(MCRegister &RegNo, StringRef RegName,
1422 SMLoc StartLoc, SMLoc EndLoc) {
1423 // If we encounter a %, ignore it. This code handles registers with and
1424 // without the prefix, unprefixed registers can occur in cfi directives.
1425 RegName.consume_front("%");
1426
1427 RegNo = MatchRegisterName(RegName);
1428
1429 // If the match failed, try the register name as lowercase.
1430 if (!RegNo)
1431 RegNo = MatchRegisterName(RegName.lower());
1432
1433 // The "flags" and "mxcsr" registers cannot be referenced directly.
1434 // Treat it as an identifier instead.
1435 if (isParsingMSInlineAsm() && isParsingIntelSyntax() &&
1436 (RegNo == X86::EFLAGS || RegNo == X86::MXCSR))
1437 RegNo = MCRegister();
1438
1439 if (!is64BitMode()) {
1440 // FIXME: This should be done using Requires<Not64BitMode> and
1441 // Requires<In64BitMode> so "eiz" usage in 64-bit instructions can be also
1442 // checked.
1443 if (RegNo == X86::RIZ || RegNo == X86::RIP ||
1444 X86MCRegisterClasses[X86::GR64RegClassID].contains(RegNo) ||
1447 return Error(StartLoc,
1448 "register %" + RegName + " is only available in 64-bit mode",
1449 SMRange(StartLoc, EndLoc));
1450 }
1451 }
1452
1453 if (X86II::isApxExtendedReg(RegNo))
1454 UseApxExtendedReg = true;
1455
1456 // If this is "db[0-15]", match it as an alias
1457 // for dr[0-15].
1458 if (!RegNo && RegName.starts_with("db")) {
1459 if (RegName.size() == 3) {
1460 switch (RegName[2]) {
1461 case '0':
1462 RegNo = X86::DR0;
1463 break;
1464 case '1':
1465 RegNo = X86::DR1;
1466 break;
1467 case '2':
1468 RegNo = X86::DR2;
1469 break;
1470 case '3':
1471 RegNo = X86::DR3;
1472 break;
1473 case '4':
1474 RegNo = X86::DR4;
1475 break;
1476 case '5':
1477 RegNo = X86::DR5;
1478 break;
1479 case '6':
1480 RegNo = X86::DR6;
1481 break;
1482 case '7':
1483 RegNo = X86::DR7;
1484 break;
1485 case '8':
1486 RegNo = X86::DR8;
1487 break;
1488 case '9':
1489 RegNo = X86::DR9;
1490 break;
1491 }
1492 } else if (RegName.size() == 4 && RegName[2] == '1') {
1493 switch (RegName[3]) {
1494 case '0':
1495 RegNo = X86::DR10;
1496 break;
1497 case '1':
1498 RegNo = X86::DR11;
1499 break;
1500 case '2':
1501 RegNo = X86::DR12;
1502 break;
1503 case '3':
1504 RegNo = X86::DR13;
1505 break;
1506 case '4':
1507 RegNo = X86::DR14;
1508 break;
1509 case '5':
1510 RegNo = X86::DR15;
1511 break;
1512 }
1513 }
1514 }
1515
1516 if (!RegNo) {
1517 if (isParsingIntelSyntax())
1518 return true;
1519 return Error(StartLoc, "invalid register name", SMRange(StartLoc, EndLoc));
1520 }
1521 return false;
1522}
1523
1524bool X86AsmParser::ParseRegister(MCRegister &RegNo, SMLoc &StartLoc,
1525 SMLoc &EndLoc, bool RestoreOnFailure) {
1526 MCAsmParser &Parser = getParser();
1527 AsmLexer &Lexer = getLexer();
1528 RegNo = MCRegister();
1529
1531 auto OnFailure = [RestoreOnFailure, &Lexer, &Tokens]() {
1532 if (RestoreOnFailure) {
1533 while (!Tokens.empty()) {
1534 Lexer.UnLex(Tokens.pop_back_val());
1535 }
1536 }
1537 };
1538
1539 const AsmToken &PercentTok = Parser.getTok();
1540 StartLoc = PercentTok.getLoc();
1541
1542 // If we encounter a %, ignore it. This code handles registers with and
1543 // without the prefix, unprefixed registers can occur in cfi directives.
1544 if (!isParsingIntelSyntax() && PercentTok.is(AsmToken::Percent)) {
1545 Tokens.push_back(PercentTok);
1546 Parser.Lex(); // Eat percent token.
1547 }
1548
1549 const AsmToken &Tok = Parser.getTok();
1550 EndLoc = Tok.getEndLoc();
1551
1552 if (Tok.isNot(AsmToken::Identifier)) {
1553 OnFailure();
1554 if (isParsingIntelSyntax()) return true;
1555 return Error(StartLoc, "invalid register name",
1556 SMRange(StartLoc, EndLoc));
1557 }
1558
1559 if (MatchRegisterByName(RegNo, Tok.getString(), StartLoc, EndLoc)) {
1560 OnFailure();
1561 return true;
1562 }
1563
1564 // Parse "%st" as "%st(0)" and "%st(1)", which is multiple tokens.
1565 if (RegNo == X86::ST0) {
1566 Tokens.push_back(Tok);
1567 Parser.Lex(); // Eat 'st'
1568
1569 // Check to see if we have '(4)' after %st.
1570 if (Lexer.isNot(AsmToken::LParen))
1571 return false;
1572 // Lex the paren.
1573 Tokens.push_back(Parser.getTok());
1574 Parser.Lex();
1575
1576 const AsmToken &IntTok = Parser.getTok();
1577 if (IntTok.isNot(AsmToken::Integer)) {
1578 OnFailure();
1579 return Error(IntTok.getLoc(), "expected stack index");
1580 }
1581 switch (IntTok.getIntVal()) {
1582 case 0: RegNo = X86::ST0; break;
1583 case 1: RegNo = X86::ST1; break;
1584 case 2: RegNo = X86::ST2; break;
1585 case 3: RegNo = X86::ST3; break;
1586 case 4: RegNo = X86::ST4; break;
1587 case 5: RegNo = X86::ST5; break;
1588 case 6: RegNo = X86::ST6; break;
1589 case 7: RegNo = X86::ST7; break;
1590 default:
1591 OnFailure();
1592 return Error(IntTok.getLoc(), "invalid stack index");
1593 }
1594
1595 // Lex IntTok
1596 Tokens.push_back(IntTok);
1597 Parser.Lex();
1598 if (Lexer.isNot(AsmToken::RParen)) {
1599 OnFailure();
1600 return Error(Parser.getTok().getLoc(), "expected ')'");
1601 }
1602
1603 EndLoc = Parser.getTok().getEndLoc();
1604 Parser.Lex(); // Eat ')'
1605 return false;
1606 }
1607
1608 EndLoc = Parser.getTok().getEndLoc();
1609
1610 if (!RegNo) {
1611 OnFailure();
1612 if (isParsingIntelSyntax()) return true;
1613 return Error(StartLoc, "invalid register name",
1614 SMRange(StartLoc, EndLoc));
1615 }
1616
1617 Parser.Lex(); // Eat identifier token.
1618 return false;
1619}
1620
// Public register-parsing entry point: parse without restoring lexer state on
// failure, so diagnostics are emitted at the point of error.
1621bool X86AsmParser::parseRegister(MCRegister &Reg, SMLoc &StartLoc,
1622 SMLoc &EndLoc) {
1623 return ParseRegister(Reg, StartLoc, EndLoc, /*RestoreOnFailure=*/false);
1624}
1625
// Speculative register parse: run ParseRegister with RestoreOnFailure=true and
// translate the outcome into a ParseStatus. Any pending parser error is
// cleared and reported as Failure; a clean miss becomes NoMatch.
1626ParseStatus X86AsmParser::tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
1627 SMLoc &EndLoc) {
1628 bool Result = ParseRegister(Reg, StartLoc, EndLoc, /*RestoreOnFailure=*/true);
1629 bool PendingErrors = getParser().hasPendingError();
1630 getParser().clearPendingErrors();
1631 if (PendingErrors)
1632 return ParseStatus::Failure;
1633 if (Result)
1634 return ParseStatus::NoMatch;
1635 return ParseStatus::Success;
1636}
1637
// Build the default (R|E)SI-based memory operand (zero displacement, no
// segment/index, scale 1). The base register width follows the mode; the
// Code16GCC flag selects the 32-bit register even in 16-bit mode.
1638std::unique_ptr<X86Operand> X86AsmParser::DefaultMemSIOperand(SMLoc Loc) {
1639 bool Parse32 = is32BitMode() || Code16GCC;
1640 MCRegister Basereg =
1641 is64BitMode() ? X86::RSI : (Parse32 ? X86::ESI : X86::SI);
1642 const MCExpr *Disp = MCConstantExpr::create(0, getContext());
1643 return X86Operand::CreateMem(getPointerWidth(), /*SegReg=*/0, Disp,
1644 /*BaseReg=*/Basereg, /*IndexReg=*/0, /*Scale=*/1,
1645 Loc, Loc, 0);
1646}
1647
// Build the default (R|E)DI-based memory operand, mirroring
// DefaultMemSIOperand but using the DI register family.
1648std::unique_ptr<X86Operand> X86AsmParser::DefaultMemDIOperand(SMLoc Loc) {
1649 bool Parse32 = is32BitMode() || Code16GCC;
1650 MCRegister Basereg =
1651 is64BitMode() ? X86::RDI : (Parse32 ? X86::EDI : X86::DI);
1652 const MCExpr *Disp = MCConstantExpr::create(0, getContext());
1653 return X86Operand::CreateMem(getPointerWidth(), /*SegReg=*/0, Disp,
1654 /*BaseReg=*/Basereg, /*IndexReg=*/0, /*Scale=*/1,
1655 Loc, Loc, 0);
1656}
1657
// True for the SI register family, false for the DI family; any other
// register is a caller error (unreachable).
1658bool X86AsmParser::IsSIReg(MCRegister Reg) {
1659 switch (Reg.id()) {
1660 default: llvm_unreachable("Only (R|E)SI and (R|E)DI are expected!");
1661 case X86::RSI:
1662 case X86::ESI:
1663 case X86::SI:
1664 return true;
1665 case X86::RDI:
1666 case X86::EDI:
1667 case X86::DI:
1668 return false;
1669 }
1670}
1671
// Map a GR64/GR32/GR16 register class to its canonical SI or DI register of
// that width, depending on IsSIReg.
1672MCRegister X86AsmParser::GetSIDIForRegClass(unsigned RegClassID, bool IsSIReg) {
1673 switch (RegClassID) {
1674 default: llvm_unreachable("Unexpected register class");
1675 case X86::GR64RegClassID:
1676 return IsSIReg ? X86::RSI : X86::RDI;
1677 case X86::GR32RegClassID:
1678 return IsSIReg ? X86::ESI : X86::EDI;
1679 case X86::GR16RegClassID:
1680 return IsSIReg ? X86::SI : X86::DI;
1681 }
1682}
1683
// Append the implicit source and destination operands in dialect order:
// Intel syntax lists the destination first, AT&T the source first.
1684void X86AsmParser::AddDefaultSrcDestOperands(
1685 OperandVector& Operands, std::unique_ptr<llvm::MCParsedAsmOperand> &&Src,
1686 std::unique_ptr<llvm::MCParsedAsmOperand> &&Dst) {
1687 if (isParsingIntelSyntax()) {
1688 Operands.push_back(std::move(Dst));
1689 Operands.push_back(std::move(Src));
1690 }
1691 else {
1692 Operands.push_back(std::move(Src));
1693 Operands.push_back(std::move(Dst));
1694 }
1695}
1696
1697bool X86AsmParser::VerifyAndAdjustOperands(OperandVector &OrigOperands,
1698 OperandVector &FinalOperands) {
1699
1700 if (OrigOperands.size() > 1) {
1701 // Check if sizes match, OrigOperands also contains the instruction name
1702 assert(OrigOperands.size() == FinalOperands.size() + 1 &&
1703 "Operand size mismatch");
1704
1706 // Verify types match
1707 int RegClassID = -1;
1708 for (unsigned int i = 0; i < FinalOperands.size(); ++i) {
1709 X86Operand &OrigOp = static_cast<X86Operand &>(*OrigOperands[i + 1]);
1710 X86Operand &FinalOp = static_cast<X86Operand &>(*FinalOperands[i]);
1711
1712 if (FinalOp.isReg() &&
1713 (!OrigOp.isReg() || FinalOp.getReg() != OrigOp.getReg()))
1714 // Return false and let a normal complaint about bogus operands happen
1715 return false;
1716
1717 if (FinalOp.isMem()) {
1718
1719 if (!OrigOp.isMem())
1720 // Return false and let a normal complaint about bogus operands happen
1721 return false;
1722
1723 MCRegister OrigReg = OrigOp.Mem.BaseReg;
1724 MCRegister FinalReg = FinalOp.Mem.BaseReg;
1725
1726 // If we've already encounterd a register class, make sure all register
1727 // bases are of the same register class
1728 if (RegClassID != -1 &&
1729 !X86MCRegisterClasses[RegClassID].contains(OrigReg)) {
1730 return Error(OrigOp.getStartLoc(),
1731 "mismatching source and destination index registers");
1732 }
1733
1734 if (X86MCRegisterClasses[X86::GR64RegClassID].contains(OrigReg))
1735 RegClassID = X86::GR64RegClassID;
1736 else if (X86MCRegisterClasses[X86::GR32RegClassID].contains(OrigReg))
1737 RegClassID = X86::GR32RegClassID;
1738 else if (X86MCRegisterClasses[X86::GR16RegClassID].contains(OrigReg))
1739 RegClassID = X86::GR16RegClassID;
1740 else
1741 // Unexpected register class type
1742 // Return false and let a normal complaint about bogus operands happen
1743 return false;
1744
1745 bool IsSI = IsSIReg(FinalReg);
1746 FinalReg = GetSIDIForRegClass(RegClassID, IsSI);
1747
1748 if (FinalReg != OrigReg) {
1749 std::string RegName = IsSI ? "ES:(R|E)SI" : "ES:(R|E)DI";
1750 Warnings.push_back(std::make_pair(
1751 OrigOp.getStartLoc(),
1752 "memory operand is only for determining the size, " + RegName +
1753 " will be used for the location"));
1754 }
1755
1756 FinalOp.Mem.Size = OrigOp.Mem.Size;
1757 FinalOp.Mem.SegReg = OrigOp.Mem.SegReg;
1758 FinalOp.Mem.BaseReg = FinalReg;
1759 }
1760 }
1761
1762 // Produce warnings only if all the operands passed the adjustment - prevent
1763 // legal cases like "movsd (%rax), %xmm0" mistakenly produce warnings
1764 for (auto &WarningMsg : Warnings) {
1765 Warning(WarningMsg.first, WarningMsg.second);
1766 }
1767
1768 // Remove old operands
1769 for (unsigned int i = 0; i < FinalOperands.size(); ++i)
1770 OrigOperands.pop_back();
1771 }
1772 // OrigOperands.append(FinalOperands.begin(), FinalOperands.end());
1773 for (auto &Op : FinalOperands)
1774 OrigOperands.push_back(std::move(Op));
1775
1776 return false;
1777}
1778
// Dispatch operand parsing to the Intel or AT&T parser based on the current
// assembler dialect.
1779bool X86AsmParser::parseOperand(OperandVector &Operands, StringRef Name) {
1780 if (isParsingIntelSyntax())
1781 return parseIntelOperand(Operands, Name);
1782
1783 return parseATTOperand(Operands);
1784}
1785
1786bool X86AsmParser::CreateMemForMSInlineAsm(
1787 MCRegister SegReg, const MCExpr *Disp, MCRegister BaseReg,
1788 MCRegister IndexReg, unsigned Scale, bool NonAbsMem, SMLoc Start, SMLoc End,
1789 unsigned Size, StringRef Identifier, const InlineAsmIdentifierInfo &Info,
1790 OperandVector &Operands) {
1791 // If we found a decl other than a VarDecl, then assume it is a FuncDecl or
1792 // some other label reference.
1794 // Create an absolute memory reference in order to match against
1795 // instructions taking a PC relative operand.
1796 Operands.push_back(X86Operand::CreateMem(getPointerWidth(), Disp, Start,
1797 End, Size, Identifier,
1798 Info.Label.Decl));
1799 return false;
1800 }
1801 // We either have a direct symbol reference, or an offset from a symbol. The
1802 // parser always puts the symbol on the LHS, so look there for size
1803 // calculation purposes.
1804 unsigned FrontendSize = 0;
1805 void *Decl = nullptr;
1806 bool IsGlobalLV = false;
1808 // Size is in terms of bits in this context.
1809 FrontendSize = Info.Var.Type * 8;
1810 Decl = Info.Var.Decl;
1811 IsGlobalLV = Info.Var.IsGlobalLV;
1812 }
1813 // It is widely common for MS InlineAsm to use a global variable and one/two
1814 // registers in a mmory expression, and though unaccessible via rip/eip.
1815 if (IsGlobalLV) {
1816 if (BaseReg || IndexReg) {
1817 Operands.push_back(X86Operand::CreateMem(getPointerWidth(), Disp, Start,
1818 End, Size, Identifier, Decl, 0,
1819 BaseReg && IndexReg));
1820 return false;
1821 }
1822 if (NonAbsMem)
1823 BaseReg = 1; // Make isAbsMem() false
1824 }
1826 getPointerWidth(), SegReg, Disp, BaseReg, IndexReg, Scale, Start, End,
1827 Size,
1828 /*DefaultBaseReg=*/X86::RIP, Identifier, Decl, FrontendSize));
1829 return false;
1830}
1831
1832// Some binary bitwise operators have a named synonym.
1833// Query a candidate string for being such a named operator,
1834// and if so, invoke the appropriate handler.
// Recognize a textual operator (not/or/shl/shr/xor/and/mod/offset) in an
// Intel-syntax expression and drive the corresponding state-machine event.
// Returns true iff Name was handled as an operator; ParseError is set when
// handling 'offset' fails.
1835bool X86AsmParser::ParseIntelNamedOperator(StringRef Name,
1836 IntelExprStateMachine &SM,
1837 bool &ParseError, SMLoc &End) {
1838 // A named operator should be either lower or upper case, but not a mix...
1839 // except in MASM, which uses full case-insensitivity.
1840 if (Name != Name.lower() && Name != Name.upper() &&
1841 !getParser().isParsingMasm())
1842 return false;
1843 if (Name.equals_insensitive("not")) {
1844 SM.onNot();
1845 } else if (Name.equals_insensitive("or")) {
1846 SM.onOr();
1847 } else if (Name.equals_insensitive("shl")) {
1848 SM.onLShift();
1849 } else if (Name.equals_insensitive("shr")) {
1850 SM.onRShift();
1851 } else if (Name.equals_insensitive("xor")) {
1852 SM.onXor();
1853 } else if (Name.equals_insensitive("and")) {
1854 SM.onAnd();
1855 } else if (Name.equals_insensitive("mod")) {
1856 SM.onMod();
1857 } else if (Name.equals_insensitive("offset")) {
// 'offset' parses its own operand and reports failures through ParseError.
1858 SMLoc OffsetLoc = getTok().getLoc();
1859 const MCExpr *Val = nullptr;
1860 StringRef ID;
1861 InlineAsmIdentifierInfo Info;
1862 ParseError = ParseIntelOffsetOperator(Val, ID, Info, End);
1863 if (ParseError)
1864 return true;
1865 StringRef ErrMsg;
1866 ParseError =
1867 SM.onOffset(Val, OffsetLoc, ID, Info, isParsingMSInlineAsm(), ErrMsg);
1868 if (ParseError)
1869 return Error(SMLoc::getFromPointer(Name.data()), ErrMsg);
1870 } else {
1871 return false;
1872 }
// 'offset' already consumed its tokens above; for the other operators, eat
// the operator token here and report its end location.
1873 if (!Name.equals_insensitive("offset"))
1874 End = consumeToken();
1875 return true;
1876}
// Recognize the MASM comparison operators eq/ne/lt/le/gt/ge
// (case-insensitive) and feed the matching event into the state machine.
// Returns true iff Name was one of them; the operator token is consumed.
1877bool X86AsmParser::ParseMasmNamedOperator(StringRef Name,
1878 IntelExprStateMachine &SM,
1879 bool &ParseError, SMLoc &End) {
1880 if (Name.equals_insensitive("eq")) {
1881 SM.onEq();
1882 } else if (Name.equals_insensitive("ne")) {
1883 SM.onNE();
1884 } else if (Name.equals_insensitive("lt")) {
1885 SM.onLT();
1886 } else if (Name.equals_insensitive("le")) {
1887 SM.onLE();
1888 } else if (Name.equals_insensitive("gt")) {
1889 SM.onGT();
1890 } else if (Name.equals_insensitive("ge")) {
1891 SM.onGE();
1892 } else {
1893 return false;
1894 }
1895 }
1896 End = consumeToken();
1897}
1898
1899// Check whether the current Intel expression is appended after an operand,
1900// i.e. [Operand][Intel Expression]
// If the token just before the current '[' was ']', this bracketed expression
// continues the previous operand ([Operand][Intel Expression]); record that
// in the state machine.
1901void X86AsmParser::tryParseOperandIdx(AsmToken::TokenKind PrevTK,
1902 IntelExprStateMachine &SM) {
1903 if (PrevTK != AsmToken::RBrac)
1904 return;
1905
1906 SM.setAppendAfterOperand();
1907}
1908
1909bool X86AsmParser::ParseIntelExpression(IntelExprStateMachine &SM, SMLoc &End) {
1910 MCAsmParser &Parser = getParser();
1911 StringRef ErrMsg;
1912
1914
1915 if (getContext().getObjectFileInfo()->isPositionIndependent())
1916 SM.setPIC();
1917
1918 bool Done = false;
1919 while (!Done) {
1920 // Get a fresh reference on each loop iteration in case the previous
1921 // iteration moved the token storage during UnLex().
1922 const AsmToken &Tok = Parser.getTok();
1923
1924 bool UpdateLocLex = true;
1925 AsmToken::TokenKind TK = getLexer().getKind();
1926
1927 switch (TK) {
1928 default:
1929 if ((Done = SM.isValidEndState()))
1930 break;
1931 return Error(Tok.getLoc(), "unknown token in expression");
1932 case AsmToken::Error:
1933 return Error(getLexer().getErrLoc(), getLexer().getErr());
1934 break;
1935 case AsmToken::Real:
1936 // DotOperator: [ebx].0
1937 UpdateLocLex = false;
1938 if (ParseIntelDotOperator(SM, End))
1939 return true;
1940 break;
1941 case AsmToken::Dot:
1942 if (!Parser.isParsingMasm()) {
1943 if ((Done = SM.isValidEndState()))
1944 break;
1945 return Error(Tok.getLoc(), "unknown token in expression");
1946 }
1947 // MASM allows spaces around the dot operator (e.g., "var . x")
1948 Lex();
1949 UpdateLocLex = false;
1950 if (ParseIntelDotOperator(SM, End))
1951 return true;
1952 break;
1953 case AsmToken::Dollar:
1954 if (!Parser.isParsingMasm()) {
1955 if ((Done = SM.isValidEndState()))
1956 break;
1957 return Error(Tok.getLoc(), "unknown token in expression");
1958 }
1959 [[fallthrough]];
1960 case AsmToken::String: {
1961 if (Parser.isParsingMasm()) {
1962 // MASM parsers handle strings in expressions as constants.
1963 SMLoc ValueLoc = Tok.getLoc();
1964 int64_t Res;
1965 const MCExpr *Val;
1966 if (Parser.parsePrimaryExpr(Val, End, nullptr))
1967 return true;
1968 UpdateLocLex = false;
1969 if (!Val->evaluateAsAbsolute(Res, getStreamer().getAssemblerPtr()))
1970 return Error(ValueLoc, "expected absolute value");
1971 if (SM.onInteger(Res, ErrMsg))
1972 return Error(ValueLoc, ErrMsg);
1973 break;
1974 }
1975 [[fallthrough]];
1976 }
1977 case AsmToken::At:
1978 case AsmToken::Identifier: {
1979 SMLoc IdentLoc = Tok.getLoc();
1980 StringRef Identifier = Tok.getString();
1981 UpdateLocLex = false;
1982 if (Parser.isParsingMasm()) {
1983 size_t DotOffset = Identifier.find_first_of('.');
1984 if (DotOffset != StringRef::npos) {
1985 consumeToken();
1986 StringRef LHS = Identifier.slice(0, DotOffset);
1987 StringRef Dot = Identifier.substr(DotOffset, 1);
1988 StringRef RHS = Identifier.substr(DotOffset + 1);
1989 if (!RHS.empty()) {
1990 getLexer().UnLex(AsmToken(AsmToken::Identifier, RHS));
1991 }
1992 getLexer().UnLex(AsmToken(AsmToken::Dot, Dot));
1993 if (!LHS.empty()) {
1994 getLexer().UnLex(AsmToken(AsmToken::Identifier, LHS));
1995 }
1996 break;
1997 }
1998 }
1999 // (MASM only) <TYPE> PTR operator
2000 if (Parser.isParsingMasm()) {
2001 const AsmToken &NextTok = getLexer().peekTok();
2002 if (NextTok.is(AsmToken::Identifier) &&
2003 NextTok.getIdentifier().equals_insensitive("ptr")) {
2004 AsmTypeInfo Info;
2005 if (Parser.lookUpType(Identifier, Info))
2006 return Error(Tok.getLoc(), "unknown type");
2007 SM.onCast(Info);
2008 // Eat type and PTR.
2009 consumeToken();
2010 End = consumeToken();
2011 break;
2012 }
2013 }
2014 // Register, or (MASM only) <register>.<field>
2015 MCRegister Reg;
2016 if (Tok.is(AsmToken::Identifier)) {
2017 if (!ParseRegister(Reg, IdentLoc, End, /*RestoreOnFailure=*/true)) {
2018 if (SM.onRegister(Reg, ErrMsg))
2019 return Error(IdentLoc, ErrMsg);
2020 break;
2021 }
2022 if (Parser.isParsingMasm()) {
2023 const std::pair<StringRef, StringRef> IDField =
2024 Tok.getString().split('.');
2025 const StringRef ID = IDField.first, Field = IDField.second;
2026 SMLoc IDEndLoc = SMLoc::getFromPointer(ID.data() + ID.size());
2027 if (!Field.empty() &&
2028 !MatchRegisterByName(Reg, ID, IdentLoc, IDEndLoc)) {
2029 if (SM.onRegister(Reg, ErrMsg))
2030 return Error(IdentLoc, ErrMsg);
2031
2032 AsmFieldInfo Info;
2033 SMLoc FieldStartLoc = SMLoc::getFromPointer(Field.data());
2034 if (Parser.lookUpField(Field, Info))
2035 return Error(FieldStartLoc, "unknown offset");
2036 else if (SM.onPlus(ErrMsg))
2037 return Error(getTok().getLoc(), ErrMsg);
2038 else if (SM.onInteger(Info.Offset, ErrMsg))
2039 return Error(IdentLoc, ErrMsg);
2040 SM.setTypeInfo(Info.Type);
2041
2042 End = consumeToken();
2043 break;
2044 }
2045 }
2046 }
2047 // Operator synonymous ("not", "or" etc.)
2048 bool ParseError = false;
2049 if (ParseIntelNamedOperator(Identifier, SM, ParseError, End)) {
2050 if (ParseError)
2051 return true;
2052 break;
2053 }
2054 if (Parser.isParsingMasm() &&
2055 ParseMasmNamedOperator(Identifier, SM, ParseError, End)) {
2056 if (ParseError)
2057 return true;
2058 break;
2059 }
2060 // Symbol reference, when parsing assembly content
2061 InlineAsmIdentifierInfo Info;
2062 AsmFieldInfo FieldInfo;
2063 const MCExpr *Val;
2064 if (isParsingMSInlineAsm() || Parser.isParsingMasm()) {
2065 // MS Dot Operator expression
2066 if (Identifier.contains('.') &&
2067 (PrevTK == AsmToken::RBrac || PrevTK == AsmToken::RParen)) {
2068 if (ParseIntelDotOperator(SM, End))
2069 return true;
2070 break;
2071 }
2072 }
2073 if (isParsingMSInlineAsm()) {
2074 // MS InlineAsm operators (TYPE/LENGTH/SIZE)
2075 if (unsigned OpKind = IdentifyIntelInlineAsmOperator(Identifier)) {
2076 if (int64_t Val = ParseIntelInlineAsmOperator(OpKind)) {
2077 if (SM.onInteger(Val, ErrMsg))
2078 return Error(IdentLoc, ErrMsg);
2079 } else {
2080 return true;
2081 }
2082 break;
2083 }
2084 // MS InlineAsm identifier
2085 // Call parseIdentifier() to combine @ with the identifier behind it.
2086 if (TK == AsmToken::At && Parser.parseIdentifier(Identifier))
2087 return Error(IdentLoc, "expected identifier");
2088 if (ParseIntelInlineAsmIdentifier(Val, Identifier, Info, false, End))
2089 return true;
2090 else if (SM.onIdentifierExpr(Val, Identifier, Info, FieldInfo.Type,
2091 true, ErrMsg))
2092 return Error(IdentLoc, ErrMsg);
2093 break;
2094 }
2095 if (Parser.isParsingMasm()) {
2096 if (unsigned OpKind = IdentifyMasmOperator(Identifier)) {
2097 int64_t Val;
2098 if (ParseMasmOperator(OpKind, Val))
2099 return true;
2100 if (SM.onInteger(Val, ErrMsg))
2101 return Error(IdentLoc, ErrMsg);
2102 break;
2103 }
2104 if (!getParser().lookUpType(Identifier, FieldInfo.Type)) {
2105 // Field offset immediate; <TYPE>.<field specification>
2106 Lex(); // eat type
2107 bool EndDot = parseOptionalToken(AsmToken::Dot);
2108 while (EndDot || (getTok().is(AsmToken::Identifier) &&
2109 getTok().getString().starts_with("."))) {
2110 getParser().parseIdentifier(Identifier);
2111 if (!EndDot)
2112 Identifier.consume_front(".");
2113 EndDot = Identifier.consume_back(".");
2114 if (getParser().lookUpField(FieldInfo.Type.Name, Identifier,
2115 FieldInfo)) {
2116 SMLoc IDEnd =
2118 return Error(IdentLoc, "Unable to lookup field reference!",
2119 SMRange(IdentLoc, IDEnd));
2120 }
2121 if (!EndDot)
2122 EndDot = parseOptionalToken(AsmToken::Dot);
2123 }
2124 if (SM.onInteger(FieldInfo.Offset, ErrMsg))
2125 return Error(IdentLoc, ErrMsg);
2126 break;
2127 }
2128 }
2129 if (getParser().parsePrimaryExpr(Val, End, &FieldInfo.Type)) {
2130 return Error(Tok.getLoc(), "Unexpected identifier!");
2131 } else if (SM.onIdentifierExpr(Val, Identifier, Info, FieldInfo.Type,
2132 false, ErrMsg)) {
2133 return Error(IdentLoc, ErrMsg);
2134 }
2135 break;
2136 }
2137 case AsmToken::Integer: {
2138 // Look for 'b' or 'f' following an Integer as a directional label
2139 SMLoc Loc = getTok().getLoc();
2140 int64_t IntVal = getTok().getIntVal();
2141 End = consumeToken();
2142 UpdateLocLex = false;
2143 if (getLexer().getKind() == AsmToken::Identifier) {
2144 StringRef IDVal = getTok().getString();
2145 if (IDVal == "f" || IDVal == "b") {
2146 MCSymbol *Sym =
2147 getContext().getDirectionalLocalSymbol(IntVal, IDVal == "b");
2148 auto Variant = X86::S_None;
2149 const MCExpr *Val =
2150 MCSymbolRefExpr::create(Sym, Variant, getContext());
2151 if (IDVal == "b" && Sym->isUndefined())
2152 return Error(Loc, "invalid reference to undefined symbol");
2153 StringRef Identifier = Sym->getName();
2154 InlineAsmIdentifierInfo Info;
2155 AsmTypeInfo Type;
2156 if (SM.onIdentifierExpr(Val, Identifier, Info, Type,
2157 isParsingMSInlineAsm(), ErrMsg))
2158 return Error(Loc, ErrMsg);
2159 End = consumeToken();
2160 } else {
2161 if (SM.onInteger(IntVal, ErrMsg))
2162 return Error(Loc, ErrMsg);
2163 }
2164 } else {
2165 if (SM.onInteger(IntVal, ErrMsg))
2166 return Error(Loc, ErrMsg);
2167 }
2168 break;
2169 }
2170 case AsmToken::Plus:
2171 if (SM.onPlus(ErrMsg))
2172 return Error(getTok().getLoc(), ErrMsg);
2173 break;
2174 case AsmToken::Minus:
2175 if (SM.onMinus(ErrMsg))
2176 return Error(getTok().getLoc(), ErrMsg);
2177 break;
2178 case AsmToken::Tilde: SM.onNot(); break;
2179 case AsmToken::Star: SM.onStar(); break;
2180 case AsmToken::Slash: SM.onDivide(); break;
2181 case AsmToken::Percent: SM.onMod(); break;
2182 case AsmToken::Pipe: SM.onOr(); break;
2183 case AsmToken::Caret: SM.onXor(); break;
2184 case AsmToken::Amp: SM.onAnd(); break;
2185 case AsmToken::LessLess:
2186 SM.onLShift(); break;
2188 SM.onRShift(); break;
2189 case AsmToken::LBrac:
2190 if (SM.onLBrac())
2191 return Error(Tok.getLoc(), "unexpected bracket encountered");
2192 tryParseOperandIdx(PrevTK, SM);
2193 break;
2194 case AsmToken::RBrac:
2195 if (SM.onRBrac(ErrMsg)) {
2196 return Error(Tok.getLoc(), ErrMsg);
2197 }
2198 break;
2199 case AsmToken::LParen: SM.onLParen(); break;
2200 case AsmToken::RParen:
2201 if (SM.onRParen(ErrMsg)) {
2202 return Error(Tok.getLoc(), ErrMsg);
2203 }
2204 break;
2205 }
2206 if (SM.hadError())
2207 return Error(Tok.getLoc(), "unknown token in expression");
2208
2209 if (!Done && UpdateLocLex)
2210 End = consumeToken();
2211
2212 PrevTK = TK;
2213 }
2214 return false;
2215}
2216
2217void X86AsmParser::RewriteIntelExpression(IntelExprStateMachine &SM,
2218 SMLoc Start, SMLoc End) {
2219 SMLoc Loc = Start;
2220 unsigned ExprLen = End.getPointer() - Start.getPointer();
2221 // Skip everything before a symbol displacement (if we have one)
2222 if (SM.getSym() && !SM.isOffsetOperator()) {
2223 StringRef SymName = SM.getSymName();
2224 if (unsigned Len = SymName.data() - Start.getPointer())
2225 InstInfo->AsmRewrites->emplace_back(AOK_Skip, Start, Len);
2226 Loc = SMLoc::getFromPointer(SymName.data() + SymName.size());
2227 ExprLen = End.getPointer() - (SymName.data() + SymName.size());
2228 // If we have only a symbol than there's no need for complex rewrite,
2229 // simply skip everything after it
2230 if (!(SM.getBaseReg() || SM.getIndexReg() || SM.getImm())) {
2231 if (ExprLen)
2232 InstInfo->AsmRewrites->emplace_back(AOK_Skip, Loc, ExprLen);
2233 return;
2234 }
2235 }
2236 // Build an Intel Expression rewrite
2237 StringRef BaseRegStr;
2238 StringRef IndexRegStr;
2239 StringRef OffsetNameStr;
2240 if (SM.getBaseReg())
2241 BaseRegStr = X86IntelInstPrinter::getRegisterName(SM.getBaseReg());
2242 if (SM.getIndexReg())
2243 IndexRegStr = X86IntelInstPrinter::getRegisterName(SM.getIndexReg());
2244 if (SM.isOffsetOperator())
2245 OffsetNameStr = SM.getSymName();
2246 // Emit it
2247 IntelExpr Expr(BaseRegStr, IndexRegStr, SM.getScale(), OffsetNameStr,
2248 SM.getImm(), SM.isMemExpr());
2249 InstInfo->AsmRewrites->emplace_back(Loc, ExprLen, Expr);
2250}
2251
2252// Inline assembly may use variable names with namespace alias qualifiers.
2253bool X86AsmParser::ParseIntelInlineAsmIdentifier(
2254 const MCExpr *&Val, StringRef &Identifier, InlineAsmIdentifierInfo &Info,
2255 bool IsUnevaluatedOperand, SMLoc &End, bool IsParsingOffsetOperator) {
2256 MCAsmParser &Parser = getParser();
2257 assert(isParsingMSInlineAsm() && "Expected to be parsing inline assembly.");
2258 Val = nullptr;
2259
2260 StringRef LineBuf(Identifier.data());
2261 SemaCallback->LookupInlineAsmIdentifier(LineBuf, Info, IsUnevaluatedOperand);
2262
2263 const AsmToken &Tok = Parser.getTok();
2264 SMLoc Loc = Tok.getLoc();
2265
2266 // Advance the token stream until the end of the current token is
2267 // after the end of what the frontend claimed.
2268 const char *EndPtr = Tok.getLoc().getPointer() + LineBuf.size();
2269 do {
2270 End = Tok.getEndLoc();
2271 getLexer().Lex();
2272 } while (End.getPointer() < EndPtr);
2273 Identifier = LineBuf;
2274
2275 // The frontend should end parsing on an assembler token boundary, unless it
2276 // failed parsing.
2277 assert((End.getPointer() == EndPtr ||
2279 "frontend claimed part of a token?");
2280
2281 // If the identifier lookup was unsuccessful, assume that we are dealing with
2282 // a label.
2284 StringRef InternalName =
2285 SemaCallback->LookupInlineAsmLabel(Identifier, getSourceManager(),
2286 Loc, false);
2287 assert(InternalName.size() && "We should have an internal name here.");
2288 // Push a rewrite for replacing the identifier name with the internal name,
2289 // unless we are parsing the operand of an offset operator
2290 if (!IsParsingOffsetOperator)
2291 InstInfo->AsmRewrites->emplace_back(AOK_Label, Loc, Identifier.size(),
2292 InternalName);
2293 else
2294 Identifier = InternalName;
2295 } else if (Info.isKind(InlineAsmIdentifierInfo::IK_EnumVal))
2296 return false;
2297 // Create the symbol reference.
2298 MCSymbol *Sym = getContext().getOrCreateSymbol(Identifier);
2299 auto Variant = X86::S_None;
2300 Val = MCSymbolRefExpr::create(Sym, Variant, getParser().getContext());
2301 return false;
2302}
2303
// ParseRoundingModeOp - Parse AVX-512 rounding mode operand
2305bool X86AsmParser::ParseRoundingModeOp(SMLoc Start, OperandVector &Operands) {
2306 MCAsmParser &Parser = getParser();
2307 const AsmToken &Tok = Parser.getTok();
2308 // Eat "{" and mark the current place.
2309 const SMLoc consumedToken = consumeToken();
2310 if (Tok.isNot(AsmToken::Identifier))
2311 return Error(Tok.getLoc(), "Expected an identifier after {");
2312 if (Tok.getIdentifier().starts_with("r")) {
2313 int rndMode = StringSwitch<int>(Tok.getIdentifier())
2314 .Case("rn", X86::STATIC_ROUNDING::TO_NEAREST_INT)
2315 .Case("rd", X86::STATIC_ROUNDING::TO_NEG_INF)
2316 .Case("ru", X86::STATIC_ROUNDING::TO_POS_INF)
2317 .Case("rz", X86::STATIC_ROUNDING::TO_ZERO)
2318 .Default(-1);
2319 if (-1 == rndMode)
2320 return Error(Tok.getLoc(), "Invalid rounding mode.");
2321 Parser.Lex(); // Eat "r*" of r*-sae
2322 if (!getLexer().is(AsmToken::Minus))
2323 return Error(Tok.getLoc(), "Expected - at this point");
2324 Parser.Lex(); // Eat "-"
2325 Parser.Lex(); // Eat the sae
2326 if (!getLexer().is(AsmToken::RCurly))
2327 return Error(Tok.getLoc(), "Expected } at this point");
2328 SMLoc End = Tok.getEndLoc();
2329 Parser.Lex(); // Eat "}"
2330 const MCExpr *RndModeOp =
2331 MCConstantExpr::create(rndMode, Parser.getContext());
2332 Operands.push_back(X86Operand::CreateImm(RndModeOp, Start, End));
2333 return false;
2334 }
2335 if (Tok.getIdentifier() == "sae") {
2336 Parser.Lex(); // Eat the sae
2337 if (!getLexer().is(AsmToken::RCurly))
2338 return Error(Tok.getLoc(), "Expected } at this point");
2339 Parser.Lex(); // Eat "}"
2340 Operands.push_back(X86Operand::CreateToken("{sae}", consumedToken));
2341 return false;
2342 }
2343 return Error(Tok.getLoc(), "unknown token in expression");
2344}
2345
/// Parse conditional flags for CCMP/CTEST, e.g. {dfv=of,sf,zf,cf} right after
/// mnemonic.
2348bool X86AsmParser::parseCFlagsOp(OperandVector &Operands) {
2349 MCAsmParser &Parser = getParser();
2350 AsmToken Tok = Parser.getTok();
2351 const SMLoc Start = Tok.getLoc();
2352 if (!Tok.is(AsmToken::LCurly))
2353 return Error(Tok.getLoc(), "Expected { at this point");
2354 Parser.Lex(); // Eat "{"
2355 Tok = Parser.getTok();
2356 if (Tok.getIdentifier().lower() != "dfv")
2357 return Error(Tok.getLoc(), "Expected dfv at this point");
2358 Parser.Lex(); // Eat "dfv"
2359 Tok = Parser.getTok();
2360 if (!Tok.is(AsmToken::Equal))
2361 return Error(Tok.getLoc(), "Expected = at this point");
2362 Parser.Lex(); // Eat "="
2363
2364 Tok = Parser.getTok();
2365 SMLoc End;
2366 if (Tok.is(AsmToken::RCurly)) {
2367 End = Tok.getEndLoc();
2369 MCConstantExpr::create(0, Parser.getContext()), Start, End));
2370 Parser.Lex(); // Eat "}"
2371 return false;
2372 }
2373 unsigned CFlags = 0;
2374 for (unsigned I = 0; I < 4; ++I) {
2375 Tok = Parser.getTok();
2376 unsigned CFlag = StringSwitch<unsigned>(Tok.getIdentifier().lower())
2377 .Case("of", 0x8)
2378 .Case("sf", 0x4)
2379 .Case("zf", 0x2)
2380 .Case("cf", 0x1)
2381 .Default(~0U);
2382 if (CFlag == ~0U)
2383 return Error(Tok.getLoc(), "Invalid conditional flags");
2384
2385 if (CFlags & CFlag)
2386 return Error(Tok.getLoc(), "Duplicated conditional flag");
2387 CFlags |= CFlag;
2388
2389 Parser.Lex(); // Eat one conditional flag
2390 Tok = Parser.getTok();
2391 if (Tok.is(AsmToken::RCurly)) {
2392 End = Tok.getEndLoc();
2394 MCConstantExpr::create(CFlags, Parser.getContext()), Start, End));
2395 Parser.Lex(); // Eat "}"
2396 return false;
2397 } else if (I == 3) {
2398 return Error(Tok.getLoc(), "Expected } at this point");
2399 } else if (Tok.isNot(AsmToken::Comma)) {
2400 return Error(Tok.getLoc(), "Expected } or , at this point");
2401 }
2402 Parser.Lex(); // Eat ","
2403 }
2404 llvm_unreachable("Unexpected control flow");
2405}
2406
2407/// Parse the '.' operator.
bool X86AsmParser::ParseIntelDotOperator(IntelExprStateMachine &SM,
                                         SMLoc &End) {
  const AsmToken &Tok = getTok();
  AsmFieldInfo Info;

  // Drop the optional '.'.
  StringRef DotDispStr = Tok.getString();
  DotDispStr.consume_front(".");
  bool TrailingDot = false;

  // .Imm gets lexed as a real.
  if (Tok.is(AsmToken::Real)) {
    APInt DotDisp;
    if (DotDispStr.getAsInteger(10, DotDisp))
      return Error(Tok.getLoc(), "Unexpected offset");
    Info.Offset = DotDisp.getZExtValue();
  } else if ((isParsingMSInlineAsm() || getParser().isParsingMasm()) &&
             Tok.is(AsmToken::Identifier)) {
    // A field reference like `.a.b`. A dot left at the very end of the token
    // is remembered and re-lexed below so the next ParseIntelDotOperator call
    // can pick it up.
    TrailingDot = DotDispStr.consume_back(".");
    const std::pair<StringRef, StringRef> BaseMember = DotDispStr.split('.');
    const StringRef Base = BaseMember.first, Member = BaseMember.second;
    // Try each lookup scope in turn: the expression's current type, the
    // symbol's type, a global field name, and finally the frontend callback.
    // All must fail before we report an error.
    if (getParser().lookUpField(SM.getType(), DotDispStr, Info) &&
        getParser().lookUpField(SM.getSymName(), DotDispStr, Info) &&
        getParser().lookUpField(DotDispStr, Info) &&
        (!SemaCallback ||
         SemaCallback->LookupInlineAsmField(Base, Member, Info.Offset)))
      return Error(Tok.getLoc(), "Unable to lookup field reference!");
  } else {
    return Error(Tok.getLoc(), "Unexpected token type!");
  }

  // Eat the DotExpression and update End
  End = SMLoc::getFromPointer(DotDispStr.data());
  const char *DotExprEndLoc = DotDispStr.data() + DotDispStr.size();
  while (Tok.getLoc().getPointer() < DotExprEndLoc)
    Lex();
  // Push the consumed trailing dot back so the caller sees it as a token.
  if (TrailingDot)
    getLexer().UnLex(AsmToken(AsmToken::Dot, "."));
  // Fold the field offset into the expression and propagate the field's type.
  SM.addImm(Info.Offset);
  SM.setTypeInfo(Info.Type);
  return false;
}
2450
2451/// Parse the 'offset' operator.
2452/// This operator is used to specify the location of a given operand
2453bool X86AsmParser::ParseIntelOffsetOperator(const MCExpr *&Val, StringRef &ID,
2454 InlineAsmIdentifierInfo &Info,
2455 SMLoc &End) {
2456 // Eat offset, mark start of identifier.
2457 SMLoc Start = Lex().getLoc();
2458 ID = getTok().getString();
2459 if (!isParsingMSInlineAsm()) {
2460 if ((getTok().isNot(AsmToken::Identifier) &&
2461 getTok().isNot(AsmToken::String)) ||
2462 getParser().parsePrimaryExpr(Val, End, nullptr))
2463 return Error(Start, "unexpected token!");
2464 } else if (ParseIntelInlineAsmIdentifier(Val, ID, Info, false, End, true)) {
2465 return Error(Start, "unable to lookup expression");
2466 } else if (Info.isKind(InlineAsmIdentifierInfo::IK_EnumVal)) {
2467 return Error(Start, "offset operator cannot yet handle constants");
2468 }
2469 return false;
2470}
2471
// Query a candidate string for being an Intel assembly operator.
// Report back its kind, or IOK_INVALID if it does not evaluate to a known one.
2474unsigned X86AsmParser::IdentifyIntelInlineAsmOperator(StringRef Name) {
2475 return StringSwitch<unsigned>(Name)
2476 .Cases({"TYPE", "type"}, IOK_TYPE)
2477 .Cases({"SIZE", "size"}, IOK_SIZE)
2478 .Cases({"LENGTH", "length"}, IOK_LENGTH)
2479 .Default(IOK_INVALID);
2480}
2481
2482/// Parse the 'LENGTH', 'TYPE' and 'SIZE' operators. The LENGTH operator
2483/// returns the number of elements in an array. It returns the value 1 for
2484/// non-array variables. The SIZE operator returns the size of a C or C++
2485/// variable. A variable's size is the product of its LENGTH and TYPE. The
2486/// TYPE operator returns the size of a C or C++ type or variable. If the
2487/// variable is an array, TYPE returns the size of a single element.
2488unsigned X86AsmParser::ParseIntelInlineAsmOperator(unsigned OpKind) {
2489 MCAsmParser &Parser = getParser();
2490 const AsmToken &Tok = Parser.getTok();
2491 Parser.Lex(); // Eat operator.
2492
2493 const MCExpr *Val = nullptr;
2494 InlineAsmIdentifierInfo Info;
2495 SMLoc Start = Tok.getLoc(), End;
2496 StringRef Identifier = Tok.getString();
2497 if (ParseIntelInlineAsmIdentifier(Val, Identifier, Info,
2498 /*IsUnevaluatedOperand=*/true, End))
2499 return 0;
2500
2502 Error(Start, "unable to lookup expression");
2503 return 0;
2504 }
2505
2506 unsigned CVal = 0;
2507 switch(OpKind) {
2508 default: llvm_unreachable("Unexpected operand kind!");
2509 case IOK_LENGTH: CVal = Info.Var.Length; break;
2510 case IOK_SIZE: CVal = Info.Var.Size; break;
2511 case IOK_TYPE: CVal = Info.Var.Type; break;
2512 }
2513
2514 return CVal;
2515}
2516
// Query a candidate string for being a MASM operator.
// Report back its kind, or MOK_INVALID if it does not evaluate to a known one.
2519unsigned X86AsmParser::IdentifyMasmOperator(StringRef Name) {
2520 return StringSwitch<unsigned>(Name.lower())
2521 .Case("type", MOK_TYPE)
2522 .Cases({"size", "sizeof"}, MOK_SIZEOF)
2523 .Cases({"length", "lengthof"}, MOK_LENGTHOF)
2524 .Default(MOK_INVALID);
2525}
2526
2527/// Parse the 'LENGTHOF', 'SIZEOF', and 'TYPE' operators. The LENGTHOF operator
2528/// returns the number of elements in an array. It returns the value 1 for
2529/// non-array variables. The SIZEOF operator returns the size of a type or
2530/// variable in bytes. A variable's size is the product of its LENGTH and TYPE.
2531/// The TYPE operator returns the size of a variable. If the variable is an
2532/// array, TYPE returns the size of a single element.
bool X86AsmParser::ParseMasmOperator(unsigned OpKind, int64_t &Val) {
  MCAsmParser &Parser = getParser();
  SMLoc OpLoc = Parser.getTok().getLoc();
  Parser.Lex(); // Eat operator.

  Val = 0;
  if (OpKind == MOK_SIZEOF || OpKind == MOK_TYPE) {
    // Check for SIZEOF(<type>) and TYPE(<type>).
    // Peek past an optional '(' so we can test the identifier without
    // consuming anything before we know it names a type.
    bool InParens = Parser.getTok().is(AsmToken::LParen);
    const AsmToken &IDTok = InParens ? getLexer().peekTok() : Parser.getTok();
    AsmTypeInfo Type;
    if (IDTok.is(AsmToken::Identifier) &&
        !Parser.lookUpType(IDTok.getIdentifier(), Type)) {
      Val = Type.Size;

      // Eat tokens.
      if (InParens)
        parseToken(AsmToken::LParen);
      parseToken(AsmToken::Identifier);
      if (InParens)
        parseToken(AsmToken::RParen);
    }
  }

  // Not a bare type name: evaluate the operand as a full Intel expression
  // and query the state machine for the requested property.
  if (!Val) {
    IntelExprStateMachine SM;
    SMLoc End, Start = Parser.getTok().getLoc();
    if (ParseIntelExpression(SM, End))
      return true;

    switch (OpKind) {
    default:
      llvm_unreachable("Unexpected operand kind!");
    case MOK_SIZEOF:
      Val = SM.getSize();
      break;
    case MOK_LENGTHOF:
      Val = SM.getLength();
      break;
    case MOK_TYPE:
      Val = SM.getElementSize();
      break;
    }

    // A zero result means the expression's type could not be determined.
    if (!Val)
      return Error(OpLoc, "expression has unknown type", SMRange(Start, End));
  }

  return false;
}
2583
2584bool X86AsmParser::ParseIntelMemoryOperandSize(unsigned &Size,
2585 StringRef *SizeStr) {
2586 Size = StringSwitch<unsigned>(getTok().getString())
2587 .Cases({"BYTE", "byte"}, 8)
2588 .Cases({"WORD", "word"}, 16)
2589 .Cases({"DWORD", "dword"}, 32)
2590 .Cases({"FLOAT", "float"}, 32)
2591 .Cases({"LONG", "long"}, 32)
2592 .Cases({"FWORD", "fword"}, 48)
2593 .Cases({"DOUBLE", "double"}, 64)
2594 .Cases({"QWORD", "qword"}, 64)
2595 .Cases({"MMWORD", "mmword"}, 64)
2596 .Cases({"XWORD", "xword"}, 80)
2597 .Cases({"TBYTE", "tbyte"}, 80)
2598 .Cases({"XMMWORD", "xmmword"}, 128)
2599 .Cases({"YMMWORD", "ymmword"}, 256)
2600 .Cases({"ZMMWORD", "zmmword"}, 512)
2601 .Default(0);
2602 if (Size) {
2603 if (SizeStr)
2604 *SizeStr = getTok().getString();
2605 const AsmToken &Tok = Lex(); // Eat operand size (e.g., byte, word).
2606 if (!(Tok.getString() == "PTR" || Tok.getString() == "ptr"))
2607 return Error(Tok.getLoc(), "Expected 'PTR' or 'ptr' token!");
2608 Lex(); // Eat ptr.
2609 }
2610 return false;
2611}
2612
2614 if (X86MCRegisterClasses[X86::GR8RegClassID].contains(RegNo))
2615 return 8;
2616 if (X86MCRegisterClasses[X86::GR16RegClassID].contains(RegNo))
2617 return 16;
2618 if (X86MCRegisterClasses[X86::GR32RegClassID].contains(RegNo))
2619 return 32;
2620 if (X86MCRegisterClasses[X86::GR64RegClassID].contains(RegNo))
2621 return 64;
2622 // Unknown register size
2623 return 0;
2624}
2625
2626bool X86AsmParser::parseIntelOperand(OperandVector &Operands, StringRef Name) {
2627 MCAsmParser &Parser = getParser();
2628 const AsmToken &Tok = Parser.getTok();
2629 SMLoc Start, End;
2630
2631 // Parse optional Size directive.
2632 unsigned Size;
2633 StringRef SizeStr;
2634 if (ParseIntelMemoryOperandSize(Size, &SizeStr))
2635 return true;
2636 bool PtrInOperand = bool(Size);
2637
2638 Start = Tok.getLoc();
2639
2640 // Rounding mode operand.
2641 if (getLexer().is(AsmToken::LCurly))
2642 return ParseRoundingModeOp(Start, Operands);
2643
2644 // Register operand.
2645 MCRegister RegNo;
2646 if (Tok.is(AsmToken::Identifier) && !parseRegister(RegNo, Start, End)) {
2647 if (RegNo == X86::RIP)
2648 return Error(Start, "rip can only be used as a base register");
2649 // A Register followed by ':' is considered a segment override
2650 if (Tok.isNot(AsmToken::Colon)) {
2651 if (PtrInOperand) {
2652 if (!Parser.isParsingMasm())
2653 return Error(Start, "expected memory operand after 'ptr', "
2654 "found register operand instead");
2655
2656 // If we are parsing MASM, we are allowed to cast registers to their own
2657 // sizes, but not to other types.
2658 uint16_t RegSize =
2659 RegSizeInBits(*getContext().getRegisterInfo(), RegNo);
2660 if (RegSize == 0)
2661 return Error(
2662 Start,
2663 "cannot cast register '" +
2664 StringRef(getContext().getRegisterInfo()->getName(RegNo)) +
2665 "'; its size is not easily defined.");
2666 if (RegSize != Size)
2667 return Error(
2668 Start,
2669 std::to_string(RegSize) + "-bit register '" +
2670 StringRef(getContext().getRegisterInfo()->getName(RegNo)) +
2671 "' cannot be used as a " + std::to_string(Size) + "-bit " +
2672 SizeStr.upper());
2673 }
2674 Operands.push_back(X86Operand::CreateReg(RegNo, Start, End));
2675 return false;
2676 }
2677 // An alleged segment override. check if we have a valid segment register
2678 if (!X86MCRegisterClasses[X86::SEGMENT_REGRegClassID].contains(RegNo))
2679 return Error(Start, "invalid segment register");
2680 // Eat ':' and update Start location
2681 Start = Lex().getLoc();
2682 }
2683
2684 // Immediates and Memory
2685 IntelExprStateMachine SM;
2686 if (ParseIntelExpression(SM, End))
2687 return true;
2688
2689 if (isParsingMSInlineAsm())
2690 RewriteIntelExpression(SM, Start, Tok.getLoc());
2691
2692 int64_t Imm = SM.getImm();
2693 const MCExpr *Disp = SM.getSym();
2694 const MCExpr *ImmDisp = MCConstantExpr::create(Imm, getContext());
2695 if (Disp && Imm)
2696 Disp = MCBinaryExpr::createAdd(Disp, ImmDisp, getContext());
2697 if (!Disp)
2698 Disp = ImmDisp;
2699
2700 // RegNo != 0 specifies a valid segment register,
2701 // and we are parsing a segment override
2702 if (!SM.isMemExpr() && !RegNo) {
2703 if (isParsingMSInlineAsm() && SM.isOffsetOperator()) {
2704 const InlineAsmIdentifierInfo &Info = SM.getIdentifierInfo();
2706 // Disp includes the address of a variable; make sure this is recorded
2707 // for later handling.
2708 Operands.push_back(X86Operand::CreateImm(Disp, Start, End,
2709 SM.getSymName(), Info.Var.Decl,
2710 Info.Var.IsGlobalLV));
2711 return false;
2712 }
2713 }
2714
2715 Operands.push_back(X86Operand::CreateImm(Disp, Start, End));
2716 return false;
2717 }
2718
2719 StringRef ErrMsg;
2720 MCRegister BaseReg = SM.getBaseReg();
2721 MCRegister IndexReg = SM.getIndexReg();
2722 if (IndexReg && BaseReg == X86::RIP)
2723 BaseReg = MCRegister();
2724 unsigned Scale = SM.getScale();
2725 if (!PtrInOperand)
2726 Size = SM.getElementSize() << 3;
2727
2728 if (Scale == 0 && BaseReg != X86::ESP && BaseReg != X86::RSP &&
2729 (IndexReg == X86::ESP || IndexReg == X86::RSP))
2730 std::swap(BaseReg, IndexReg);
2731
2732 // If BaseReg is a vector register and IndexReg is not, swap them unless
2733 // Scale was specified in which case it would be an error.
2734 if (Scale == 0 &&
2735 !(X86MCRegisterClasses[X86::VR128XRegClassID].contains(IndexReg) ||
2736 X86MCRegisterClasses[X86::VR256XRegClassID].contains(IndexReg) ||
2737 X86MCRegisterClasses[X86::VR512RegClassID].contains(IndexReg)) &&
2738 (X86MCRegisterClasses[X86::VR128XRegClassID].contains(BaseReg) ||
2739 X86MCRegisterClasses[X86::VR256XRegClassID].contains(BaseReg) ||
2740 X86MCRegisterClasses[X86::VR512RegClassID].contains(BaseReg)))
2741 std::swap(BaseReg, IndexReg);
2742
2743 if (Scale != 0 &&
2744 X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg))
2745 return Error(Start, "16-bit addresses cannot have a scale");
2746
2747 // If there was no explicit scale specified, change it to 1.
2748 if (Scale == 0)
2749 Scale = 1;
2750
2751 // If this is a 16-bit addressing mode with the base and index in the wrong
2752 // order, swap them so CheckBaseRegAndIndexRegAndScale doesn't fail. It is
2753 // shared with att syntax where order matters.
2754 if ((BaseReg == X86::SI || BaseReg == X86::DI) &&
2755 (IndexReg == X86::BX || IndexReg == X86::BP))
2756 std::swap(BaseReg, IndexReg);
2757
2758 if ((BaseReg || IndexReg) &&
2759 CheckBaseRegAndIndexRegAndScale(BaseReg, IndexReg, Scale, is64BitMode(),
2760 ErrMsg))
2761 return Error(Start, ErrMsg);
2762 bool IsUnconditionalBranch =
2763 Name.equals_insensitive("jmp") || Name.equals_insensitive("call");
2764 if (isParsingMSInlineAsm())
2765 return CreateMemForMSInlineAsm(RegNo, Disp, BaseReg, IndexReg, Scale,
2766 IsUnconditionalBranch && is64BitMode(),
2767 Start, End, Size, SM.getSymName(),
2768 SM.getIdentifierInfo(), Operands);
2769
2770 // When parsing x64 MS-style assembly, all non-absolute references to a named
2771 // variable default to RIP-relative.
2772 MCRegister DefaultBaseReg;
2773 bool MaybeDirectBranchDest = true;
2774
2775 if (Parser.isParsingMasm()) {
2776 if (is64BitMode() &&
2777 ((PtrInOperand && !IndexReg) || SM.getElementSize() > 0)) {
2778 DefaultBaseReg = X86::RIP;
2779 }
2780 if (IsUnconditionalBranch) {
2781 if (PtrInOperand) {
2782 MaybeDirectBranchDest = false;
2783 if (is64BitMode())
2784 DefaultBaseReg = X86::RIP;
2785 } else if (!BaseReg && !IndexReg && Disp &&
2786 Disp->getKind() == MCExpr::SymbolRef) {
2787 if (is64BitMode()) {
2788 if (SM.getSize() == 8) {
2789 MaybeDirectBranchDest = false;
2790 DefaultBaseReg = X86::RIP;
2791 }
2792 } else {
2793 if (SM.getSize() == 4 || SM.getSize() == 2)
2794 MaybeDirectBranchDest = false;
2795 }
2796 }
2797 }
2798 } else if (IsUnconditionalBranch) {
2799 // Treat `call [offset fn_ref]` (or `jmp`) syntax as an error.
2800 if (!PtrInOperand && SM.isOffsetOperator())
2801 return Error(
2802 Start, "`OFFSET` operator cannot be used in an unconditional branch");
2803 if (PtrInOperand || SM.isBracketUsed())
2804 MaybeDirectBranchDest = false;
2805 }
2806
2807 if (CheckDispOverflow(BaseReg, IndexReg, Disp, Start))
2808 return true;
2809
2810 if ((BaseReg || IndexReg || RegNo || DefaultBaseReg))
2812 getPointerWidth(), RegNo, Disp, BaseReg, IndexReg, Scale, Start, End,
2813 Size, DefaultBaseReg, /*SymName=*/StringRef(), /*OpDecl=*/nullptr,
2814 /*FrontendSize=*/0, /*UseUpRegs=*/false, MaybeDirectBranchDest));
2815 else
2817 getPointerWidth(), Disp, Start, End, Size, /*SymName=*/StringRef(),
2818 /*OpDecl=*/nullptr, /*FrontendSize=*/0, /*UseUpRegs=*/false,
2819 MaybeDirectBranchDest));
2820 return false;
2821}
2822
// Parse a single AT&T-syntax operand: an immediate ($...), a rounding-mode
// operand ({...}), or a register/memory operand. Returns true on error.
bool X86AsmParser::parseATTOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  switch (getLexer().getKind()) {
  case AsmToken::Dollar: {
    // $42 or $ID -> immediate.
    SMLoc Start = Parser.getTok().getLoc(), End;
    Parser.Lex();
    const MCExpr *Val;
    // This is an immediate, so we should not parse a register. Do a precheck
    // for '%' to supersede intra-register parse errors.
    SMLoc L = Parser.getTok().getLoc();
    if (check(getLexer().is(AsmToken::Percent), L,
              "expected immediate expression") ||
        getParser().parseExpression(Val, End) ||
        check(isa<X86MCExpr>(Val), L, "expected immediate expression"))
      return true;
    Operands.push_back(X86Operand::CreateImm(Val, Start, End));
    return false;
  }
  case AsmToken::LCurly: {
    // AVX-512 rounding-mode operand ({rn-sae}, {sae}, ...).
    SMLoc Start = Parser.getTok().getLoc();
    return ParseRoundingModeOp(Start, Operands);
  }
  default: {
    // This a memory operand or a register. We have some parsing complications
    // as a '(' may be part of an immediate expression or the addressing mode
    // block. This is complicated by the fact that an assembler-level variable
    // may refer either to a register or an immediate expression.

    SMLoc Loc = Parser.getTok().getLoc(), EndLoc;
    const MCExpr *Expr = nullptr;
    MCRegister Reg;
    if (getLexer().isNot(AsmToken::LParen)) {
      // No '(' so this is either a displacement expression or a register.
      if (Parser.parseExpression(Expr, EndLoc))
        return true;
      if (auto *RE = dyn_cast<X86MCExpr>(Expr)) {
        // Segment Register. Reset Expr and copy value to register.
        Expr = nullptr;
        Reg = RE->getReg();

        // Check the register.
        if (Reg == X86::EIZ || Reg == X86::RIZ)
          return Error(
              Loc, "%eiz and %riz can only be used as index registers",
              SMRange(Loc, EndLoc));
        if (Reg == X86::RIP)
          return Error(Loc, "%rip can only be used as a base register",
                       SMRange(Loc, EndLoc));
        // Return register that are not segment prefixes immediately.
        if (!Parser.parseOptionalToken(AsmToken::Colon)) {
          Operands.push_back(X86Operand::CreateReg(Reg, Loc, EndLoc));
          return false;
        }
        // The register was followed by ':', so it must be a valid segment
        // override prefix.
        if (!X86MCRegisterClasses[X86::SEGMENT_REGRegClassID].contains(Reg))
          return Error(Loc, "invalid segment register");
        // Accept a '*' absolute memory reference after the segment. Place it
        // before the full memory operand.
        if (getLexer().is(AsmToken::Star))
          Operands.push_back(X86Operand::CreateToken("*", consumeToken()));
      }
    }
    // This is a Memory operand.
    return ParseMemOperand(Reg, Expr, Loc, EndLoc, Operands);
  }
  }
}
2890
2891// X86::COND_INVALID if not a recognized condition code or alternate mnemonic,
2892// otherwise the EFLAGS Condition Code enumerator.
2893X86::CondCode X86AsmParser::ParseConditionCode(StringRef CC) {
2894 return StringSwitch<X86::CondCode>(CC)
2895 .Case("o", X86::COND_O) // Overflow
2896 .Case("no", X86::COND_NO) // No Overflow
2897 .Cases({"b", "nae"}, X86::COND_B) // Below/Neither Above nor Equal
2898 .Cases({"ae", "nb"}, X86::COND_AE) // Above or Equal/Not Below
2899 .Cases({"e", "z"}, X86::COND_E) // Equal/Zero
2900 .Cases({"ne", "nz"}, X86::COND_NE) // Not Equal/Not Zero
2901 .Cases({"be", "na"}, X86::COND_BE) // Below or Equal/Not Above
2902 .Cases({"a", "nbe"}, X86::COND_A) // Above/Neither Below nor Equal
2903 .Case("s", X86::COND_S) // Sign
2904 .Case("ns", X86::COND_NS) // No Sign
2905 .Cases({"p", "pe"}, X86::COND_P) // Parity/Parity Even
2906 .Cases({"np", "po"}, X86::COND_NP) // No Parity/Parity Odd
2907 .Cases({"l", "nge"}, X86::COND_L) // Less/Neither Greater nor Equal
2908 .Cases({"ge", "nl"}, X86::COND_GE) // Greater or Equal/Not Less
2909 .Cases({"le", "ng"}, X86::COND_LE) // Less or Equal/Not Greater
2910 .Cases({"g", "nle"}, X86::COND_G) // Greater/Neither Less nor Equal
2912}
2913
2914// true on failure, false otherwise
2915// If no {z} mark was found - Parser doesn't advance
2916bool X86AsmParser::ParseZ(std::unique_ptr<X86Operand> &Z, SMLoc StartLoc) {
2917 MCAsmParser &Parser = getParser();
2918 // Assuming we are just pass the '{' mark, quering the next token
2919 // Searched for {z}, but none was found. Return false, as no parsing error was
2920 // encountered
2921 if (!(getLexer().is(AsmToken::Identifier) &&
2922 (getLexer().getTok().getIdentifier() == "z")))
2923 return false;
2924 Parser.Lex(); // Eat z
2925 // Query and eat the '}' mark
2926 if (!getLexer().is(AsmToken::RCurly))
2927 return Error(getLexer().getLoc(), "Expected } at this point");
2928 Parser.Lex(); // Eat '}'
2929 // Assign Z with the {z} mark operand
2930 Z = X86Operand::CreateToken("{z}", StartLoc);
2931 return false;
2932}
2933
2934// true on failure, false otherwise
// Parse trailing AVX-512 operand decorations: memory broadcast ({1to<NUM>})
// or write-mask / zeroing marks ({%k<NUM>}, {z}, in either order).
bool X86AsmParser::HandleAVX512Operand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  if (getLexer().is(AsmToken::LCurly)) {
    // Eat "{" and mark the current place.
    const SMLoc consumedToken = consumeToken();
    // Distinguish {1to<NUM>} from {%k<NUM>}.
    if(getLexer().is(AsmToken::Integer)) {
      // Parse memory broadcasting ({1to<NUM>}).
      if (getLexer().getTok().getIntVal() != 1)
        return TokError("Expected 1to<NUM> at this point");
      StringRef Prefix = getLexer().getTok().getString();
      Parser.Lex(); // Eat first token of 1to8
      if (!getLexer().is(AsmToken::Identifier))
        return TokError("Expected 1to<NUM> at this point");
      // Recognize only reasonable suffixes.
      // "1to8" lexes as integer "1" followed by identifier "to8"; glue the
      // two token spellings back together before matching.
      SmallVector<char, 5> BroadcastVector;
      StringRef BroadcastString = (Prefix + getLexer().getTok().getIdentifier())
                                      .toStringRef(BroadcastVector);
      if (!BroadcastString.starts_with("1to"))
        return TokError("Expected 1to<NUM> at this point");
      const char *BroadcastPrimitive =
          StringSwitch<const char *>(BroadcastString)
              .Case("1to2", "{1to2}")
              .Case("1to4", "{1to4}")
              .Case("1to8", "{1to8}")
              .Case("1to16", "{1to16}")
              .Case("1to32", "{1to32}")
              .Default(nullptr);
      if (!BroadcastPrimitive)
        return TokError("Invalid memory broadcast primitive.");
      Parser.Lex(); // Eat trailing token of 1toN
      if (!getLexer().is(AsmToken::RCurly))
        return TokError("Expected } at this point");
      Parser.Lex(); // Eat "}"
      Operands.push_back(X86Operand::CreateToken(BroadcastPrimitive,
                                                 consumedToken));
      // No AVX512 specific primitives can pass
      // after memory broadcasting, so return.
      return false;
    } else {
      // Parse either {k}{z}, {z}{k}, {k} or {z}
      // last one have no meaning, but GCC accepts it
      // Currently, we're just past a '{' mark
      std::unique_ptr<X86Operand> Z;
      if (ParseZ(Z, consumedToken))
        return true;
      // Reaching here means that parsing of the allegedly '{z}' mark yielded
      // no errors.
      // Query for the need of further parsing for a {%k<NUM>} mark
      if (!Z || getLexer().is(AsmToken::LCurly)) {
        SMLoc StartLoc = Z ? consumeToken() : consumedToken;
        // Parse an op-mask register mark ({%k<NUM>}), which is now to be
        // expected
        MCRegister RegNo;
        SMLoc RegLoc;
        if (!parseRegister(RegNo, RegLoc, StartLoc) &&
            X86MCRegisterClasses[X86::VK1RegClassID].contains(RegNo)) {
          if (RegNo == X86::K0)
            return Error(RegLoc, "Register k0 can't be used as write mask");
          if (!getLexer().is(AsmToken::RCurly))
            return Error(getLexer().getLoc(), "Expected } at this point");
          // Emit the mask as the three operands "{", %kN, "}".
          Operands.push_back(X86Operand::CreateToken("{", StartLoc));
          Operands.push_back(
              X86Operand::CreateReg(RegNo, StartLoc, StartLoc));
          Operands.push_back(X86Operand::CreateToken("}", consumeToken()));
        } else
          return Error(getLexer().getLoc(),
                       "Expected an op-mask register at this point");
        // {%k<NUM>} mark is found, inquire for {z}
        if (getLexer().is(AsmToken::LCurly) && !Z) {
          // Have we've found a parsing error, or found no (expected) {z} mark
          // - report an error
          if (ParseZ(Z, consumeToken()) || !Z)
            return Error(getLexer().getLoc(),
                         "Expected a {z} mark at this point");

        }
        // '{z}' on its own is meaningless, hence should be ignored.
        // on the contrary - have it been accompanied by a K register,
        // allow it.
        if (Z)
          Operands.push_back(std::move(Z));
      }
    }
  }
  return false;
}
3022
3023/// Returns false if okay and true if there was an overflow.
3024bool X86AsmParser::CheckDispOverflow(MCRegister BaseReg, MCRegister IndexReg,
3025 const MCExpr *Disp, SMLoc Loc) {
3026 // If the displacement is a constant, check overflows. For 64-bit addressing,
3027 // gas requires isInt<32> and otherwise reports an error. For others, gas
3028 // reports a warning and allows a wider range. E.g. gas allows
3029 // [-0xffffffff,0xffffffff] for 32-bit addressing (e.g. Linux kernel uses
3030 // `leal -__PAGE_OFFSET(%ecx),%esp` where __PAGE_OFFSET is 0xc0000000).
3031 if (BaseReg || IndexReg) {
3032 if (auto CE = dyn_cast<MCConstantExpr>(Disp)) {
3033 auto Imm = CE->getValue();
3034 bool Is64 = X86MCRegisterClasses[X86::GR64RegClassID].contains(BaseReg) ||
3035 X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg);
3036 bool Is16 = X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg);
3037 if (Is64) {
3038 if (!isInt<32>(Imm))
3039 return Error(Loc, "displacement " + Twine(Imm) +
3040 " is not within [-2147483648, 2147483647]");
3041 } else if (!Is16) {
3042 if (!isUInt<32>(Imm < 0 ? -uint64_t(Imm) : uint64_t(Imm))) {
3043 Warning(Loc, "displacement " + Twine(Imm) +
3044 " shortened to 32-bit signed " +
3045 Twine(static_cast<int32_t>(Imm)));
3046 }
3047 } else if (!isUInt<16>(Imm < 0 ? -uint64_t(Imm) : uint64_t(Imm))) {
3048 Warning(Loc, "displacement " + Twine(Imm) +
3049 " shortened to 16-bit signed " +
3050 Twine(static_cast<int16_t>(Imm)));
3051 }
3052 }
3053 }
3054 return false;
3055}
3056
3057/// ParseMemOperand: 'seg : disp(basereg, indexreg, scale)'. The '%ds:' prefix
3058/// has already been parsed if present. disp may be provided as well.
3059bool X86AsmParser::ParseMemOperand(MCRegister SegReg, const MCExpr *Disp,
3060 SMLoc StartLoc, SMLoc EndLoc,
3061 OperandVector &Operands) {
3062 MCAsmParser &Parser = getParser();
3063 SMLoc Loc;
3064 // Based on the initial passed values, we may be in any of these cases, we are
3065 // in one of these cases (with current position (*)):
3066
3067 // 1. seg : * disp (base-index-scale-expr)
3068 // 2. seg : *(disp) (base-index-scale-expr)
3069 // 3. seg : *(base-index-scale-expr)
3070 // 4. disp *(base-index-scale-expr)
3071 // 5. *(disp) (base-index-scale-expr)
3072 // 6. *(base-index-scale-expr)
3073 // 7. disp *
3074 // 8. *(disp)
3075
3076 // If we do not have an displacement yet, check if we're in cases 4 or 6 by
3077 // checking if the first object after the parenthesis is a register (or an
3078 // identifier referring to a register) and parse the displacement or default
3079 // to 0 as appropriate.
3080 auto isAtMemOperand = [this]() {
3081 if (this->getLexer().isNot(AsmToken::LParen))
3082 return false;
3083 AsmToken Buf[2];
3084 StringRef Id;
3085 auto TokCount = this->getLexer().peekTokens(Buf, true);
3086 if (TokCount == 0)
3087 return false;
3088 switch (Buf[0].getKind()) {
3089 case AsmToken::Percent:
3090 case AsmToken::Comma:
3091 return true;
3092 // These lower cases are doing a peekIdentifier.
3093 case AsmToken::At:
3094 case AsmToken::Dollar:
3095 if ((TokCount > 1) &&
3096 (Buf[1].is(AsmToken::Identifier) || Buf[1].is(AsmToken::String)) &&
3097 (Buf[0].getLoc().getPointer() + 1 == Buf[1].getLoc().getPointer()))
3098 Id = StringRef(Buf[0].getLoc().getPointer(),
3099 Buf[1].getIdentifier().size() + 1);
3100 break;
3102 case AsmToken::String:
3103 Id = Buf[0].getIdentifier();
3104 break;
3105 default:
3106 return false;
3107 }
3108 // We have an ID. Check if it is bound to a register.
3109 if (!Id.empty()) {
3110 MCSymbol *Sym = this->getContext().getOrCreateSymbol(Id);
3111 if (Sym->isVariable()) {
3112 auto V = Sym->getVariableValue();
3113 return isa<X86MCExpr>(V);
3114 }
3115 }
3116 return false;
3117 };
3118
3119 if (!Disp) {
3120 // Parse immediate if we're not at a mem operand yet.
3121 if (!isAtMemOperand()) {
3122 if (Parser.parseTokenLoc(Loc) || Parser.parseExpression(Disp, EndLoc))
3123 return true;
3124 assert(!isa<X86MCExpr>(Disp) && "Expected non-register here.");
3125 } else {
3126 // Disp is implicitly zero if we haven't parsed it yet.
3127 Disp = MCConstantExpr::create(0, Parser.getContext());
3128 }
3129 }
3130
3131 // We are now either at the end of the operand or at the '(' at the start of a
3132 // base-index-scale-expr.
3133
3134 if (!parseOptionalToken(AsmToken::LParen)) {
3135 if (!SegReg)
3136 Operands.push_back(
3137 X86Operand::CreateMem(getPointerWidth(), Disp, StartLoc, EndLoc));
3138 else
3139 Operands.push_back(X86Operand::CreateMem(getPointerWidth(), SegReg, Disp,
3140 0, 0, 1, StartLoc, EndLoc));
3141 return false;
3142 }
3143
3144 // If we reached here, then eat the '(' and Process
3145 // the rest of the memory operand.
3146 MCRegister BaseReg, IndexReg;
3147 unsigned Scale = 1;
3148 SMLoc BaseLoc = getLexer().getLoc();
3149 const MCExpr *E;
3150 StringRef ErrMsg;
3151
3152 // Parse BaseReg if one is provided.
3153 if (getLexer().isNot(AsmToken::Comma) && getLexer().isNot(AsmToken::RParen)) {
3154 if (Parser.parseExpression(E, EndLoc) ||
3155 check(!isa<X86MCExpr>(E), BaseLoc, "expected register here"))
3156 return true;
3157
3158 // Check the register.
3159 BaseReg = cast<X86MCExpr>(E)->getReg();
3160 if (BaseReg == X86::EIZ || BaseReg == X86::RIZ)
3161 return Error(BaseLoc, "eiz and riz can only be used as index registers",
3162 SMRange(BaseLoc, EndLoc));
3163 }
3164
3165 if (parseOptionalToken(AsmToken::Comma)) {
3166 // Following the comma we should have either an index register, or a scale
3167 // value. We don't support the later form, but we want to parse it
3168 // correctly.
3169 //
3170 // Even though it would be completely consistent to support syntax like
3171 // "1(%eax,,1)", the assembler doesn't. Use "eiz" or "riz" for this.
3172 if (getLexer().isNot(AsmToken::RParen)) {
3173 if (Parser.parseTokenLoc(Loc) || Parser.parseExpression(E, EndLoc))
3174 return true;
3175
3176 if (!isa<X86MCExpr>(E)) {
3177 // We've parsed an unexpected Scale Value instead of an index
3178 // register. Interpret it as an absolute.
3179 int64_t ScaleVal;
3180 if (!E->evaluateAsAbsolute(ScaleVal, getStreamer().getAssemblerPtr()))
3181 return Error(Loc, "expected absolute expression");
3182 if (ScaleVal != 1)
3183 Warning(Loc, "scale factor without index register is ignored");
3184 Scale = 1;
3185 } else { // IndexReg Found.
3186 IndexReg = cast<X86MCExpr>(E)->getReg();
3187
3188 if (BaseReg == X86::RIP)
3189 return Error(Loc,
3190 "%rip as base register can not have an index register");
3191 if (IndexReg == X86::RIP)
3192 return Error(Loc, "%rip is not allowed as an index register");
3193
3194 if (parseOptionalToken(AsmToken::Comma)) {
3195 // Parse the scale amount:
3196 // ::= ',' [scale-expression]
3197
3198 // A scale amount without an index is ignored.
3199 if (getLexer().isNot(AsmToken::RParen)) {
3200 int64_t ScaleVal;
3201 if (Parser.parseTokenLoc(Loc) ||
3202 Parser.parseAbsoluteExpression(ScaleVal))
3203 return Error(Loc, "expected scale expression");
3204 Scale = (unsigned)ScaleVal;
3205 // Validate the scale amount.
3206 if (X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg) &&
3207 Scale != 1)
3208 return Error(Loc, "scale factor in 16-bit address must be 1");
3209 if (checkScale(Scale, ErrMsg))
3210 return Error(Loc, ErrMsg);
3211 }
3212 }
3213 }
3214 }
3215 }
3216
3217 // Ok, we've eaten the memory operand, verify we have a ')' and eat it too.
3218 if (parseToken(AsmToken::RParen, "unexpected token in memory operand"))
3219 return true;
3220
3221 // This is to support otherwise illegal operand (%dx) found in various
3222 // unofficial manuals examples (e.g. "out[s]?[bwl]? %al, (%dx)") and must now
3223 // be supported. Mark such DX variants separately fix only in special cases.
3224 if (BaseReg == X86::DX && !IndexReg && Scale == 1 && !SegReg &&
3225 isa<MCConstantExpr>(Disp) &&
3226 cast<MCConstantExpr>(Disp)->getValue() == 0) {
3227 Operands.push_back(X86Operand::CreateDXReg(BaseLoc, BaseLoc));
3228 return false;
3229 }
3230
3231 if (CheckBaseRegAndIndexRegAndScale(BaseReg, IndexReg, Scale, is64BitMode(),
3232 ErrMsg))
3233 return Error(BaseLoc, ErrMsg);
3234
3235 if (CheckDispOverflow(BaseReg, IndexReg, Disp, BaseLoc))
3236 return true;
3237
3238 if (SegReg || BaseReg || IndexReg)
3239 Operands.push_back(X86Operand::CreateMem(getPointerWidth(), SegReg, Disp,
3240 BaseReg, IndexReg, Scale, StartLoc,
3241 EndLoc));
3242 else
3243 Operands.push_back(
3244 X86Operand::CreateMem(getPointerWidth(), Disp, StartLoc, EndLoc));
3245 return false;
3246}
3247
3248// Parse either a standard primary expression or a register.
3249bool X86AsmParser::parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) {
3250 MCAsmParser &Parser = getParser();
3251 // See if this is a register first.
3252 if (getTok().is(AsmToken::Percent) ||
3253 (isParsingIntelSyntax() && getTok().is(AsmToken::Identifier) &&
3254 MatchRegisterName(Parser.getTok().getString()))) {
3255 SMLoc StartLoc = Parser.getTok().getLoc();
3256 MCRegister RegNo;
3257 if (parseRegister(RegNo, StartLoc, EndLoc))
3258 return true;
3259 Res = X86MCExpr::create(RegNo, Parser.getContext());
3260 return false;
3261 }
3262 return Parser.parsePrimaryExpr(Res, EndLoc, nullptr);
3263}
3264
3265bool X86AsmParser::parseInstruction(ParseInstructionInfo &Info, StringRef Name,
3266 SMLoc NameLoc, OperandVector &Operands) {
3267 MCAsmParser &Parser = getParser();
3268 InstInfo = &Info;
3269
3270 // Reset the forced VEX encoding.
3271 ForcedOpcodePrefix = OpcodePrefix_Default;
3272 ForcedDispEncoding = DispEncoding_Default;
3273 UseApxExtendedReg = false;
3274 ForcedNoFlag = false;
3275
3276 // Parse pseudo prefixes.
3277 while (true) {
3278 if (Name == "{") {
3279 if (getLexer().isNot(AsmToken::Identifier))
3280 return Error(Parser.getTok().getLoc(), "Unexpected token after '{'");
3281 std::string Prefix = Parser.getTok().getString().lower();
3282 Parser.Lex(); // Eat identifier.
3283 if (getLexer().isNot(AsmToken::RCurly))
3284 return Error(Parser.getTok().getLoc(), "Expected '}'");
3285 Parser.Lex(); // Eat curly.
3286
3287 if (Prefix == "rex")
3288 ForcedOpcodePrefix = OpcodePrefix_REX;
3289 else if (Prefix == "rex2")
3290 ForcedOpcodePrefix = OpcodePrefix_REX2;
3291 else if (Prefix == "vex")
3292 ForcedOpcodePrefix = OpcodePrefix_VEX;
3293 else if (Prefix == "vex2")
3294 ForcedOpcodePrefix = OpcodePrefix_VEX2;
3295 else if (Prefix == "vex3")
3296 ForcedOpcodePrefix = OpcodePrefix_VEX3;
3297 else if (Prefix == "evex")
3298 ForcedOpcodePrefix = OpcodePrefix_EVEX;
3299 else if (Prefix == "disp8")
3300 ForcedDispEncoding = DispEncoding_Disp8;
3301 else if (Prefix == "disp32")
3302 ForcedDispEncoding = DispEncoding_Disp32;
3303 else if (Prefix == "nf")
3304 ForcedNoFlag = true;
3305 else
3306 return Error(NameLoc, "unknown prefix");
3307
3308 NameLoc = Parser.getTok().getLoc();
3309 if (getLexer().is(AsmToken::LCurly)) {
3310 Parser.Lex();
3311 Name = "{";
3312 } else {
3313 if (getLexer().isNot(AsmToken::Identifier))
3314 return Error(Parser.getTok().getLoc(), "Expected identifier");
3315 // FIXME: The mnemonic won't match correctly if its not in lower case.
3316 Name = Parser.getTok().getString();
3317 Parser.Lex();
3318 }
3319 continue;
3320 }
3321 // Parse MASM style pseudo prefixes.
3322 if (isParsingMSInlineAsm()) {
3323 if (Name.equals_insensitive("vex"))
3324 ForcedOpcodePrefix = OpcodePrefix_VEX;
3325 else if (Name.equals_insensitive("vex2"))
3326 ForcedOpcodePrefix = OpcodePrefix_VEX2;
3327 else if (Name.equals_insensitive("vex3"))
3328 ForcedOpcodePrefix = OpcodePrefix_VEX3;
3329 else if (Name.equals_insensitive("evex"))
3330 ForcedOpcodePrefix = OpcodePrefix_EVEX;
3331
3332 if (ForcedOpcodePrefix != OpcodePrefix_Default) {
3333 if (getLexer().isNot(AsmToken::Identifier))
3334 return Error(Parser.getTok().getLoc(), "Expected identifier");
3335 // FIXME: The mnemonic won't match correctly if its not in lower case.
3336 Name = Parser.getTok().getString();
3337 NameLoc = Parser.getTok().getLoc();
3338 Parser.Lex();
3339 }
3340 }
3341 break;
3342 }
3343
3344 // Support the suffix syntax for overriding displacement size as well.
3345 if (Name.consume_back(".d32")) {
3346 ForcedDispEncoding = DispEncoding_Disp32;
3347 } else if (Name.consume_back(".d8")) {
3348 ForcedDispEncoding = DispEncoding_Disp8;
3349 }
3350
3351 StringRef PatchedName = Name;
3352
3353 // Hack to skip "short" following Jcc.
3354 if (isParsingIntelSyntax() &&
3355 (PatchedName == "jmp" || PatchedName == "jc" || PatchedName == "jnc" ||
3356 PatchedName == "jcxz" || PatchedName == "jecxz" ||
3357 (PatchedName.starts_with("j") &&
3358 ParseConditionCode(PatchedName.substr(1)) != X86::COND_INVALID))) {
3359 StringRef NextTok = Parser.getTok().getString();
3360 if (Parser.isParsingMasm() ? NextTok.equals_insensitive("short")
3361 : NextTok == "short") {
3362 SMLoc NameEndLoc =
3363 NameLoc.getFromPointer(NameLoc.getPointer() + Name.size());
3364 // Eat the short keyword.
3365 Parser.Lex();
3366 // MS and GAS ignore the short keyword; they both determine the jmp type
3367 // based on the distance of the label. (NASM does emit different code with
3368 // and without "short," though.)
3369 InstInfo->AsmRewrites->emplace_back(AOK_Skip, NameEndLoc,
3370 NextTok.size() + 1);
3371 }
3372 }
3373
3374 // FIXME: Hack to recognize setneb as setne.
3375 if (PatchedName.starts_with("set") && PatchedName.ends_with("b") &&
3376 PatchedName != "setzub" && PatchedName != "setzunb" &&
3377 PatchedName != "setb" && PatchedName != "setnb")
3378 PatchedName = PatchedName.substr(0, Name.size()-1);
3379
3380 unsigned ComparisonPredicate = ~0U;
3381
3382 // FIXME: Hack to recognize cmp<comparison code>{sh,ss,sd,ph,ps,pd}.
3383 if ((PatchedName.starts_with("cmp") || PatchedName.starts_with("vcmp")) &&
3384 (PatchedName.ends_with("ss") || PatchedName.ends_with("sd") ||
3385 PatchedName.ends_with("sh") || PatchedName.ends_with("ph") ||
3386 PatchedName.ends_with("bf16") || PatchedName.ends_with("ps") ||
3387 PatchedName.ends_with("pd"))) {
3388 bool IsVCMP = PatchedName[0] == 'v';
3389 unsigned CCIdx = IsVCMP ? 4 : 3;
3390 unsigned suffixLength = PatchedName.ends_with("bf16") ? 5 : 2;
3391 unsigned CC = StringSwitch<unsigned>(
3392 PatchedName.slice(CCIdx, PatchedName.size() - suffixLength))
3393 .Case("eq", 0x00)
3394 .Case("eq_oq", 0x00)
3395 .Case("lt", 0x01)
3396 .Case("lt_os", 0x01)
3397 .Case("le", 0x02)
3398 .Case("le_os", 0x02)
3399 .Case("unord", 0x03)
3400 .Case("unord_q", 0x03)
3401 .Case("neq", 0x04)
3402 .Case("neq_uq", 0x04)
3403 .Case("nlt", 0x05)
3404 .Case("nlt_us", 0x05)
3405 .Case("nle", 0x06)
3406 .Case("nle_us", 0x06)
3407 .Case("ord", 0x07)
3408 .Case("ord_q", 0x07)
3409 /* AVX only from here */
3410 .Case("eq_uq", 0x08)
3411 .Case("nge", 0x09)
3412 .Case("nge_us", 0x09)
3413 .Case("ngt", 0x0A)
3414 .Case("ngt_us", 0x0A)
3415 .Case("false", 0x0B)
3416 .Case("false_oq", 0x0B)
3417 .Case("neq_oq", 0x0C)
3418 .Case("ge", 0x0D)
3419 .Case("ge_os", 0x0D)
3420 .Case("gt", 0x0E)
3421 .Case("gt_os", 0x0E)
3422 .Case("true", 0x0F)
3423 .Case("true_uq", 0x0F)
3424 .Case("eq_os", 0x10)
3425 .Case("lt_oq", 0x11)
3426 .Case("le_oq", 0x12)
3427 .Case("unord_s", 0x13)
3428 .Case("neq_us", 0x14)
3429 .Case("nlt_uq", 0x15)
3430 .Case("nle_uq", 0x16)
3431 .Case("ord_s", 0x17)
3432 .Case("eq_us", 0x18)
3433 .Case("nge_uq", 0x19)
3434 .Case("ngt_uq", 0x1A)
3435 .Case("false_os", 0x1B)
3436 .Case("neq_os", 0x1C)
3437 .Case("ge_oq", 0x1D)
3438 .Case("gt_oq", 0x1E)
3439 .Case("true_us", 0x1F)
3440 .Default(~0U);
3441 if (CC != ~0U && (IsVCMP || CC < 8) &&
3442 (IsVCMP || PatchedName.back() != 'h')) {
3443 if (PatchedName.ends_with("ss"))
3444 PatchedName = IsVCMP ? "vcmpss" : "cmpss";
3445 else if (PatchedName.ends_with("sd"))
3446 PatchedName = IsVCMP ? "vcmpsd" : "cmpsd";
3447 else if (PatchedName.ends_with("ps"))
3448 PatchedName = IsVCMP ? "vcmpps" : "cmpps";
3449 else if (PatchedName.ends_with("pd"))
3450 PatchedName = IsVCMP ? "vcmppd" : "cmppd";
3451 else if (PatchedName.ends_with("sh"))
3452 PatchedName = "vcmpsh";
3453 else if (PatchedName.ends_with("ph"))
3454 PatchedName = "vcmpph";
3455 else if (PatchedName.ends_with("bf16"))
3456 PatchedName = "vcmpbf16";
3457 else
3458 llvm_unreachable("Unexpected suffix!");
3459
3460 ComparisonPredicate = CC;
3461 }
3462 }
3463
3464 // FIXME: Hack to recognize vpcmp<comparison code>{ub,uw,ud,uq,b,w,d,q}.
3465 if (PatchedName.starts_with("vpcmp") &&
3466 (PatchedName.back() == 'b' || PatchedName.back() == 'w' ||
3467 PatchedName.back() == 'd' || PatchedName.back() == 'q')) {
3468 unsigned SuffixSize = PatchedName.drop_back().back() == 'u' ? 2 : 1;
3469 unsigned CC = StringSwitch<unsigned>(
3470 PatchedName.slice(5, PatchedName.size() - SuffixSize))
3471 .Case("eq", 0x0) // Only allowed on unsigned. Checked below.
3472 .Case("lt", 0x1)
3473 .Case("le", 0x2)
3474 //.Case("false", 0x3) // Not a documented alias.
3475 .Case("neq", 0x4)
3476 .Case("nlt", 0x5)
3477 .Case("nle", 0x6)
3478 //.Case("true", 0x7) // Not a documented alias.
3479 .Default(~0U);
3480 if (CC != ~0U && (CC != 0 || SuffixSize == 2)) {
3481 switch (PatchedName.back()) {
3482 default: llvm_unreachable("Unexpected character!");
3483 case 'b': PatchedName = SuffixSize == 2 ? "vpcmpub" : "vpcmpb"; break;
3484 case 'w': PatchedName = SuffixSize == 2 ? "vpcmpuw" : "vpcmpw"; break;
3485 case 'd': PatchedName = SuffixSize == 2 ? "vpcmpud" : "vpcmpd"; break;
3486 case 'q': PatchedName = SuffixSize == 2 ? "vpcmpuq" : "vpcmpq"; break;
3487 }
3488 // Set up the immediate to push into the operands later.
3489 ComparisonPredicate = CC;
3490 }
3491 }
3492
3493 // FIXME: Hack to recognize vpcom<comparison code>{ub,uw,ud,uq,b,w,d,q}.
3494 if (PatchedName.starts_with("vpcom") &&
3495 (PatchedName.back() == 'b' || PatchedName.back() == 'w' ||
3496 PatchedName.back() == 'd' || PatchedName.back() == 'q')) {
3497 unsigned SuffixSize = PatchedName.drop_back().back() == 'u' ? 2 : 1;
3498 unsigned CC = StringSwitch<unsigned>(
3499 PatchedName.slice(5, PatchedName.size() - SuffixSize))
3500 .Case("lt", 0x0)
3501 .Case("le", 0x1)
3502 .Case("gt", 0x2)
3503 .Case("ge", 0x3)
3504 .Case("eq", 0x4)
3505 .Case("neq", 0x5)
3506 .Case("false", 0x6)
3507 .Case("true", 0x7)
3508 .Default(~0U);
3509 if (CC != ~0U) {
3510 switch (PatchedName.back()) {
3511 default: llvm_unreachable("Unexpected character!");
3512 case 'b': PatchedName = SuffixSize == 2 ? "vpcomub" : "vpcomb"; break;
3513 case 'w': PatchedName = SuffixSize == 2 ? "vpcomuw" : "vpcomw"; break;
3514 case 'd': PatchedName = SuffixSize == 2 ? "vpcomud" : "vpcomd"; break;
3515 case 'q': PatchedName = SuffixSize == 2 ? "vpcomuq" : "vpcomq"; break;
3516 }
3517 // Set up the immediate to push into the operands later.
3518 ComparisonPredicate = CC;
3519 }
3520 }
3521
3522 // Determine whether this is an instruction prefix.
3523 // FIXME:
3524 // Enhance prefixes integrity robustness. for example, following forms
3525 // are currently tolerated:
3526 // repz repnz <insn> ; GAS errors for the use of two similar prefixes
3527 // lock addq %rax, %rbx ; Destination operand must be of memory type
3528 // xacquire <insn> ; xacquire must be accompanied by 'lock'
3529 bool IsPrefix =
3530 StringSwitch<bool>(Name)
3531 .Cases({"cs", "ds", "es", "fs", "gs", "ss"}, true)
3532 .Cases({"rex64", "data32", "data16", "addr32", "addr16"}, true)
3533 .Cases({"xacquire", "xrelease"}, true)
3534 .Cases({"acquire", "release"}, isParsingIntelSyntax())
3535 .Default(false);
3536
3537 auto isLockRepeatNtPrefix = [](StringRef N) {
3538 return StringSwitch<bool>(N)
3539 .Cases({"lock", "rep", "repe", "repz", "repne", "repnz", "notrack"},
3540 true)
3541 .Default(false);
3542 };
3543
3544 bool CurlyAsEndOfStatement = false;
3545
3546 unsigned Flags = X86::IP_NO_PREFIX;
3547 while (isLockRepeatNtPrefix(Name.lower())) {
3548 unsigned Prefix =
3549 StringSwitch<unsigned>(Name)
3550 .Case("lock", X86::IP_HAS_LOCK)
3551 .Cases({"rep", "repe", "repz"}, X86::IP_HAS_REPEAT)
3552 .Cases({"repne", "repnz"}, X86::IP_HAS_REPEAT_NE)
3553 .Case("notrack", X86::IP_HAS_NOTRACK)
3554 .Default(X86::IP_NO_PREFIX); // Invalid prefix (impossible)
3555 Flags |= Prefix;
3556 if (getLexer().is(AsmToken::EndOfStatement)) {
3557 // We don't have real instr with the given prefix
3558 // let's use the prefix as the instr.
3559 // TODO: there could be several prefixes one after another
3561 break;
3562 }
3563 // FIXME: The mnemonic won't match correctly if its not in lower case.
3564 Name = Parser.getTok().getString();
3565 Parser.Lex(); // eat the prefix
3566 // Hack: we could have something like "rep # some comment" or
3567 // "lock; cmpxchg16b $1" or "lock\0A\09incl" or "lock/incl"
3568 while (Name.starts_with(";") || Name.starts_with("\n") ||
3569 Name.starts_with("#") || Name.starts_with("\t") ||
3570 Name.starts_with("/")) {
3571 // FIXME: The mnemonic won't match correctly if its not in lower case.
3572 Name = Parser.getTok().getString();
3573 Parser.Lex(); // go to next prefix or instr
3574 }
3575 }
3576
3577 if (Flags)
3578 PatchedName = Name;
3579
3580 // Hacks to handle 'data16' and 'data32'
3581 if (PatchedName == "data16" && is16BitMode()) {
3582 return Error(NameLoc, "redundant data16 prefix");
3583 }
3584 if (PatchedName == "data32") {
3585 if (is32BitMode())
3586 return Error(NameLoc, "redundant data32 prefix");
3587 if (is64BitMode())
3588 return Error(NameLoc, "'data32' is not supported in 64-bit mode");
3589 // Hack to 'data16' for the table lookup.
3590 PatchedName = "data16";
3591
3592 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3593 StringRef Next = Parser.getTok().getString();
3594 getLexer().Lex();
3595 // data32 effectively changes the instruction suffix.
3596 // TODO Generalize.
3597 if (Next == "callw")
3598 Next = "calll";
3599 if (Next == "ljmpw")
3600 Next = "ljmpl";
3601
3602 Name = Next;
3603 PatchedName = Name;
3604 ForcedDataPrefix = X86::Is32Bit;
3605 IsPrefix = false;
3606 }
3607 }
3608
3609 Operands.push_back(X86Operand::CreateToken(PatchedName, NameLoc));
3610
3611 // Push the immediate if we extracted one from the mnemonic.
3612 if (ComparisonPredicate != ~0U && !isParsingIntelSyntax()) {
3613 const MCExpr *ImmOp = MCConstantExpr::create(ComparisonPredicate,
3614 getParser().getContext());
3615 Operands.push_back(X86Operand::CreateImm(ImmOp, NameLoc, NameLoc));
3616 }
3617
3618 // Parse condtional flags after mnemonic.
3619 if ((Name.starts_with("ccmp") || Name.starts_with("ctest")) &&
3620 parseCFlagsOp(Operands))
3621 return true;
3622
3623 // This does the actual operand parsing. Don't parse any more if we have a
3624 // prefix juxtaposed with an operation like "lock incl 4(%rax)", because we
3625 // just want to parse the "lock" as the first instruction and the "incl" as
3626 // the next one.
3627 if (getLexer().isNot(AsmToken::EndOfStatement) && !IsPrefix) {
3628 // Parse '*' modifier.
3629 if (getLexer().is(AsmToken::Star))
3630 Operands.push_back(X86Operand::CreateToken("*", consumeToken()));
3631
3632 // Read the operands.
3633 while (true) {
3634 if (parseOperand(Operands, Name))
3635 return true;
3636 if (HandleAVX512Operand(Operands))
3637 return true;
3638
3639 // check for comma and eat it
3640 if (getLexer().is(AsmToken::Comma))
3641 Parser.Lex();
3642 else
3643 break;
3644 }
3645
3646 // In MS inline asm curly braces mark the beginning/end of a block,
3647 // therefore they should be interepreted as end of statement
3648 CurlyAsEndOfStatement =
3649 isParsingIntelSyntax() && isParsingMSInlineAsm() &&
3650 (getLexer().is(AsmToken::LCurly) || getLexer().is(AsmToken::RCurly));
3651 if (getLexer().isNot(AsmToken::EndOfStatement) && !CurlyAsEndOfStatement)
3652 return TokError("unexpected token in argument list");
3653 }
3654
3655 // Push the immediate if we extracted one from the mnemonic.
3656 if (ComparisonPredicate != ~0U && isParsingIntelSyntax()) {
3657 const MCExpr *ImmOp = MCConstantExpr::create(ComparisonPredicate,
3658 getParser().getContext());
3659 Operands.push_back(X86Operand::CreateImm(ImmOp, NameLoc, NameLoc));
3660 }
3661
3662 // Consume the EndOfStatement or the prefix separator Slash
3663 if (getLexer().is(AsmToken::EndOfStatement) ||
3664 (IsPrefix && getLexer().is(AsmToken::Slash)))
3665 Parser.Lex();
3666 else if (CurlyAsEndOfStatement)
3667 // Add an actual EndOfStatement before the curly brace
3668 Info.AsmRewrites->emplace_back(AOK_EndOfStatement,
3669 getLexer().getTok().getLoc(), 0);
3670
3671 // This is for gas compatibility and cannot be done in td.
3672 // Adding "p" for some floating point with no argument.
3673 // For example: fsub --> fsubp
3674 bool IsFp =
3675 Name == "fsub" || Name == "fdiv" || Name == "fsubr" || Name == "fdivr";
3676 if (IsFp && Operands.size() == 1) {
3677 const char *Repl = StringSwitch<const char *>(Name)
3678 .Case("fsub", "fsubp")
3679 .Case("fdiv", "fdivp")
3680 .Case("fsubr", "fsubrp")
3681 .Case("fdivr", "fdivrp");
3682 static_cast<X86Operand &>(*Operands[0]).setTokenValue(Repl);
3683 }
3684
3685 if ((Name == "mov" || Name == "movw" || Name == "movl") &&
3686 (Operands.size() == 3)) {
3687 X86Operand &Op1 = (X86Operand &)*Operands[1];
3688 X86Operand &Op2 = (X86Operand &)*Operands[2];
3689 SMLoc Loc = Op1.getEndLoc();
3690 // Moving a 32 or 16 bit value into a segment register has the same
3691 // behavior. Modify such instructions to always take shorter form.
3692 if (Op1.isReg() && Op2.isReg() &&
3693 X86MCRegisterClasses[X86::SEGMENT_REGRegClassID].contains(
3694 Op2.getReg()) &&
3695 (X86MCRegisterClasses[X86::GR16RegClassID].contains(Op1.getReg()) ||
3696 X86MCRegisterClasses[X86::GR32RegClassID].contains(Op1.getReg()))) {
3697 // Change instruction name to match new instruction.
3698 if (Name != "mov" && Name[3] == (is16BitMode() ? 'l' : 'w')) {
3699 Name = is16BitMode() ? "movw" : "movl";
3700 Operands[0] = X86Operand::CreateToken(Name, NameLoc);
3701 }
3702 // Select the correct equivalent 16-/32-bit source register.
3703 MCRegister Reg =
3704 getX86SubSuperRegister(Op1.getReg(), is16BitMode() ? 16 : 32);
3705 Operands[1] = X86Operand::CreateReg(Reg, Loc, Loc);
3706 }
3707 }
3708
3709 // This is a terrible hack to handle "out[s]?[bwl]? %al, (%dx)" ->
3710 // "outb %al, %dx". Out doesn't take a memory form, but this is a widely
3711 // documented form in various unofficial manuals, so a lot of code uses it.
3712 if ((Name == "outb" || Name == "outsb" || Name == "outw" || Name == "outsw" ||
3713 Name == "outl" || Name == "outsl" || Name == "out" || Name == "outs") &&
3714 Operands.size() == 3) {
3715 X86Operand &Op = (X86Operand &)*Operands.back();
3716 if (Op.isDXReg())
3717 Operands.back() = X86Operand::CreateReg(X86::DX, Op.getStartLoc(),
3718 Op.getEndLoc());
3719 }
3720 // Same hack for "in[s]?[bwl]? (%dx), %al" -> "inb %dx, %al".
3721 if ((Name == "inb" || Name == "insb" || Name == "inw" || Name == "insw" ||
3722 Name == "inl" || Name == "insl" || Name == "in" || Name == "ins") &&
3723 Operands.size() == 3) {
3724 X86Operand &Op = (X86Operand &)*Operands[1];
3725 if (Op.isDXReg())
3726 Operands[1] = X86Operand::CreateReg(X86::DX, Op.getStartLoc(),
3727 Op.getEndLoc());
3728 }
3729
3731 bool HadVerifyError = false;
3732
3733 // Append default arguments to "ins[bwld]"
3734 if (Name.starts_with("ins") &&
3735 (Operands.size() == 1 || Operands.size() == 3) &&
3736 (Name == "insb" || Name == "insw" || Name == "insl" || Name == "insd" ||
3737 Name == "ins")) {
3738
3739 AddDefaultSrcDestOperands(TmpOperands,
3740 X86Operand::CreateReg(X86::DX, NameLoc, NameLoc),
3741 DefaultMemDIOperand(NameLoc));
3742 HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
3743 }
3744
3745 // Append default arguments to "outs[bwld]"
3746 if (Name.starts_with("outs") &&
3747 (Operands.size() == 1 || Operands.size() == 3) &&
3748 (Name == "outsb" || Name == "outsw" || Name == "outsl" ||
3749 Name == "outsd" || Name == "outs")) {
3750 AddDefaultSrcDestOperands(TmpOperands, DefaultMemSIOperand(NameLoc),
3751 X86Operand::CreateReg(X86::DX, NameLoc, NameLoc));
3752 HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
3753 }
3754
3755 // Transform "lods[bwlq]" into "lods[bwlq] ($SIREG)" for appropriate
3756 // values of $SIREG according to the mode. It would be nice if this
3757 // could be achieved with InstAlias in the tables.
3758 if (Name.starts_with("lods") &&
3759 (Operands.size() == 1 || Operands.size() == 2) &&
3760 (Name == "lods" || Name == "lodsb" || Name == "lodsw" ||
3761 Name == "lodsl" || Name == "lodsd" || Name == "lodsq")) {
3762 TmpOperands.push_back(DefaultMemSIOperand(NameLoc));
3763 HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
3764 }
3765
3766 // Transform "stos[bwlq]" into "stos[bwlq] ($DIREG)" for appropriate
3767 // values of $DIREG according to the mode. It would be nice if this
3768 // could be achieved with InstAlias in the tables.
3769 if (Name.starts_with("stos") &&
3770 (Operands.size() == 1 || Operands.size() == 2) &&
3771 (Name == "stos" || Name == "stosb" || Name == "stosw" ||
3772 Name == "stosl" || Name == "stosd" || Name == "stosq")) {
3773 TmpOperands.push_back(DefaultMemDIOperand(NameLoc));
3774 HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
3775 }
3776
3777 // Transform "scas[bwlq]" into "scas[bwlq] ($DIREG)" for appropriate
3778 // values of $DIREG according to the mode. It would be nice if this
3779 // could be achieved with InstAlias in the tables.
3780 if (Name.starts_with("scas") &&
3781 (Operands.size() == 1 || Operands.size() == 2) &&
3782 (Name == "scas" || Name == "scasb" || Name == "scasw" ||
3783 Name == "scasl" || Name == "scasd" || Name == "scasq")) {
3784 TmpOperands.push_back(DefaultMemDIOperand(NameLoc));
3785 HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
3786 }
3787
3788 // Add default SI and DI operands to "cmps[bwlq]".
3789 if (Name.starts_with("cmps") &&
3790 (Operands.size() == 1 || Operands.size() == 3) &&
3791 (Name == "cmps" || Name == "cmpsb" || Name == "cmpsw" ||
3792 Name == "cmpsl" || Name == "cmpsd" || Name == "cmpsq")) {
3793 AddDefaultSrcDestOperands(TmpOperands, DefaultMemDIOperand(NameLoc),
3794 DefaultMemSIOperand(NameLoc));
3795 HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
3796 }
3797
3798 // Add default SI and DI operands to "movs[bwlq]".
3799 if (((Name.starts_with("movs") &&
3800 (Name == "movs" || Name == "movsb" || Name == "movsw" ||
3801 Name == "movsl" || Name == "movsd" || Name == "movsq")) ||
3802 (Name.starts_with("smov") &&
3803 (Name == "smov" || Name == "smovb" || Name == "smovw" ||
3804 Name == "smovl" || Name == "smovd" || Name == "smovq"))) &&
3805 (Operands.size() == 1 || Operands.size() == 3)) {
3806 if (Name == "movsd" && Operands.size() == 1 && !isParsingIntelSyntax())
3807 Operands.back() = X86Operand::CreateToken("movsl", NameLoc);
3808 AddDefaultSrcDestOperands(TmpOperands, DefaultMemSIOperand(NameLoc),
3809 DefaultMemDIOperand(NameLoc));
3810 HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
3811 }
3812
  // Check if we encountered an error for one of the string instructions.
3814 if (HadVerifyError) {
3815 return HadVerifyError;
3816 }
3817
3818 // Transforms "xlat mem8" into "xlatb"
3819 if ((Name == "xlat" || Name == "xlatb") && Operands.size() == 2) {
3820 X86Operand &Op1 = static_cast<X86Operand &>(*Operands[1]);
3821 if (Op1.isMem8()) {
3822 Warning(Op1.getStartLoc(), "memory operand is only for determining the "
3823 "size, (R|E)BX will be used for the location");
3824 Operands.pop_back();
3825 static_cast<X86Operand &>(*Operands[0]).setTokenValue("xlatb");
3826 }
3827 }
3828
3829 if (Flags)
3830 Operands.push_back(X86Operand::CreatePrefix(Flags, NameLoc, NameLoc));
3831 return false;
3832}
3833
3834static bool convertSSEToAVX(MCInst &Inst) {
3835 ArrayRef<X86TableEntry> Table{X86SSE2AVXTable};
3836 unsigned Opcode = Inst.getOpcode();
3837 const auto I = llvm::lower_bound(Table, Opcode);
3838 if (I == Table.end() || I->OldOpc != Opcode)
3839 return false;
3840
3841 Inst.setOpcode(I->NewOpc);
3842 // AVX variant of BLENDVPD/BLENDVPS/PBLENDVB instructions has more
3843 // operand compare to SSE variant, which is added below
3844 if (X86::isBLENDVPD(Opcode) || X86::isBLENDVPS(Opcode) ||
3845 X86::isPBLENDVB(Opcode))
3846 Inst.addOperand(Inst.getOperand(2));
3847
3848 return true;
3849}
3850
/// Post-process a successfully matched instruction, possibly rewriting it to a
/// preferred encoding or an equivalent opcode. Returns true if the instruction
/// was changed (the caller loops until no further change happens).
bool X86AsmParser::processInstruction(MCInst &Inst, const OperandVector &Ops) {
  // Optional SSE->AVX rewriting (driven by the X86Sse2Avx MC option).
  if (MCOptions.X86Sse2Avx && convertSSEToAVX(Inst))
    return true;

  // Shrink 3-byte VEX prefixes to 2-byte where legal, unless the user forced
  // the {vex3} encoding.
  if (ForcedOpcodePrefix != OpcodePrefix_VEX3 &&
      X86::optimizeInstFromVEX3ToVEX2(Inst, MII.get(Inst.getOpcode())))
    return true;

    // NOTE(review): the guard condition for this return is not visible in
    // this view of the file -- confirm against the checked-in source.
    return true;

  // Rewrite `{evex} cmp/test` into the conditional CCMP/CTEST forms, clearing
  // the IP_USE_EVEX flag that routed us here.
  auto replaceWithCCMPCTEST = [&](unsigned Opcode) -> bool {
    if (ForcedOpcodePrefix == OpcodePrefix_EVEX) {
      Inst.setFlags(~(X86::IP_USE_EVEX)&Inst.getFlags());
      Inst.setOpcode(Opcode);
      // NOTE(review): additional operand setup present upstream is not
      // visible in this view -- confirm nothing is missing here.
      return true;
    }
    return false;
  };

  switch (Inst.getOpcode()) {
  default: return false;
  case X86::JMP_1:
    // {disp32} forces a larger displacement as if the instruction was relaxed.
    // NOTE: 16-bit mode uses 16-bit displacement even though it says {disp32}.
    // This matches GNU assembler.
    if (ForcedDispEncoding == DispEncoding_Disp32) {
      Inst.setOpcode(is16BitMode() ? X86::JMP_2 : X86::JMP_4);
      return true;
    }

    return false;
  case X86::JCC_1:
    // {disp32} forces a larger displacement as if the instruction was relaxed.
    // NOTE: 16-bit mode uses 16-bit displacement even though it says {disp32}.
    // This matches GNU assembler.
    if (ForcedDispEncoding == DispEncoding_Disp32) {
      Inst.setOpcode(is16BitMode() ? X86::JCC_2 : X86::JCC_4);
      return true;
    }

    return false;
  case X86::INT: {
    // Transforms "int $3" into "int3" as a size optimization.
    // We can't write this as an InstAlias.
    if (!Inst.getOperand(0).isImm() || Inst.getOperand(0).getImm() != 3)
      return false;
    Inst.clear();
    Inst.setOpcode(X86::INT3);
    return true;
  }
  // `{evex} cmp <>, <>` is alias of `ccmpt {dfv=} <>, <>`, and
  // `{evex} test <>, <>` is alias of `ctest {dfv=} <>, <>`
#define FROM_TO(FROM, TO)                                                      \
  case X86::FROM:                                                              \
    return replaceWithCCMPCTEST(X86::TO);
    FROM_TO(CMP64rr, CCMP64rr)
    FROM_TO(CMP64mi32, CCMP64mi32)
    FROM_TO(CMP64mi8, CCMP64mi8)
    FROM_TO(CMP64mr, CCMP64mr)
    FROM_TO(CMP64ri32, CCMP64ri32)
    FROM_TO(CMP64ri8, CCMP64ri8)
    FROM_TO(CMP64rm, CCMP64rm)

    FROM_TO(CMP32rr, CCMP32rr)
    FROM_TO(CMP32mi, CCMP32mi)
    FROM_TO(CMP32mi8, CCMP32mi8)
    FROM_TO(CMP32mr, CCMP32mr)
    FROM_TO(CMP32ri, CCMP32ri)
    FROM_TO(CMP32ri8, CCMP32ri8)
    FROM_TO(CMP32rm, CCMP32rm)

    FROM_TO(CMP16rr, CCMP16rr)
    FROM_TO(CMP16mi, CCMP16mi)
    FROM_TO(CMP16mi8, CCMP16mi8)
    FROM_TO(CMP16mr, CCMP16mr)
    FROM_TO(CMP16ri, CCMP16ri)
    FROM_TO(CMP16ri8, CCMP16ri8)
    FROM_TO(CMP16rm, CCMP16rm)

    FROM_TO(CMP8rr, CCMP8rr)
    FROM_TO(CMP8mi, CCMP8mi)
    FROM_TO(CMP8mr, CCMP8mr)
    FROM_TO(CMP8ri, CCMP8ri)
    FROM_TO(CMP8rm, CCMP8rm)

    FROM_TO(TEST64rr, CTEST64rr)
    FROM_TO(TEST64mi32, CTEST64mi32)
    FROM_TO(TEST64mr, CTEST64mr)
    FROM_TO(TEST64ri32, CTEST64ri32)

    FROM_TO(TEST32rr, CTEST32rr)
    FROM_TO(TEST32mi, CTEST32mi)
    FROM_TO(TEST32mr, CTEST32mr)
    FROM_TO(TEST32ri, CTEST32ri)

    FROM_TO(TEST16rr, CTEST16rr)
    FROM_TO(TEST16mi, CTEST16mi)
    FROM_TO(TEST16mr, CTEST16mr)
    FROM_TO(TEST16ri, CTEST16ri)

    FROM_TO(TEST8rr, CTEST8rr)
    FROM_TO(TEST8mi, CTEST8mi)
    FROM_TO(TEST8mr, CTEST8mr)
    FROM_TO(TEST8ri, CTEST8ri)
#undef FROM_TO
  }
}
3961
/// Semantic checks on a matched instruction that the table-driven matcher
/// cannot express. Returns true (after emitting an Error) only for hard
/// errors; diagnostics issued via Warning() do not abort assembly.
bool X86AsmParser::validateInstruction(MCInst &Inst, const OperandVector &Ops) {
  using namespace X86;
  const MCRegisterInfo *MRI = getContext().getRegisterInfo();
  unsigned Opcode = Inst.getOpcode();
  uint64_t TSFlags = MII.get(Opcode).TSFlags;
  // Complex FMA forms: destination must differ from every source register.
  if (isVFCMADDCPH(Opcode) || isVFCMADDCSH(Opcode) || isVFMADDCPH(Opcode) ||
      isVFMADDCSH(Opcode)) {
    MCRegister Dest = Inst.getOperand(0).getReg();
    for (unsigned i = 2; i < Inst.getNumOperands(); i++)
      if (Inst.getOperand(i).isReg() && Dest == Inst.getOperand(i).getReg())
        return Warning(Ops[0]->getStartLoc(), "Destination register should be "
                                              "distinct from source registers");
  } else if (isVFCMULCPH(Opcode) || isVFCMULCSH(Opcode) || isVFMULCPH(Opcode) ||
             isVFMULCSH(Opcode)) {
    MCRegister Dest = Inst.getOperand(0).getReg();
    // The mask variants have different operand list. Scan from the third
    // operand to avoid emitting incorrect warning.
    //    VFMULCPHZrr   Dest, Src1, Src2
    //    VFMULCPHZrrk  Dest, Dest, Mask, Src1, Src2
    //    VFMULCPHZrrkz Dest, Mask, Src1, Src2
    for (unsigned i = ((TSFlags & X86II::EVEX_K) ? 2 : 1);
         i < Inst.getNumOperands(); i++)
      if (Inst.getOperand(i).isReg() && Dest == Inst.getOperand(i).getReg())
        return Warning(Ops[0]->getStartLoc(), "Destination register should be "
                                              "distinct from source registers");
  } else if (isV4FMADDPS(Opcode) || isV4FMADDSS(Opcode) ||
             isV4FNMADDPS(Opcode) || isV4FNMADDSS(Opcode) ||
             isVP4DPWSSDS(Opcode) || isVP4DPWSSD(Opcode)) {
    // NOTE(review): the operand-index expression of this initializer is not
    // visible in this view -- confirm against the checked-in source.
    MCRegister Src2 =
        .getReg();
    unsigned Src2Enc = MRI->getEncodingValue(Src2);
    // These instructions implicitly consume a 4-register group aligned to a
    // multiple of four; warn when the named register is not group-aligned.
    if (Src2Enc % 4 != 0) {
      // NOTE(review): the declaration of RegName (printer name of Src2) is
      // missing from this view -- confirm against the checked-in source.
      unsigned GroupStart = (Src2Enc / 4) * 4;
      unsigned GroupEnd = GroupStart + 3;
      return Warning(Ops[0]->getStartLoc(),
                     "source register '" + RegName + "' implicitly denotes '" +
                         RegName.take_front(3) + Twine(GroupStart) + "' to '" +
                         RegName.take_front(3) + Twine(GroupEnd) +
                         "' source group");
    }
  } else if (isVGATHERDPD(Opcode) || isVGATHERDPS(Opcode) ||
             isVGATHERQPD(Opcode) || isVGATHERQPS(Opcode) ||
             isVPGATHERDD(Opcode) || isVPGATHERDQ(Opcode) ||
             isVPGATHERQD(Opcode) || isVPGATHERQQ(Opcode)) {
    // Gather constraints differ between the EVEX form (no mask register
    // operand) and the VEX form (explicit mask operand at index 1).
    bool HasEVEX = (TSFlags & X86II::EncodingMask) == X86II::EVEX;
    if (HasEVEX) {
      unsigned Dest = MRI->getEncodingValue(Inst.getOperand(0).getReg());
      unsigned Index = MRI->getEncodingValue(
          Inst.getOperand(4 + X86::AddrIndexReg).getReg());
      if (Dest == Index)
        return Warning(Ops[0]->getStartLoc(), "index and destination registers "
                                              "should be distinct");
    } else {
      unsigned Dest = MRI->getEncodingValue(Inst.getOperand(0).getReg());
      unsigned Mask = MRI->getEncodingValue(Inst.getOperand(1).getReg());
      unsigned Index = MRI->getEncodingValue(
          Inst.getOperand(3 + X86::AddrIndexReg).getReg());
      if (Dest == Mask || Dest == Index || Mask == Index)
        return Warning(Ops[0]->getStartLoc(), "mask, index, and destination "
                                              "registers should be distinct");
    }
  } else if (isTCMMIMFP16PS(Opcode) || isTCMMRLFP16PS(Opcode) ||
             isTDPBF16PS(Opcode) || isTDPFP16PS(Opcode) || isTDPBSSD(Opcode) ||
             isTDPBSUD(Opcode) || isTDPBUSD(Opcode) || isTDPBUUD(Opcode)) {
    // AMX dot-product/multiply: all three tile operands must be distinct.
    MCRegister SrcDest = Inst.getOperand(0).getReg();
    MCRegister Src1 = Inst.getOperand(2).getReg();
    MCRegister Src2 = Inst.getOperand(3).getReg();
    if (SrcDest == Src1 || SrcDest == Src2 || Src1 == Src2)
      return Error(Ops[0]->getStartLoc(), "all tmm registers must be distinct");
  }

  // High 8-bit regs (AH/BH/CH/DH) are incompatible with encodings that imply
  // extended prefixes:
  //  * Legacy path that would emit a REX (e.g. uses r8..r15 or sil/dil/bpl/spl)
  //  * EVEX
  //  * REX2
  // VEX/XOP don't use REX; they are excluded from the legacy check.
  const unsigned Enc = TSFlags & X86II::EncodingMask;
  if (Enc != X86II::VEX && Enc != X86II::XOP) {
    MCRegister HReg;
    bool UsesRex = TSFlags & X86II::REX_W;
    unsigned NumOps = Inst.getNumOperands();
    for (unsigned i = 0; i != NumOps; ++i) {
      const MCOperand &MO = Inst.getOperand(i);
      if (!MO.isReg())
        continue;
      MCRegister Reg = MO.getReg();
      if (Reg == X86::AH || Reg == X86::BH || Reg == X86::CH || Reg == X86::DH)
        HReg = Reg;
        // NOTE(review): the guard before this assignment (registers that
        // force a REX prefix) is missing from this view -- confirm.
        UsesRex = true;
    }

    if (HReg &&
        (Enc == X86II::EVEX || ForcedOpcodePrefix == OpcodePrefix_REX2 ||
         ForcedOpcodePrefix == OpcodePrefix_REX || UsesRex)) {
      // NOTE(review): the declaration of RegName (printer name of HReg) is
      // missing from this view -- confirm against the checked-in source.
      return Error(Ops[0]->getStartLoc(),
                   "can't encode '" + RegName.str() +
                       "' in an instruction requiring EVEX/REX2/REX prefix");
    }
  }

  // PREFETCHIT0/1 are only meaningful with a RIP-relative address.
  if ((Opcode == X86::PREFETCHIT0 || Opcode == X86::PREFETCHIT1)) {
    const MCOperand &MO = Inst.getOperand(X86::AddrBaseReg);
    if (!MO.isReg() || MO.getReg() != X86::RIP)
      return Warning(
          Ops[0]->getStartLoc(),
          Twine((Inst.getOpcode() == X86::PREFETCHIT0 ? "'prefetchit0'"
                                                      : "'prefetchit1'")) +
              " only supports RIP-relative address");
  }
  return false;
}
4079
4080void X86AsmParser::emitWarningForSpecialLVIInstruction(SMLoc Loc) {
4081 Warning(Loc, "Instruction may be vulnerable to LVI and "
4082 "requires manual mitigation");
4083 Note(SMLoc(), "See https://software.intel.com/"
4084 "security-software-guidance/insights/"
4085 "deep-dive-load-value-injection#specialinstructions"
4086 " for more information");
4087}
4088
/// RET instructions and indirect calls/jumps that load their target from
/// memory combine a load and a branch within a single instruction. To mitigate
/// these instructions against LVI, they must be decomposed into separate load
/// and branch instructions, with an LFENCE in between. For more details, see:
/// - X86LoadValueInjectionRetHardening.cpp
/// - X86LoadValueInjectionIndirectThunks.cpp
/// - https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection
///
/// Applies a mitigation or emits a warning for the opcodes it recognizes;
/// all other instructions are left untouched.
4098void X86AsmParser::applyLVICFIMitigation(MCInst &Inst, MCStreamer &Out) {
4099 // Information on control-flow instructions that require manual mitigation can
4100 // be found here:
4101 // https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection#specialinstructions
4102 switch (Inst.getOpcode()) {
4103 case X86::RET16:
4104 case X86::RET32:
4105 case X86::RET64:
4106 case X86::RETI16:
4107 case X86::RETI32:
4108 case X86::RETI64: {
4109 MCInst ShlInst, FenceInst;
4110 bool Parse32 = is32BitMode() || Code16GCC;
4111 MCRegister Basereg =
4112 is64BitMode() ? X86::RSP : (Parse32 ? X86::ESP : X86::SP);
4113 const MCExpr *Disp = MCConstantExpr::create(0, getContext());
4114 auto ShlMemOp = X86Operand::CreateMem(getPointerWidth(), /*SegReg=*/0, Disp,
4115 /*BaseReg=*/Basereg, /*IndexReg=*/0,
4116 /*Scale=*/1, SMLoc{}, SMLoc{}, 0);
4117 ShlInst.setOpcode(X86::SHL64mi);
4118 ShlMemOp->addMemOperands(ShlInst, 5);
4119 ShlInst.addOperand(MCOperand::createImm(0));
4120 FenceInst.setOpcode(X86::LFENCE);
4121 Out.emitInstruction(ShlInst, getSTI());
4122 Out.emitInstruction(FenceInst, getSTI());
4123 return;
4124 }
4125 case X86::JMP16m:
4126 case X86::JMP32m:
4127 case X86::JMP64m:
4128 case X86::CALL16m:
4129 case X86::CALL32m:
4130 case X86::CALL64m:
4131 emitWarningForSpecialLVIInstruction(Inst.getLoc());
4132 return;
4133 }
4134}
4135
/// To mitigate LVI, every instruction that performs a load can be followed by
/// an LFENCE instruction to squash any potential mis-speculation. There are
/// some instructions that require additional considerations, and may require
/// manual mitigation. For more details, see:
/// https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection
///
/// Emits an LFENCE after loading instructions where it is safe to do so, and
/// a warning for the instructions that require manual mitigation.
4143void X86AsmParser::applyLVILoadHardeningMitigation(MCInst &Inst,
4144 MCStreamer &Out) {
4145 auto Opcode = Inst.getOpcode();
4146 auto Flags = Inst.getFlags();
4147 if ((Flags & X86::IP_HAS_REPEAT) || (Flags & X86::IP_HAS_REPEAT_NE)) {
4148 // Information on REP string instructions that require manual mitigation can
4149 // be found here:
4150 // https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection#specialinstructions
4151 switch (Opcode) {
4152 case X86::CMPSB:
4153 case X86::CMPSW:
4154 case X86::CMPSL:
4155 case X86::CMPSQ:
4156 case X86::SCASB:
4157 case X86::SCASW:
4158 case X86::SCASL:
4159 case X86::SCASQ:
4160 emitWarningForSpecialLVIInstruction(Inst.getLoc());
4161 return;
4162 }
4163 } else if (Opcode == X86::REP_PREFIX || Opcode == X86::REPNE_PREFIX) {
4164 // If a REP instruction is found on its own line, it may or may not be
4165 // followed by a vulnerable instruction. Emit a warning just in case.
4166 emitWarningForSpecialLVIInstruction(Inst.getLoc());
4167 return;
4168 }
4169
4170 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
4171
4172 // Can't mitigate after terminators or calls. A control flow change may have
4173 // already occurred.
4174 if (MCID.isTerminator() || MCID.isCall())
4175 return;
4176
4177 // LFENCE has the mayLoad property, don't double fence.
4178 if (MCID.mayLoad() && Inst.getOpcode() != X86::LFENCE) {
4179 MCInst FenceInst;
4180 FenceInst.setOpcode(X86::LFENCE);
4181 Out.emitInstruction(FenceInst, getSTI());
4182 }
4183}
4184
/// Emit a matched instruction, bracketed by the optional LVI mitigations:
/// CFI decomposition before the instruction, load hardening after it.
void X86AsmParser::emitInstruction(MCInst &Inst, OperandVector &Operands,
                                   MCStreamer &Out) {
  // NOTE(review): the first half of this condition (presumably the LVI
  // hardening flag check) is not visible in this view -- confirm against
  // the checked-in source.
      getSTI().hasFeature(X86::FeatureLVIControlFlowIntegrity))
    applyLVICFIMitigation(Inst, Out);

  Out.emitInstruction(Inst, getSTI());

  // NOTE(review): likewise the first half of this condition is missing from
  // this view -- confirm against the checked-in source.
      getSTI().hasFeature(X86::FeatureLVILoadHardening))
    applyLVILoadHardeningMitigation(Inst, Out);
}
4197
4198static unsigned getPrefixes(OperandVector &Operands) {
4199 unsigned Result = 0;
4200 X86Operand &Prefix = static_cast<X86Operand &>(*Operands.back());
4201 if (Prefix.isPrefix()) {
4202 Result = Prefix.getPrefix();
4203 Operands.pop_back();
4204 }
4205 return Result;
4206}
4207
4208bool X86AsmParser::matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
4209 OperandVector &Operands,
4210 MCStreamer &Out, uint64_t &ErrorInfo,
4211 bool MatchingInlineAsm) {
4212 assert(!Operands.empty() && "Unexpect empty operand list!");
4213 assert((*Operands[0]).isToken() && "Leading operand should always be a mnemonic!");
4214
4215 // First, handle aliases that expand to multiple instructions.
4216 MatchFPUWaitAlias(IDLoc, static_cast<X86Operand &>(*Operands[0]), Operands,
4217 Out, MatchingInlineAsm);
4218 unsigned Prefixes = getPrefixes(Operands);
4219
4220 MCInst Inst;
4221
4222 // If REX/REX2/VEX/EVEX encoding is forced, we need to pass the USE_* flag to
4223 // the encoder and printer.
4224 if (ForcedOpcodePrefix == OpcodePrefix_REX)
4225 Prefixes |= X86::IP_USE_REX;
4226 else if (ForcedOpcodePrefix == OpcodePrefix_REX2)
4227 Prefixes |= X86::IP_USE_REX2;
4228 else if (ForcedOpcodePrefix == OpcodePrefix_VEX)
4229 Prefixes |= X86::IP_USE_VEX;
4230 else if (ForcedOpcodePrefix == OpcodePrefix_VEX2)
4231 Prefixes |= X86::IP_USE_VEX2;
4232 else if (ForcedOpcodePrefix == OpcodePrefix_VEX3)
4233 Prefixes |= X86::IP_USE_VEX3;
4234 else if (ForcedOpcodePrefix == OpcodePrefix_EVEX)
4235 Prefixes |= X86::IP_USE_EVEX;
4236
4237 // Set encoded flags for {disp8} and {disp32}.
4238 if (ForcedDispEncoding == DispEncoding_Disp8)
4239 Prefixes |= X86::IP_USE_DISP8;
4240 else if (ForcedDispEncoding == DispEncoding_Disp32)
4241 Prefixes |= X86::IP_USE_DISP32;
4242
4243 if (Prefixes)
4244 Inst.setFlags(Prefixes);
4245
4246 return isParsingIntelSyntax()
4247 ? matchAndEmitIntelInstruction(IDLoc, Opcode, Inst, Operands, Out,
4248 ErrorInfo, MatchingInlineAsm)
4249 : matchAndEmitATTInstruction(IDLoc, Opcode, Inst, Operands, Out,
4250 ErrorInfo, MatchingInlineAsm);
4251}
4252
4253void X86AsmParser::MatchFPUWaitAlias(SMLoc IDLoc, X86Operand &Op,
4254 OperandVector &Operands, MCStreamer &Out,
4255 bool MatchingInlineAsm) {
4256 // FIXME: This should be replaced with a real .td file alias mechanism.
4257 // Also, MatchInstructionImpl should actually *do* the EmitInstruction
4258 // call.
4259 const char *Repl = StringSwitch<const char *>(Op.getToken())
4260 .Case("finit", "fninit")
4261 .Case("fsave", "fnsave")
4262 .Case("fstcw", "fnstcw")
4263 .Case("fstcww", "fnstcw")
4264 .Case("fstenv", "fnstenv")
4265 .Case("fstsw", "fnstsw")
4266 .Case("fstsww", "fnstsw")
4267 .Case("fclex", "fnclex")
4268 .Default(nullptr);
4269 if (Repl) {
4270 MCInst Inst;
4271 Inst.setOpcode(X86::WAIT);
4272 Inst.setLoc(IDLoc);
4273 if (!MatchingInlineAsm)
4274 emitInstruction(Inst, Operands, Out);
4275 Operands[0] = X86Operand::CreateToken(Repl, IDLoc);
4276 }
4277}
4278
4279bool X86AsmParser::ErrorMissingFeature(SMLoc IDLoc,
4280 const FeatureBitset &MissingFeatures,
4281 bool MatchingInlineAsm) {
4282 assert(MissingFeatures.any() && "Unknown missing feature!");
4283 SmallString<126> Msg;
4284 raw_svector_ostream OS(Msg);
4285 OS << "instruction requires:";
4286 for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
4287 if (MissingFeatures[i])
4288 OS << ' ' << getSubtargetFeatureName(i);
4289 }
4290 return Error(IDLoc, OS.str(), SMRange(), MatchingInlineAsm);
4291}
4292
/// Reject matcher candidates that conflict with user-forced encoding
/// directives ({rex}, {vex}, {evex}, {nf}, ...) or APX register constraints.
unsigned X86AsmParser::checkTargetMatchPredicate(MCInst &Inst) {
  unsigned Opc = Inst.getOpcode();
  const MCInstrDesc &MCID = MII.get(Opc);
  uint64_t TSFlags = MCID.TSFlags;

  // APX extended registers are only usable by instructions that support them.
  if (UseApxExtendedReg && !X86II::canUseApxExtendedReg(MCID))
    return Match_Unsupported;
  // A {nf} request must match the candidate's EVEX_NF flag (CFCMOV excepted).
  if (ForcedNoFlag == !(TSFlags & X86II::EVEX_NF) && !X86::isCFCMOVCC(Opc))
    return Match_Unsupported;

  switch (ForcedOpcodePrefix) {
  case OpcodePrefix_Default:
    break;
  case OpcodePrefix_REX:
  case OpcodePrefix_REX2:
    // REX/REX2 only apply to legacy-encoded candidates.
    if (TSFlags & X86II::EncodingMask)
      return Match_Unsupported;
    break;
  case OpcodePrefix_VEX:
  case OpcodePrefix_VEX2:
  case OpcodePrefix_VEX3:
    if ((TSFlags & X86II::EncodingMask) != X86II::VEX)
      return Match_Unsupported;
    break;
  case OpcodePrefix_EVEX:
    // In 64-bit mode {evex} cmp/test are allowed through so they can later be
    // rewritten to CCMP/CTEST (see processInstruction).
    if (is64BitMode() && (TSFlags & X86II::EncodingMask) != X86II::EVEX &&
        !X86::isCMP(Opc) && !X86::isTEST(Opc))
      return Match_Unsupported;
    if (!is64BitMode() && (TSFlags & X86II::EncodingMask) != X86II::EVEX)
      return Match_Unsupported;
    break;
  }

  // NOTE(review): the first line of this condition is not visible in this
  // view of the file -- confirm against the checked-in source.
      (ForcedOpcodePrefix != OpcodePrefix_VEX &&
       ForcedOpcodePrefix != OpcodePrefix_VEX2 &&
       ForcedOpcodePrefix != OpcodePrefix_VEX3))
    return Match_Unsupported;

  return Match_Success;
}
4334
/// Match and emit an AT&T-syntax instruction. First tries a direct match of
/// the written mnemonic; on failure, retries with each size suffix
/// (b/w/l/q, or s/l/t for x87) appended, and synthesizes a diagnostic from
/// the combined results. Returns true on error.
bool X86AsmParser::matchAndEmitATTInstruction(
    SMLoc IDLoc, unsigned &Opcode, MCInst &Inst, OperandVector &Operands,
    MCStreamer &Out, uint64_t &ErrorInfo, bool MatchingInlineAsm) {
  X86Operand &Op = static_cast<X86Operand &>(*Operands[0]);
  SMRange EmptyRange;
  // In 16-bit mode, if data32 is specified, temporarily switch to 32-bit mode
  // when matching the instruction.
  if (ForcedDataPrefix == X86::Is32Bit)
    SwitchMode(X86::Is32Bit);
  // First, try a direct match.
  FeatureBitset MissingFeatures;
  unsigned OriginalError = MatchInstruction(Operands, Inst, ErrorInfo,
                                            MissingFeatures, MatchingInlineAsm,
                                            isParsingIntelSyntax());
  if (ForcedDataPrefix == X86::Is32Bit) {
    SwitchMode(X86::Is16Bit);
    ForcedDataPrefix = 0;
  }
  switch (OriginalError) {
  default: llvm_unreachable("Unexpected match result!");
  case Match_Success:
    if (!MatchingInlineAsm && validateInstruction(Inst, Operands))
      return true;
    // Some instructions need post-processing to, for example, tweak which
    // encoding is selected. Loop on it while changes happen so the
    // individual transformations can chain off each other.
    if (!MatchingInlineAsm)
      while (processInstruction(Inst, Operands))
        ;

    Inst.setLoc(IDLoc);
    if (!MatchingInlineAsm)
      emitInstruction(Inst, Operands, Out);
    Opcode = Inst.getOpcode();
    return false;
  case Match_InvalidImmUnsignedi4: {
    SMLoc ErrorLoc = ((X86Operand &)*Operands[ErrorInfo]).getStartLoc();
    if (ErrorLoc == SMLoc())
      ErrorLoc = IDLoc;
    return Error(ErrorLoc, "immediate must be an integer in range [0, 15]",
                 EmptyRange, MatchingInlineAsm);
  }
  case Match_MissingFeature:
    return ErrorMissingFeature(IDLoc, MissingFeatures, MatchingInlineAsm);
  case Match_InvalidOperand:
  case Match_MnemonicFail:
  case Match_Unsupported:
    // Fall through to the suffix-retry path below.
    break;
  }
  if (Op.getToken().empty()) {
    Error(IDLoc, "instruction must have size higher than 0", EmptyRange,
          MatchingInlineAsm);
    return true;
  }

  // FIXME: Ideally, we would only attempt suffix matches for things which are
  // valid prefixes, and we could just infer the right unambiguous
  // type. However, that requires substantially more matcher support than the
  // following hack.

  // Change the operand to point to a temporary token (the original mnemonic
  // plus a placeholder byte that each candidate suffix overwrites in turn).
  StringRef Base = Op.getToken();
  SmallString<16> Tmp;
  Tmp += Base;
  Tmp += ' ';
  Op.setTokenValue(Tmp);

  // If this instruction starts with an 'f', then it is a floating point stack
  // instruction. These come in up to three forms for 32-bit, 64-bit, and
  // 80-bit floating point, which use the suffixes s,l,t respectively.
  //
  // Otherwise, we assume that this may be an integer instruction, which comes
  // in 8/16/32/64-bit forms using the b,w,l,q suffixes respectively.
  const char *Suffixes = Base[0] != 'f' ? "bwlq" : "slt\0";
  // MemSize corresponding to Suffixes. { 8, 16, 32, 64 } { 32, 64, 80, 0 }
  const char *MemSize = Base[0] != 'f' ? "\x08\x10\x20\x40" : "\x20\x40\x50\0";

  // Check for the various suffix matches.
  uint64_t ErrorInfoIgnore;
  FeatureBitset ErrorInfoMissingFeatures; // Init suppresses compiler warnings.
  unsigned Match[4];

  // Some instruction like VPMULDQ is NOT the variant of VPMULD but a new one.
  // So we should make sure the suffix matcher only works for memory variant
  // that has the same size with the suffix.
  // FIXME: This flag is a workaround for legacy instructions that didn't
  // declare non suffix variant assembly.
  bool HasVectorReg = false;
  X86Operand *MemOp = nullptr;
  for (const auto &Op : Operands) {
    X86Operand *X86Op = static_cast<X86Operand *>(Op.get());
    if (X86Op->isVectorReg())
      HasVectorReg = true;
    else if (X86Op->isMem()) {
      MemOp = X86Op;
      assert(MemOp->Mem.Size == 0 && "Memory size always 0 under ATT syntax");
      // Have we found an unqualified memory operand,
      // break. IA allows only one memory operand.
      break;
    }
  }

  // Try each candidate suffix in turn, recording the match result for each.
  for (unsigned I = 0, E = std::size(Match); I != E; ++I) {
    Tmp.back() = Suffixes[I];
    if (MemOp && HasVectorReg)
      MemOp->Mem.Size = MemSize[I];
    Match[I] = Match_MnemonicFail;
    if (MemOp || !HasVectorReg) {
      Match[I] =
          MatchInstruction(Operands, Inst, ErrorInfoIgnore, MissingFeatures,
                           MatchingInlineAsm, isParsingIntelSyntax());
      // If this returned as a missing feature failure, remember that.
      if (Match[I] == Match_MissingFeature)
        ErrorInfoMissingFeatures = MissingFeatures;
    }
  }

  // Restore the old token.
  Op.setTokenValue(Base);

  // If exactly one matched, then we treat that as a successful match (and the
  // instruction will already have been filled in correctly, since the failing
  // matches won't have modified it).
  unsigned NumSuccessfulMatches = llvm::count(Match, Match_Success);
  if (NumSuccessfulMatches == 1) {
    if (!MatchingInlineAsm && validateInstruction(Inst, Operands))
      return true;
    // Some instructions need post-processing to, for example, tweak which
    // encoding is selected. Loop on it while changes happen so the
    // individual transformations can chain off each other.
    if (!MatchingInlineAsm)
      while (processInstruction(Inst, Operands))
        ;

    Inst.setLoc(IDLoc);
    if (!MatchingInlineAsm)
      emitInstruction(Inst, Operands, Out);
    Opcode = Inst.getOpcode();
    return false;
  }

  // Otherwise, the match failed, try to produce a decent error message.

  // If we had multiple suffix matches, then identify this as an ambiguous
  // match.
  if (NumSuccessfulMatches > 1) {
    char MatchChars[4];
    unsigned NumMatches = 0;
    for (unsigned I = 0, E = std::size(Match); I != E; ++I)
      if (Match[I] == Match_Success)
        MatchChars[NumMatches++] = Suffixes[I];

    SmallString<126> Msg;
    raw_svector_ostream OS(Msg);
    OS << "ambiguous instructions require an explicit suffix (could be ";
    for (unsigned i = 0; i != NumMatches; ++i) {
      if (i != 0)
        OS << ", ";
      if (i + 1 == NumMatches)
        OS << "or ";
      OS << "'" << Base << MatchChars[i] << "'";
    }
    OS << ")";
    Error(IDLoc, OS.str(), EmptyRange, MatchingInlineAsm);
    return true;
  }

  // Okay, we know that none of the variants matched successfully.

  // If all of the instructions reported an invalid mnemonic, then the original
  // mnemonic was invalid.
  if (llvm::count(Match, Match_MnemonicFail) == 4) {
    if (OriginalError == Match_MnemonicFail)
      return Error(IDLoc, "invalid instruction mnemonic '" + Base + "'",
                   Op.getLocRange(), MatchingInlineAsm);

    if (OriginalError == Match_Unsupported)
      return Error(IDLoc, "unsupported instruction", EmptyRange,
                   MatchingInlineAsm);

    assert(OriginalError == Match_InvalidOperand && "Unexpected error");
    // Recover location info for the operand if we know which was the problem.
    if (ErrorInfo != ~0ULL) {
      if (ErrorInfo >= Operands.size())
        return Error(IDLoc, "too few operands for instruction", EmptyRange,
                     MatchingInlineAsm);

      X86Operand &Operand = (X86Operand &)*Operands[ErrorInfo];
      if (Operand.getStartLoc().isValid()) {
        SMRange OperandRange = Operand.getLocRange();
        return Error(Operand.getStartLoc(), "invalid operand for instruction",
                     OperandRange, MatchingInlineAsm);
      }
    }

    return Error(IDLoc, "invalid operand for instruction", EmptyRange,
                 MatchingInlineAsm);
  }

  // If one instruction matched as unsupported, report this as unsupported.
  if (llvm::count(Match, Match_Unsupported) == 1) {
    return Error(IDLoc, "unsupported instruction", EmptyRange,
                 MatchingInlineAsm);
  }

  // If one instruction matched with a missing feature, report this as a
  // missing feature.
  if (llvm::count(Match, Match_MissingFeature) == 1) {
    ErrorInfo = Match_MissingFeature;
    return ErrorMissingFeature(IDLoc, ErrorInfoMissingFeatures,
                               MatchingInlineAsm);
  }

  // If one instruction matched with an invalid operand, report this as an
  // operand failure.
  if (llvm::count(Match, Match_InvalidOperand) == 1) {
    return Error(IDLoc, "invalid operand for instruction", EmptyRange,
                 MatchingInlineAsm);
  }

  // If all of these were an outright failure, report it in a useless way.
  Error(IDLoc, "unknown use of instruction mnemonic without a size suffix",
        EmptyRange, MatchingInlineAsm);
  return true;
}
4560
4561bool X86AsmParser::matchAndEmitIntelInstruction(
4562 SMLoc IDLoc, unsigned &Opcode, MCInst &Inst, OperandVector &Operands,
4563 MCStreamer &Out, uint64_t &ErrorInfo, bool MatchingInlineAsm) {
4564 X86Operand &Op = static_cast<X86Operand &>(*Operands[0]);
4565 SMRange EmptyRange;
4566 // Find one unsized memory operand, if present.
4567 X86Operand *UnsizedMemOp = nullptr;
4568 for (const auto &Op : Operands) {
4569 X86Operand *X86Op = static_cast<X86Operand *>(Op.get());
4570 if (X86Op->isMemUnsized()) {
4571 UnsizedMemOp = X86Op;
4572 // Have we found an unqualified memory operand,
4573 // break. IA allows only one memory operand.
4574 break;
4575 }
4576 }
4577
4578 // Allow some instructions to have implicitly pointer-sized operands. This is
4579 // compatible with gas.
4580 StringRef Mnemonic = (static_cast<X86Operand &>(*Operands[0])).getToken();
4581 if (UnsizedMemOp) {
4582 static const char *const PtrSizedInstrs[] = {"call", "jmp", "push", "pop"};
4583 for (const char *Instr : PtrSizedInstrs) {
4584 if (Mnemonic == Instr) {
4585 UnsizedMemOp->Mem.Size = getPointerWidth();
4586 break;
4587 }
4588 }
4589 }
4590
4591 SmallVector<unsigned, 8> Match;
4592 FeatureBitset ErrorInfoMissingFeatures;
4593 FeatureBitset MissingFeatures;
4594 StringRef Base = (static_cast<X86Operand &>(*Operands[0])).getToken();
4595
4596 // If unsized push has immediate operand we should default the default pointer
4597 // size for the size.
4598 if (Mnemonic == "push" && Operands.size() == 2) {
4599 auto *X86Op = static_cast<X86Operand *>(Operands[1].get());
4600 if (X86Op->isImm()) {
4601 // If it's not a constant fall through and let remainder take care of it.
4602 const auto *CE = dyn_cast<MCConstantExpr>(X86Op->getImm());
4603 unsigned Size = getPointerWidth();
4604 if (CE &&
4605 (isIntN(Size, CE->getValue()) || isUIntN(Size, CE->getValue()))) {
4606 SmallString<16> Tmp;
4607 Tmp += Base;
4608 Tmp += (is64BitMode())
4609 ? "q"
4610 : (is32BitMode()) ? "l" : (is16BitMode()) ? "w" : " ";
4611 Op.setTokenValue(Tmp);
4612 // Do match in ATT mode to allow explicit suffix usage.
4613 Match.push_back(MatchInstruction(Operands, Inst, ErrorInfo,
4614 MissingFeatures, MatchingInlineAsm,
4615 false /*isParsingIntelSyntax()*/));
4616 Op.setTokenValue(Base);
4617 }
4618 }
4619 }
4620
4621 // If an unsized memory operand is present, try to match with each memory
4622 // operand size. In Intel assembly, the size is not part of the instruction
4623 // mnemonic.
4624 if (UnsizedMemOp && UnsizedMemOp->isMemUnsized()) {
4625 static const unsigned MopSizes[] = {8, 16, 32, 64, 80, 128, 256, 512};
4626 for (unsigned Size : MopSizes) {
4627 UnsizedMemOp->Mem.Size = Size;
4628 uint64_t ErrorInfoIgnore;
4629 unsigned LastOpcode = Inst.getOpcode();
4630 unsigned M = MatchInstruction(Operands, Inst, ErrorInfoIgnore,
4631 MissingFeatures, MatchingInlineAsm,
4632 isParsingIntelSyntax());
4633 if (Match.empty() || LastOpcode != Inst.getOpcode())
4634 Match.push_back(M);
4635
4636 // If this returned as a missing feature failure, remember that.
4637 if (Match.back() == Match_MissingFeature)
4638 ErrorInfoMissingFeatures = MissingFeatures;
4639 }
4640
4641 // Restore the size of the unsized memory operand if we modified it.
4642 UnsizedMemOp->Mem.Size = 0;
4643 }
4644
4645 // If we haven't matched anything yet, this is not a basic integer or FPU
4646 // operation. There shouldn't be any ambiguity in our mnemonic table, so try
4647 // matching with the unsized operand.
4648 if (Match.empty()) {
4649 Match.push_back(MatchInstruction(
4650 Operands, Inst, ErrorInfo, MissingFeatures, MatchingInlineAsm,
4651 isParsingIntelSyntax()));
4652 // If this returned as a missing feature failure, remember that.
4653 if (Match.back() == Match_MissingFeature)
4654 ErrorInfoMissingFeatures = MissingFeatures;
4655 }
4656
4657 // Restore the size of the unsized memory operand if we modified it.
4658 if (UnsizedMemOp)
4659 UnsizedMemOp->Mem.Size = 0;
4660
4661 // If it's a bad mnemonic, all results will be the same.
4662 if (Match.back() == Match_MnemonicFail) {
4663 return Error(IDLoc, "invalid instruction mnemonic '" + Mnemonic + "'",
4664 Op.getLocRange(), MatchingInlineAsm);
4665 }
4666
4667 unsigned NumSuccessfulMatches = llvm::count(Match, Match_Success);
4668
4669 // If matching was ambiguous and we had size information from the frontend,
4670 // try again with that. This handles cases like "movxz eax, m8/m16".
4671 if (UnsizedMemOp && NumSuccessfulMatches > 1 &&
4672 UnsizedMemOp->getMemFrontendSize()) {
4673 UnsizedMemOp->Mem.Size = UnsizedMemOp->getMemFrontendSize();
4674 unsigned M = MatchInstruction(
4675 Operands, Inst, ErrorInfo, MissingFeatures, MatchingInlineAsm,
4676 isParsingIntelSyntax());
4677 if (M == Match_Success)
4678 NumSuccessfulMatches = 1;
4679
4680 // Add a rewrite that encodes the size information we used from the
4681 // frontend.
4682 InstInfo->AsmRewrites->emplace_back(
4683 AOK_SizeDirective, UnsizedMemOp->getStartLoc(),
4684 /*Len=*/0, UnsizedMemOp->getMemFrontendSize());
4685 }
4686
4687 // If exactly one matched, then we treat that as a successful match (and the
4688 // instruction will already have been filled in correctly, since the failing
4689 // matches won't have modified it).
4690 if (NumSuccessfulMatches == 1) {
4691 if (!MatchingInlineAsm && validateInstruction(Inst, Operands))
4692 return true;
4693 // Some instructions need post-processing to, for example, tweak which
4694 // encoding is selected. Loop on it while changes happen so the individual
4695 // transformations can chain off each other.
4696 if (!MatchingInlineAsm)
4697 while (processInstruction(Inst, Operands))
4698 ;
4699 Inst.setLoc(IDLoc);
4700 if (!MatchingInlineAsm)
4701 emitInstruction(Inst, Operands, Out);
4702 Opcode = Inst.getOpcode();
4703 return false;
4704 } else if (NumSuccessfulMatches > 1) {
4705 assert(UnsizedMemOp &&
4706 "multiple matches only possible with unsized memory operands");
4707 return Error(UnsizedMemOp->getStartLoc(),
4708 "ambiguous operand size for instruction '" + Mnemonic + "\'",
4709 UnsizedMemOp->getLocRange());
4710 }
4711
4712 // If one instruction matched as unsupported, report this as unsupported.
4713 if (llvm::count(Match, Match_Unsupported) == 1) {
4714 return Error(IDLoc, "unsupported instruction", EmptyRange,
4715 MatchingInlineAsm);
4716 }
4717
4718 // If one instruction matched with a missing feature, report this as a
4719 // missing feature.
4720 if (llvm::count(Match, Match_MissingFeature) == 1) {
4721 ErrorInfo = Match_MissingFeature;
4722 return ErrorMissingFeature(IDLoc, ErrorInfoMissingFeatures,
4723 MatchingInlineAsm);
4724 }
4725
4726 // If one instruction matched with an invalid operand, report this as an
4727 // operand failure.
4728 if (llvm::count(Match, Match_InvalidOperand) == 1) {
4729 return Error(IDLoc, "invalid operand for instruction", EmptyRange,
4730 MatchingInlineAsm);
4731 }
4732
4733 if (llvm::count(Match, Match_InvalidImmUnsignedi4) == 1) {
4734 SMLoc ErrorLoc = ((X86Operand &)*Operands[ErrorInfo]).getStartLoc();
4735 if (ErrorLoc == SMLoc())
4736 ErrorLoc = IDLoc;
4737 return Error(ErrorLoc, "immediate must be an integer in range [0, 15]",
4738 EmptyRange, MatchingInlineAsm);
4739 }
4740
4741 // If all of these were an outright failure, report it in a useless way.
4742 return Error(IDLoc, "unknown instruction mnemonic", EmptyRange,
4743 MatchingInlineAsm);
4744}
4745
4746bool X86AsmParser::omitRegisterFromClobberLists(MCRegister Reg) {
4747 return X86MCRegisterClasses[X86::SEGMENT_REGRegClassID].contains(Reg);
4748}
4749
4750bool X86AsmParser::ParseDirective(AsmToken DirectiveID) {
4751 MCAsmParser &Parser = getParser();
4752 StringRef IDVal = DirectiveID.getIdentifier();
4753 if (IDVal.starts_with(".arch"))
4754 return parseDirectiveArch();
4755 if (IDVal.starts_with(".code"))
4756 return ParseDirectiveCode(IDVal, DirectiveID.getLoc());
4757 else if (IDVal.starts_with(".att_syntax")) {
4758 if (getLexer().isNot(AsmToken::EndOfStatement)) {
4759 if (Parser.getTok().getString() == "prefix")
4760 Parser.Lex();
4761 else if (Parser.getTok().getString() == "noprefix")
4762 return Error(DirectiveID.getLoc(), "'.att_syntax noprefix' is not "
4763 "supported: registers must have a "
4764 "'%' prefix in .att_syntax");
4765 }
4766 getParser().setAssemblerDialect(0);
4767 return false;
4768 } else if (IDVal.starts_with(".intel_syntax")) {
4769 getParser().setAssemblerDialect(1);
4770 if (getLexer().isNot(AsmToken::EndOfStatement)) {
4771 if (Parser.getTok().getString() == "noprefix")
4772 Parser.Lex();
4773 else if (Parser.getTok().getString() == "prefix")
4774 return Error(DirectiveID.getLoc(), "'.intel_syntax prefix' is not "
4775 "supported: registers must not have "
4776 "a '%' prefix in .intel_syntax");
4777 }
4778 return false;
4779 } else if (IDVal == ".nops")
4780 return parseDirectiveNops(DirectiveID.getLoc());
4781 else if (IDVal == ".even")
4782 return parseDirectiveEven(DirectiveID.getLoc());
4783 else if (IDVal == ".cv_fpo_proc")
4784 return parseDirectiveFPOProc(DirectiveID.getLoc());
4785 else if (IDVal == ".cv_fpo_setframe")
4786 return parseDirectiveFPOSetFrame(DirectiveID.getLoc());
4787 else if (IDVal == ".cv_fpo_pushreg")
4788 return parseDirectiveFPOPushReg(DirectiveID.getLoc());
4789 else if (IDVal == ".cv_fpo_stackalloc")
4790 return parseDirectiveFPOStackAlloc(DirectiveID.getLoc());
4791 else if (IDVal == ".cv_fpo_stackalign")
4792 return parseDirectiveFPOStackAlign(DirectiveID.getLoc());
4793 else if (IDVal == ".cv_fpo_endprologue")
4794 return parseDirectiveFPOEndPrologue(DirectiveID.getLoc());
4795 else if (IDVal == ".cv_fpo_endproc")
4796 return parseDirectiveFPOEndProc(DirectiveID.getLoc());
4797 else if (IDVal == ".seh_pushreg" ||
4798 (Parser.isParsingMasm() && IDVal.equals_insensitive(".pushreg")))
4799 return parseDirectiveSEHPushReg(DirectiveID.getLoc());
4800 else if (IDVal == ".seh_setframe" ||
4801 (Parser.isParsingMasm() && IDVal.equals_insensitive(".setframe")))
4802 return parseDirectiveSEHSetFrame(DirectiveID.getLoc());
4803 else if (IDVal == ".seh_savereg" ||
4804 (Parser.isParsingMasm() && IDVal.equals_insensitive(".savereg")))
4805 return parseDirectiveSEHSaveReg(DirectiveID.getLoc());
4806 else if (IDVal == ".seh_savexmm" ||
4807 (Parser.isParsingMasm() && IDVal.equals_insensitive(".savexmm128")))
4808 return parseDirectiveSEHSaveXMM(DirectiveID.getLoc());
4809 else if (IDVal == ".seh_pushframe" ||
4810 (Parser.isParsingMasm() && IDVal.equals_insensitive(".pushframe")))
4811 return parseDirectiveSEHPushFrame(DirectiveID.getLoc());
4812
4813 return true;
4814}
4815
4816bool X86AsmParser::parseDirectiveArch() {
4817 // Ignore .arch for now.
4818 getParser().parseStringToEndOfStatement();
4819 return false;
4820}
4821
4822/// parseDirectiveNops
4823/// ::= .nops size[, control]
4824bool X86AsmParser::parseDirectiveNops(SMLoc L) {
4825 int64_t NumBytes = 0, Control = 0;
4826 SMLoc NumBytesLoc, ControlLoc;
4827 const MCSubtargetInfo& STI = getSTI();
4828 NumBytesLoc = getTok().getLoc();
4829 if (getParser().checkForValidSection() ||
4830 getParser().parseAbsoluteExpression(NumBytes))
4831 return true;
4832
4833 if (parseOptionalToken(AsmToken::Comma)) {
4834 ControlLoc = getTok().getLoc();
4835 if (getParser().parseAbsoluteExpression(Control))
4836 return true;
4837 }
4838 if (getParser().parseEOL())
4839 return true;
4840
4841 if (NumBytes <= 0) {
4842 Error(NumBytesLoc, "'.nops' directive with non-positive size");
4843 return false;
4844 }
4845
4846 if (Control < 0) {
4847 Error(ControlLoc, "'.nops' directive with negative NOP size");
4848 return false;
4849 }
4850
4851 /// Emit nops
4852 getParser().getStreamer().emitNops(NumBytes, Control, L, STI);
4853
4854 return false;
4855}
4856
4857/// parseDirectiveEven
4858/// ::= .even
4859bool X86AsmParser::parseDirectiveEven(SMLoc L) {
4860 if (parseEOL())
4861 return false;
4862
4863 const MCSection *Section = getStreamer().getCurrentSectionOnly();
4864 if (!Section) {
4865 getStreamer().initSections(false, getSTI());
4866 Section = getStreamer().getCurrentSectionOnly();
4867 }
4868 if (getContext().getAsmInfo()->useCodeAlign(*Section))
4869 getStreamer().emitCodeAlignment(Align(2), &getSTI(), 0);
4870 else
4871 getStreamer().emitValueToAlignment(Align(2), 0, 1, 0);
4872 return false;
4873}
4874
4875/// ParseDirectiveCode
4876/// ::= .code16 | .code32 | .code64
4877bool X86AsmParser::ParseDirectiveCode(StringRef IDVal, SMLoc L) {
4878 MCAsmParser &Parser = getParser();
4879 Code16GCC = false;
4880 if (IDVal == ".code16") {
4881 Parser.Lex();
4882 if (!is16BitMode()) {
4883 SwitchMode(X86::Is16Bit);
4884 getTargetStreamer().emitCode16();
4885 }
4886 } else if (IDVal == ".code16gcc") {
4887 // .code16gcc parses as if in 32-bit mode, but emits code in 16-bit mode.
4888 Parser.Lex();
4889 Code16GCC = true;
4890 if (!is16BitMode()) {
4891 SwitchMode(X86::Is16Bit);
4892 getTargetStreamer().emitCode16();
4893 }
4894 } else if (IDVal == ".code32") {
4895 Parser.Lex();
4896 if (!is32BitMode()) {
4897 SwitchMode(X86::Is32Bit);
4898 getTargetStreamer().emitCode32();
4899 }
4900 } else if (IDVal == ".code64") {
4901 Parser.Lex();
4902 if (!is64BitMode()) {
4903 SwitchMode(X86::Is64Bit);
4904 getTargetStreamer().emitCode64();
4905 }
4906 } else {
4907 Error(L, "unknown directive " + IDVal);
4908 return false;
4909 }
4910
4911 return false;
4912}
4913
4914// .cv_fpo_proc foo
4915bool X86AsmParser::parseDirectiveFPOProc(SMLoc L) {
4916 MCAsmParser &Parser = getParser();
4917 StringRef ProcName;
4918 int64_t ParamsSize;
4919 if (Parser.parseIdentifier(ProcName))
4920 return Parser.TokError("expected symbol name");
4921 if (Parser.parseIntToken(ParamsSize, "expected parameter byte count"))
4922 return true;
4923 if (!isUIntN(32, ParamsSize))
4924 return Parser.TokError("parameters size out of range");
4925 if (parseEOL())
4926 return true;
4927 MCSymbol *ProcSym = getContext().getOrCreateSymbol(ProcName);
4928 return getTargetStreamer().emitFPOProc(ProcSym, ParamsSize, L);
4929}
4930
4931// .cv_fpo_setframe ebp
4932bool X86AsmParser::parseDirectiveFPOSetFrame(SMLoc L) {
4933 MCRegister Reg;
4934 SMLoc DummyLoc;
4935 if (parseRegister(Reg, DummyLoc, DummyLoc) || parseEOL())
4936 return true;
4937 return getTargetStreamer().emitFPOSetFrame(Reg, L);
4938}
4939
4940// .cv_fpo_pushreg ebx
4941bool X86AsmParser::parseDirectiveFPOPushReg(SMLoc L) {
4942 MCRegister Reg;
4943 SMLoc DummyLoc;
4944 if (parseRegister(Reg, DummyLoc, DummyLoc) || parseEOL())
4945 return true;
4946 return getTargetStreamer().emitFPOPushReg(Reg, L);
4947}
4948
4949// .cv_fpo_stackalloc 20
4950bool X86AsmParser::parseDirectiveFPOStackAlloc(SMLoc L) {
4951 MCAsmParser &Parser = getParser();
4952 int64_t Offset;
4953 if (Parser.parseIntToken(Offset, "expected offset") || parseEOL())
4954 return true;
4955 return getTargetStreamer().emitFPOStackAlloc(Offset, L);
4956}
4957
4958// .cv_fpo_stackalign 8
4959bool X86AsmParser::parseDirectiveFPOStackAlign(SMLoc L) {
4960 MCAsmParser &Parser = getParser();
4961 int64_t Offset;
4962 if (Parser.parseIntToken(Offset, "expected offset") || parseEOL())
4963 return true;
4964 return getTargetStreamer().emitFPOStackAlign(Offset, L);
4965}
4966
4967// .cv_fpo_endprologue
4968bool X86AsmParser::parseDirectiveFPOEndPrologue(SMLoc L) {
4969 MCAsmParser &Parser = getParser();
4970 if (Parser.parseEOL())
4971 return true;
4972 return getTargetStreamer().emitFPOEndPrologue(L);
4973}
4974
4975// .cv_fpo_endproc
4976bool X86AsmParser::parseDirectiveFPOEndProc(SMLoc L) {
4977 MCAsmParser &Parser = getParser();
4978 if (Parser.parseEOL())
4979 return true;
4980 return getTargetStreamer().emitFPOEndProc(L);
4981}
4982
4983bool X86AsmParser::parseSEHRegisterNumber(unsigned RegClassID,
4984 MCRegister &RegNo) {
4985 SMLoc startLoc = getLexer().getLoc();
4986 const MCRegisterInfo *MRI = getContext().getRegisterInfo();
4987
4988 // Try parsing the argument as a register first.
4989 if (getLexer().getTok().isNot(AsmToken::Integer)) {
4990 SMLoc endLoc;
4991 if (parseRegister(RegNo, startLoc, endLoc))
4992 return true;
4993
4994 if (!X86MCRegisterClasses[RegClassID].contains(RegNo)) {
4995 return Error(startLoc,
4996 "register is not supported for use with this directive");
4997 }
4998 } else {
4999 // Otherwise, an integer number matching the encoding of the desired
5000 // register may appear.
5001 int64_t EncodedReg;
5002 if (getParser().parseAbsoluteExpression(EncodedReg))
5003 return true;
5004
5005 // The SEH register number is the same as the encoding register number. Map
5006 // from the encoding back to the LLVM register number.
5007 RegNo = MCRegister();
5008 for (MCPhysReg Reg : X86MCRegisterClasses[RegClassID]) {
5009 if (MRI->getEncodingValue(Reg) == EncodedReg) {
5010 RegNo = Reg;
5011 break;
5012 }
5013 }
5014 if (!RegNo) {
5015 return Error(startLoc,
5016 "incorrect register number for use with this directive");
5017 }
5018 }
5019
5020 return false;
5021}
5022
5023bool X86AsmParser::parseDirectiveSEHPushReg(SMLoc Loc) {
5024 MCRegister Reg;
5025 if (parseSEHRegisterNumber(X86::GR64RegClassID, Reg))
5026 return true;
5027
5028 if (getLexer().isNot(AsmToken::EndOfStatement))
5029 return TokError("expected end of directive");
5030
5031 getParser().Lex();
5032 getStreamer().emitWinCFIPushReg(Reg, Loc);
5033 return false;
5034}
5035
5036bool X86AsmParser::parseDirectiveSEHSetFrame(SMLoc Loc) {
5037 MCRegister Reg;
5038 int64_t Off;
5039 if (parseSEHRegisterNumber(X86::GR64RegClassID, Reg))
5040 return true;
5041 if (getLexer().isNot(AsmToken::Comma))
5042 return TokError("you must specify a stack pointer offset");
5043
5044 getParser().Lex();
5045 if (getParser().parseAbsoluteExpression(Off))
5046 return true;
5047
5048 if (getLexer().isNot(AsmToken::EndOfStatement))
5049 return TokError("expected end of directive");
5050
5051 getParser().Lex();
5052 getStreamer().emitWinCFISetFrame(Reg, Off, Loc);
5053 return false;
5054}
5055
5056bool X86AsmParser::parseDirectiveSEHSaveReg(SMLoc Loc) {
5057 MCRegister Reg;
5058 int64_t Off;
5059 if (parseSEHRegisterNumber(X86::GR64RegClassID, Reg))
5060 return true;
5061 if (getLexer().isNot(AsmToken::Comma))
5062 return TokError("you must specify an offset on the stack");
5063
5064 getParser().Lex();
5065 if (getParser().parseAbsoluteExpression(Off))
5066 return true;
5067
5068 if (getLexer().isNot(AsmToken::EndOfStatement))
5069 return TokError("expected end of directive");
5070
5071 getParser().Lex();
5072 getStreamer().emitWinCFISaveReg(Reg, Off, Loc);
5073 return false;
5074}
5075
5076bool X86AsmParser::parseDirectiveSEHSaveXMM(SMLoc Loc) {
5077 MCRegister Reg;
5078 int64_t Off;
5079 if (parseSEHRegisterNumber(X86::VR128XRegClassID, Reg))
5080 return true;
5081 if (getLexer().isNot(AsmToken::Comma))
5082 return TokError("you must specify an offset on the stack");
5083
5084 getParser().Lex();
5085 if (getParser().parseAbsoluteExpression(Off))
5086 return true;
5087
5088 if (getLexer().isNot(AsmToken::EndOfStatement))
5089 return TokError("expected end of directive");
5090
5091 getParser().Lex();
5092 getStreamer().emitWinCFISaveXMM(Reg, Off, Loc);
5093 return false;
5094}
5095
5096bool X86AsmParser::parseDirectiveSEHPushFrame(SMLoc Loc) {
5097 bool Code = false;
5098 StringRef CodeID;
5099 if (getLexer().is(AsmToken::At)) {
5100 SMLoc startLoc = getLexer().getLoc();
5101 getParser().Lex();
5102 if (!getParser().parseIdentifier(CodeID)) {
5103 if (CodeID != "code")
5104 return Error(startLoc, "expected @code");
5105 Code = true;
5106 }
5107 }
5108
5109 if (getLexer().isNot(AsmToken::EndOfStatement))
5110 return TokError("expected end of directive");
5111
5112 getParser().Lex();
5113 getStreamer().emitWinCFIPushFrame(Code, Loc);
5114 return false;
5115}
5116
5117// Force static initialization.
5122
5123#define GET_MATCHER_IMPLEMENTATION
5124#include "X86GenAsmMatcher.inc"
unsigned const MachineRegisterInfo * MRI
static MCRegister MatchRegisterName(StringRef Name)
static const char * getSubtargetFeatureName(uint64_t Val)
unsigned RegSize
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static bool isNot(const MachineRegisterInfo &MRI, const MachineInstr &MI)
Function Alias Analysis false
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Analysis containing CSE Info
Definition CSEInfo.cpp:27
@ Default
amode Optimize addressing mode
Value * getPointer(Value *Ptr)
static ModuleSymbolTable::Symbol getSym(DataRefImpl &Symb)
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define RegName(no)
static LVOptions Options
Definition LVOptions.cpp:25
#define I(x, y, z)
Definition MD5.cpp:57
static bool IsVCMP(unsigned Opcode)
Register Reg
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
OptimizedStructLayoutField Field
static StringRef getName(Value *V)
This file contains some templates that are useful if you are working with the STL at all.
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition Value.cpp:487
This file defines the SmallString class.
This file defines the SmallVector class.
This file implements the StringSwitch template, which mimics a switch() statement whose cases are str...
DEMANGLE_NAMESPACE_BEGIN bool starts_with(std::string_view self, char C) noexcept
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
#define LLVM_C_ABI
LLVM_C_ABI is the export/visibility macro used to mark symbols declared in llvm-c as exported when bu...
Definition Visibility.h:40
static cl::opt< bool > LVIInlineAsmHardening("x86-experimental-lvi-inline-asm-hardening", cl::desc("Harden inline assembly code that may be vulnerable to Load Value" " Injection (LVI). This feature is experimental."), cl::Hidden)
static bool checkScale(unsigned Scale, StringRef &ErrMsg)
LLVM_C_ABI void LLVMInitializeX86AsmParser()
static bool convertSSEToAVX(MCInst &Inst)
static unsigned getPrefixes(OperandVector &Operands)
static bool CheckBaseRegAndIndexRegAndScale(MCRegister BaseReg, MCRegister IndexReg, unsigned Scale, bool Is64BitMode, StringRef &ErrMsg)
#define FROM_TO(FROM, TO)
uint16_t RegSizeInBits(const MCRegisterInfo &MRI, MCRegister RegNo)
Value * RHS
Value * LHS
static unsigned getSize(unsigned Kind)
uint64_t getZExtValue() const
Get zero extended value.
Definition APInt.h:1541
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
iterator end() const
Definition ArrayRef.h:131
void UnLex(AsmToken const &Token)
Definition AsmLexer.h:106
bool isNot(AsmToken::TokenKind K) const
Check if the current token has kind K.
Definition AsmLexer.h:150
LLVM_ABI SMLoc getLoc() const
Definition AsmLexer.cpp:31
int64_t getIntVal() const
Definition MCAsmMacro.h:108
bool isNot(TokenKind K) const
Definition MCAsmMacro.h:76
StringRef getString() const
Get the string for the current token, this includes all characters (for example, the quotes on string...
Definition MCAsmMacro.h:103
bool is(TokenKind K) const
Definition MCAsmMacro.h:75
TokenKind getKind() const
Definition MCAsmMacro.h:74
LLVM_ABI SMLoc getEndLoc() const
Definition AsmLexer.cpp:33
StringRef getIdentifier() const
Get the identifier string for the current token, which should be an identifier or a string.
Definition MCAsmMacro.h:92
constexpr size_t size() const
bool Error(SMLoc L, const Twine &Msg, SMRange Range={})
Return an error at the location L, with the message Msg.
bool parseIntToken(int64_t &V, const Twine &ErrMsg="expected integer")
MCContext & getContext()
virtual bool parseExpression(const MCExpr *&Res, SMLoc &EndLoc)=0
Parse an arbitrary expression.
const AsmToken & getTok() const
Get the current AsmToken from the stream.
virtual bool isParsingMasm() const
virtual bool parseIdentifier(StringRef &Res)=0
Parse an identifier or string (as a quoted identifier) and set Res to the identifier contents.
bool parseOptionalToken(AsmToken::TokenKind T)
Attempt to parse and consume token, returning true on success.
virtual bool parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc, AsmTypeInfo *TypeInfo=nullptr)=0
Parse a primary expression.
virtual const AsmToken & Lex()=0
Get the next AsmToken in the stream, possibly handling file inclusion first.
bool TokError(const Twine &Msg, SMRange Range={})
Report an error at the current lexer location.
virtual void addAliasForDirective(StringRef Directive, StringRef Alias)=0
virtual bool lookUpType(StringRef Name, AsmTypeInfo &Info) const
virtual bool parseAbsoluteExpression(int64_t &Res)=0
Parse an expression which must evaluate to an absolute value.
virtual bool lookUpField(StringRef Name, AsmFieldInfo &Info) const
bool parseTokenLoc(SMLoc &Loc)
static const MCBinaryExpr * createAdd(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.h:343
static LLVM_ABI const MCConstantExpr * create(int64_t Value, MCContext &Ctx, bool PrintInHex=false, unsigned SizeInBytes=0)
Definition MCExpr.cpp:212
@ SymbolRef
References to labels and assigned expressions.
Definition MCExpr.h:43
ExprKind getKind() const
Definition MCExpr.h:85
Instances of this class represent a single low-level machine instruction.
Definition MCInst.h:188
unsigned getNumOperands() const
Definition MCInst.h:212
SMLoc getLoc() const
Definition MCInst.h:208
unsigned getFlags() const
Definition MCInst.h:205
void setLoc(SMLoc loc)
Definition MCInst.h:207
unsigned getOpcode() const
Definition MCInst.h:202
void setFlags(unsigned F)
Definition MCInst.h:204
void addOperand(const MCOperand Op)
Definition MCInst.h:215
void setOpcode(unsigned Op)
Definition MCInst.h:201
void clear()
Definition MCInst.h:223
const MCOperand & getOperand(unsigned i) const
Definition MCInst.h:210
bool mayLoad() const
Return true if this instruction could possibly read memory.
bool isCall() const
Return true if the instruction is a call.
bool isTerminator() const
Returns true if this instruction part of the terminator for a basic block.
int64_t getImm() const
Definition MCInst.h:84
static MCOperand createImm(int64_t Val)
Definition MCInst.h:145
bool isImm() const
Definition MCInst.h:66
bool isReg() const
Definition MCInst.h:65
MCRegister getReg() const
Returns the register number.
Definition MCInst.h:73
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:41
virtual void emitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI)
Emit the given Instruction into the current section.
const FeatureBitset & getFeatureBits() const
FeatureBitset ToggleFeature(uint64_t FB)
Toggle a feature and return the re-computed feature bits.
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.h:214
bool isUndefined() const
isUndefined - Check if this symbol undefined (i.e., implicitly defined).
Definition MCSymbol.h:243
StringRef getName() const
getName - Get the symbol name.
Definition MCSymbol.h:188
bool isVariable() const
isVariable - Check if this is a variable symbol.
Definition MCSymbol.h:267
const MCExpr * getVariableValue() const
Get the expression of the variable symbol.
Definition MCSymbol.h:270
MCTargetAsmParser - Generic interface to target specific assembly parsers.
static constexpr StatusTy Failure
static constexpr StatusTy Success
static constexpr StatusTy NoMatch
constexpr unsigned id() const
Definition Register.h:100
Represents a location in source code.
Definition SMLoc.h:22
static SMLoc getFromPointer(const char *Ptr)
Definition SMLoc.h:35
constexpr const char * getPointer() const
Definition SMLoc.h:33
constexpr bool isValid() const
Definition SMLoc.h:28
void push_back(const T &Elt)
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition StringRef.h:712
static constexpr size_t npos
Definition StringRef.h:57
bool consume_back(StringRef Suffix)
Returns true if this StringRef has the given suffix and removes that suffix.
Definition StringRef.h:667
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition StringRef.h:472
constexpr StringRef substr(size_t Start, size_t N=npos) const
Return a reference to the substring from [Start, Start + N).
Definition StringRef.h:573
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:261
LLVM_ABI std::string upper() const
Convert the given ASCII string to uppercase.
char back() const
back - Get the last character in the string.
Definition StringRef.h:155
StringRef slice(size_t Start, size_t End) const
Return a reference to the substring from [Start, End).
Definition StringRef.h:696
constexpr size_t size() const
size - Get the string size.
Definition StringRef.h:146
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition StringRef.h:140
LLVM_ABI std::string lower() const
bool ends_with(StringRef Suffix) const
Check if this string ends with the given Suffix.
Definition StringRef.h:273
bool consume_front(char Prefix)
Returns true if this StringRef has the given prefix and removes that prefix.
Definition StringRef.h:637
StringRef drop_back(size_t N=1) const
Return a StringRef equal to 'this' but with the last N elements dropped.
Definition StringRef.h:618
bool equals_insensitive(StringRef RHS) const
Check for string equality, ignoring case.
Definition StringRef.h:172
static const char * getRegisterName(MCRegister Reg)
static const X86MCExpr * create(MCRegister Reg, MCContext &Ctx)
Definition X86MCExpr.h:34
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
std::variant< std::monostate, Loc::Single, Loc::Multi, Loc::MMI, Loc::EntryValue > Variant
Alias for the std::variant specialization base class of DbgVariable.
Definition DwarfDebug.h:189
@ CE
Windows NT (Windows on ARM)
Definition MCAsmInfo.h:48
@ X86
Windows x64, Windows Itanium (IA-64)
Definition MCAsmInfo.h:50
bool isX86_64NonExtLowByteReg(MCRegister Reg)
@ EVEX
EVEX - Specifies that this instruction use EVEX form which provides syntax support up to 32 512-bit r...
@ VEX
VEX - encoding using 0xC4/0xC5.
@ XOP
XOP - Opcode prefix used by XOP instructions.
@ ExplicitVEXPrefix
For instructions that use VEX encoding only when {vex}, {vex2} or {vex3} is present.
bool canUseApxExtendedReg(const MCInstrDesc &Desc)
bool isX86_64ExtendedReg(MCRegister Reg)
bool isApxExtendedReg(MCRegister Reg)
void emitInstruction(MCObjectStreamer &, const MCInst &Inst, const MCSubtargetInfo &STI)
@ AddrNumOperands
Definition X86BaseInfo.h:36
bool optimizeShiftRotateWithImmediateOne(MCInst &MI)
bool optimizeInstFromVEX3ToVEX2(MCInst &MI, const MCInstrDesc &Desc)
@ IP_HAS_REPEAT_NE
Definition X86BaseInfo.h:55
NodeAddr< CodeNode * > Code
Definition RDFGraph.h:388
Context & getContext() const
Definition BasicBlock.h:99
BaseReg
Stack frame base register. Bit 0 of FREInfo.Info.
Definition SFrame.h:77
This is an optimization pass for GlobalISel generic memory operations.
@ Offset
Definition DWP.cpp:532
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition STLExtras.h:1667
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
LLVM_ABI std::pair< StringRef, StringRef > getToken(StringRef Source, StringRef Delimiters=" \t\n\v\f\r")
getToken - This function extracts one token from source, ignoring any leading characters that appear ...
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
@ Done
Definition Threading.h:60
@ AOK_EndOfStatement
@ AOK_SizeDirective
MCRegister getX86SubSuperRegister(MCRegister Reg, unsigned Size, bool High=false)
Target & getTheX86_32Target()
constexpr bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
Definition MathExtras.h:243
SmallVectorImpl< std::unique_ptr< MCParsedAsmOperand > > OperandVector
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
Definition MathExtras.h:189
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
auto lower_bound(R &&Range, T &&Value)
Provide wrappers to std::lower_bound which take ranges instead of having to pass begin/end explicitly...
Definition STLExtras.h:2042
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
Definition MCRegister.h:21
FunctionAddr VTableAddr Next
Definition InstrProf.h:141
auto count(R &&Range, const E &Element)
Wrapper function around std::count to count the number of times an element Element occurs in the give...
Definition STLExtras.h:2002
DWARFExpression::Operation Op
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
constexpr bool isIntN(unsigned N, int64_t x)
Checks if an signed integer fits into the given (dynamic) bit width.
Definition MathExtras.h:248
Target & getTheX86_64Target()
StringRef toStringRef(bool B)
Construct a string ref from a boolean.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:872
#define N
bool isKind(IdKind kind) const
Definition MCAsmParser.h:66
SmallVectorImpl< AsmRewrite > * AsmRewrites
RegisterMCAsmParser - Helper template for registering a target specific assembly parser,...
X86Operand - Instances of this class represent a parsed X86 machine instruction.
Definition X86Operand.h:31
SMLoc getStartLoc() const override
getStartLoc - Get the location of the first token of this operand.
Definition X86Operand.h:98
bool isImm() const override
isImm - Is this an immediate operand?
Definition X86Operand.h:223
static std::unique_ptr< X86Operand > CreateImm(const MCExpr *Val, SMLoc StartLoc, SMLoc EndLoc, StringRef SymName=StringRef(), void *OpDecl=nullptr, bool GlobalRef=true)
Definition X86Operand.h:710
static std::unique_ptr< X86Operand > CreatePrefix(unsigned Prefixes, SMLoc StartLoc, SMLoc EndLoc)
Definition X86Operand.h:704
static std::unique_ptr< X86Operand > CreateDXReg(SMLoc StartLoc, SMLoc EndLoc)
Definition X86Operand.h:699
static std::unique_ptr< X86Operand > CreateReg(MCRegister Reg, SMLoc StartLoc, SMLoc EndLoc, bool AddressOf=false, SMLoc OffsetOfLoc=SMLoc(), StringRef SymName=StringRef(), void *OpDecl=nullptr)
Definition X86Operand.h:686
SMRange getLocRange() const
getLocRange - Get the range between the first and last token of this operand.
Definition X86Operand.h:105
SMLoc getEndLoc() const override
getEndLoc - Get the location of the last token of this operand.
Definition X86Operand.h:101
bool isReg() const override
isReg - Is this a register operand?
Definition X86Operand.h:522
bool isMem() const override
isMem - Is this a memory operand?
Definition X86Operand.h:304
static std::unique_ptr< X86Operand > CreateMem(unsigned ModeSize, const MCExpr *Disp, SMLoc StartLoc, SMLoc EndLoc, unsigned Size=0, StringRef SymName=StringRef(), void *OpDecl=nullptr, unsigned FrontendSize=0, bool UseUpRegs=false, bool MaybeDirectBranchDest=true)
Create an absolute memory operand.
Definition X86Operand.h:726
struct MemOp Mem
Definition X86Operand.h:86
bool isVectorReg() const
Definition X86Operand.h:538
static std::unique_ptr< X86Operand > CreateToken(StringRef Str, SMLoc Loc)
Definition X86Operand.h:677
bool isMemUnsized() const
Definition X86Operand.h:305
const MCExpr * getImm() const
Definition X86Operand.h:179
unsigned getMemFrontendSize() const
Definition X86Operand.h:212
bool isMem8() const
Definition X86Operand.h:308
MCRegister getReg() const override
Definition X86Operand.h:169