LLVM 22.0.0git
Instructions.cpp
Go to the documentation of this file.
1//===- Instructions.cpp - Implement the LLVM instructions -----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements all of the non-inline methods for the LLVM instruction
10// classes.
11//
12//===----------------------------------------------------------------------===//
13
15#include "LLVMContextImpl.h"
18#include "llvm/ADT/Twine.h"
19#include "llvm/IR/Attributes.h"
20#include "llvm/IR/BasicBlock.h"
21#include "llvm/IR/Constant.h"
23#include "llvm/IR/Constants.h"
24#include "llvm/IR/DataLayout.h"
26#include "llvm/IR/Function.h"
27#include "llvm/IR/InstrTypes.h"
28#include "llvm/IR/Instruction.h"
29#include "llvm/IR/Intrinsics.h"
30#include "llvm/IR/LLVMContext.h"
31#include "llvm/IR/MDBuilder.h"
32#include "llvm/IR/Metadata.h"
33#include "llvm/IR/Module.h"
34#include "llvm/IR/Operator.h"
37#include "llvm/IR/Type.h"
38#include "llvm/IR/Value.h"
46#include "llvm/Support/ModRef.h"
48#include <algorithm>
49#include <cassert>
50#include <cstdint>
51#include <optional>
52#include <vector>
53
54using namespace llvm;
55
57 "disable-i2p-p2i-opt", cl::init(false),
58 cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"));
59
60//===----------------------------------------------------------------------===//
61// AllocaInst Class
62//===----------------------------------------------------------------------===//
63
64std::optional<TypeSize>
66 TypeSize Size = DL.getTypeAllocSize(getAllocatedType());
67 if (isArrayAllocation()) {
69 if (!C)
70 return std::nullopt;
71 assert(!Size.isScalable() && "Array elements cannot have a scalable size");
72 auto CheckedProd =
73 checkedMulUnsigned(Size.getKnownMinValue(), C->getZExtValue());
74 if (!CheckedProd)
75 return std::nullopt;
76 return TypeSize::getFixed(*CheckedProd);
77 }
78 return Size;
79}
80
81std::optional<TypeSize>
83 std::optional<TypeSize> Size = getAllocationSize(DL);
84 if (!Size)
85 return std::nullopt;
86 auto CheckedProd = checkedMulUnsigned(Size->getKnownMinValue(),
87 static_cast<TypeSize::ScalarTy>(8));
88 if (!CheckedProd)
89 return std::nullopt;
90 return TypeSize::get(*CheckedProd, Size->isScalable());
91}
92
93//===----------------------------------------------------------------------===//
94// SelectInst Class
95//===----------------------------------------------------------------------===//
96
97/// areInvalidOperands - Return a string if the specified operands are invalid
98/// for a select operation, otherwise return null.
99const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
100 if (Op1->getType() != Op2->getType())
101 return "both values to select must have same type";
102
103 if (Op1->getType()->isTokenTy())
104 return "select values cannot have token type";
105
106 if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) {
107 // Vector select.
108 if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))
109 return "vector select condition element type must be i1";
111 if (!ET)
112 return "selected values for vector select must be vectors";
113 if (ET->getElementCount() != VT->getElementCount())
114 return "vector select requires selected vectors to have "
115 "the same vector length as select condition";
116 } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {
117 return "select condition must be i1 or <n x i1>";
118 }
119 return nullptr;
120}
121
122//===----------------------------------------------------------------------===//
123// PHINode Class
124//===----------------------------------------------------------------------===//
125
126PHINode::PHINode(const PHINode &PN)
127 : Instruction(PN.getType(), Instruction::PHI, AllocMarker),
128 ReservedSpace(PN.getNumOperands()) {
131 std::copy(PN.op_begin(), PN.op_end(), op_begin());
132 copyIncomingBlocks(make_range(PN.block_begin(), PN.block_end()));
134}
135
136// removeIncomingValue - Remove an incoming value. This is useful if a
137// predecessor basic block is deleted.
138Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
139 Value *Removed = getIncomingValue(Idx);
140 // Swap with the end of the list.
141 unsigned Last = getNumOperands() - 1;
142 if (Idx != Last) {
145 }
146
147 // Nuke the last value.
148 Op<-1>().set(nullptr);
150
151 // If the PHI node is dead, because it has zero entries, nuke it now.
152 if (getNumOperands() == 0 && DeletePHIIfEmpty) {
153 // If anyone is using this PHI, make them use a dummy value instead...
156 }
157 return Removed;
158}
159
160void PHINode::removeIncomingValueIf(function_ref<bool(unsigned)> Predicate,
161 bool DeletePHIIfEmpty) {
162 unsigned NumOps = getNumIncomingValues();
163
164 // Loop backwards in case the predicate is purely index based.
165 for (unsigned Idx = NumOps; Idx-- > 0;) {
166 if (Predicate(Idx)) {
167 unsigned LastIdx = NumOps - 1;
168 if (Idx != LastIdx) {
169 setIncomingValue(Idx, getIncomingValue(LastIdx));
170 setIncomingBlock(Idx, getIncomingBlock(LastIdx));
171 }
172 getOperandUse(LastIdx).set(nullptr);
173 NumOps--;
174 }
175 }
176
178
179 // If the PHI node is dead, because it has zero entries, nuke it now.
180 if (getNumOperands() == 0 && DeletePHIIfEmpty) {
181 // If anyone is using this PHI, make them use a dummy value instead...
184 }
185}
186
187/// growOperands - grow operands - This grows the operand list in response
188/// to a push_back style of operation. This grows the number of ops by 1.5
189/// times.
190///
191void PHINode::growOperands() {
192 unsigned e = getNumOperands();
193 unsigned NumOps = e + e / 2;
194 if (NumOps < 2) NumOps = 2; // 2 op PHI nodes are VERY common.
195
196 ReservedSpace = NumOps;
197 growHungoffUses(ReservedSpace, /*WithExtraValues=*/true);
198}
199
200/// hasConstantValue - If the specified PHI node always merges together the same
201/// value, return the value, otherwise return null.
203 // Exploit the fact that phi nodes always have at least one entry.
204 Value *ConstantValue = getIncomingValue(0);
205 for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
206 if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
207 if (ConstantValue != this)
208 return nullptr; // Incoming values not all the same.
209 // The case where the first value is this PHI.
210 ConstantValue = getIncomingValue(i);
211 }
212 if (ConstantValue == this)
213 return PoisonValue::get(getType());
214 return ConstantValue;
215}
216
217/// hasConstantOrUndefValue - Whether the specified PHI node always merges
218/// together the same value, assuming that undefs result in the same value as
219/// non-undefs.
220/// Unlike \ref hasConstantValue, this does not return a value because the
221/// unique non-undef incoming value need not dominate the PHI node.
223 Value *ConstantValue = nullptr;
224 for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i) {
226 if (Incoming != this && !isa<UndefValue>(Incoming)) {
227 if (ConstantValue && ConstantValue != Incoming)
228 return false;
229 ConstantValue = Incoming;
230 }
231 }
232 return true;
233}
234
235//===----------------------------------------------------------------------===//
236// LandingPadInst Implementation
237//===----------------------------------------------------------------------===//
238
// Construct a landingpad of type RetTy with room reserved for
// NumReservedValues clause operands; clauses are appended later via
// addClause().
LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr,
                               InsertPosition InsertBefore)
    : Instruction(RetTy, Instruction::LandingPad, AllocMarker, InsertBefore) {
  init(NumReservedValues, NameStr);
}
245
246LandingPadInst::LandingPadInst(const LandingPadInst &LP)
247 : Instruction(LP.getType(), Instruction::LandingPad, AllocMarker),
248 ReservedSpace(LP.getNumOperands()) {
251 Use *OL = getOperandList();
252 const Use *InOL = LP.getOperandList();
253 for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
254 OL[I] = InOL[I];
255
256 setCleanup(LP.isCleanup());
257}
258
259LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
260 const Twine &NameStr,
261 InsertPosition InsertBefore) {
262 return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
263}
264
265void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
266 ReservedSpace = NumReservedValues;
268 allocHungoffUses(ReservedSpace);
269 setName(NameStr);
270 setCleanup(false);
271}
272
273/// growOperands - grow operands - This grows the operand list in response to a
274/// push_back style of operation. This grows the number of ops by 2 times.
275void LandingPadInst::growOperands(unsigned Size) {
276 unsigned e = getNumOperands();
277 if (ReservedSpace >= e + Size) return;
278 ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;
279 growHungoffUses(ReservedSpace);
280}
281
283 unsigned OpNo = getNumOperands();
284 growOperands(1);
285 assert(OpNo < ReservedSpace && "Growing didn't work!");
287 getOperandList()[OpNo] = Val;
288}
289
290//===----------------------------------------------------------------------===//
291// CallBase Implementation
292//===----------------------------------------------------------------------===//
293
295 InsertPosition InsertPt) {
296 switch (CB->getOpcode()) {
297 case Instruction::Call:
298 return CallInst::Create(cast<CallInst>(CB), Bundles, InsertPt);
299 case Instruction::Invoke:
300 return InvokeInst::Create(cast<InvokeInst>(CB), Bundles, InsertPt);
301 case Instruction::CallBr:
302 return CallBrInst::Create(cast<CallBrInst>(CB), Bundles, InsertPt);
303 default:
304 llvm_unreachable("Unknown CallBase sub-class!");
305 }
306}
307
309 InsertPosition InsertPt) {
311 for (unsigned i = 0, e = CI->getNumOperandBundles(); i < e; ++i) {
312 auto ChildOB = CI->getOperandBundleAt(i);
313 if (ChildOB.getTagName() != OpB.getTag())
314 OpDefs.emplace_back(ChildOB);
315 }
316 OpDefs.emplace_back(OpB);
317 return CallBase::Create(CI, OpDefs, InsertPt);
318}
319
321
323 assert(getOpcode() == Instruction::CallBr && "Unexpected opcode!");
324 return cast<CallBrInst>(this)->getNumIndirectDests() + 1;
325}
326
328 const Value *V = getCalledOperand();
329 if (isa<Function>(V) || isa<Constant>(V))
330 return false;
331 return !isInlineAsm();
332}
333
334/// Tests if this call site must be tail call optimized. Only a CallInst can
335/// be tail call optimized.
337 if (auto *CI = dyn_cast<CallInst>(this))
338 return CI->isMustTailCall();
339 return false;
340}
341
342/// Tests if this call site is marked as a tail call.
344 if (auto *CI = dyn_cast<CallInst>(this))
345 return CI->isTailCall();
346 return false;
347}
348
351 return F->getIntrinsicID();
353}
354
356 FPClassTest Mask = Attrs.getRetNoFPClass();
357
358 if (const Function *F = getCalledFunction())
359 Mask |= F->getAttributes().getRetNoFPClass();
360 return Mask;
361}
362
364 FPClassTest Mask = Attrs.getParamNoFPClass(i);
365
366 if (const Function *F = getCalledFunction())
367 Mask |= F->getAttributes().getParamNoFPClass(i);
368 return Mask;
369}
370
371std::optional<ConstantRange> CallBase::getRange() const {
372 Attribute CallAttr = Attrs.getRetAttr(Attribute::Range);
374 if (const Function *F = getCalledFunction())
375 FnAttr = F->getRetAttribute(Attribute::Range);
376
377 if (CallAttr.isValid() && FnAttr.isValid())
378 return CallAttr.getRange().intersectWith(FnAttr.getRange());
379 if (CallAttr.isValid())
380 return CallAttr.getRange();
381 if (FnAttr.isValid())
382 return FnAttr.getRange();
383 return std::nullopt;
384}
385
387 if (hasRetAttr(Attribute::NonNull))
388 return true;
389
390 if (getRetDereferenceableBytes() > 0 &&
392 return true;
393
394 return false;
395}
396
398 unsigned Index;
399
400 if (Attrs.hasAttrSomewhere(Kind, &Index))
401 return getArgOperand(Index - AttributeList::FirstArgIndex);
402 if (const Function *F = getCalledFunction())
403 if (F->getAttributes().hasAttrSomewhere(Kind, &Index))
404 return getArgOperand(Index - AttributeList::FirstArgIndex);
405
406 return nullptr;
407}
408
409/// Determine whether the argument or parameter has the given attribute.
410bool CallBase::paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
411 assert(ArgNo < arg_size() && "Param index out of bounds!");
412
413 if (Attrs.hasParamAttr(ArgNo, Kind))
414 return true;
415
416 const Function *F = getCalledFunction();
417 if (!F)
418 return false;
419
420 if (!F->getAttributes().hasParamAttr(ArgNo, Kind))
421 return false;
422
423 // Take into account mod/ref by operand bundles.
424 switch (Kind) {
425 case Attribute::ReadNone:
427 case Attribute::ReadOnly:
429 case Attribute::WriteOnly:
430 return !hasReadingOperandBundles();
431 default:
432 return true;
433 }
434}
435
437 bool AllowUndefOrPoison) const {
439 "Argument must be a pointer");
440 if (paramHasAttr(ArgNo, Attribute::NonNull) &&
441 (AllowUndefOrPoison || paramHasAttr(ArgNo, Attribute::NoUndef)))
442 return true;
443
444 if (paramHasAttr(ArgNo, Attribute::Dereferenceable) &&
446 getCaller(),
448 return true;
449
450 return false;
451}
452
453bool CallBase::hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const {
455 return F->getAttributes().hasFnAttr(Kind);
456
457 return false;
458}
459
460bool CallBase::hasFnAttrOnCalledFunction(StringRef Kind) const {
462 return F->getAttributes().hasFnAttr(Kind);
463
464 return false;
465}
466
467template <typename AK>
468Attribute CallBase::getFnAttrOnCalledFunction(AK Kind) const {
469 if constexpr (std::is_same_v<AK, Attribute::AttrKind>) {
470 // getMemoryEffects() correctly combines memory effects from the call-site,
471 // operand bundles and function.
472 assert(Kind != Attribute::Memory && "Use getMemoryEffects() instead");
473 }
474
476 return F->getAttributes().getFnAttr(Kind);
477
478 return Attribute();
479}
480
481template LLVM_ABI Attribute
482CallBase::getFnAttrOnCalledFunction(Attribute::AttrKind Kind) const;
483template LLVM_ABI Attribute
484CallBase::getFnAttrOnCalledFunction(StringRef Kind) const;
485
486template <typename AK>
487Attribute CallBase::getParamAttrOnCalledFunction(unsigned ArgNo,
488 AK Kind) const {
490
491 if (auto *F = dyn_cast<Function>(V))
492 return F->getAttributes().getParamAttr(ArgNo, Kind);
493
494 return Attribute();
495}
496template LLVM_ABI Attribute CallBase::getParamAttrOnCalledFunction(
497 unsigned ArgNo, Attribute::AttrKind Kind) const;
498template LLVM_ABI Attribute
499CallBase::getParamAttrOnCalledFunction(unsigned ArgNo, StringRef Kind) const;
500
503 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
505}
506
509 const unsigned BeginIndex) {
510 auto It = op_begin() + BeginIndex;
511 for (auto &B : Bundles)
512 It = std::copy(B.input_begin(), B.input_end(), It);
513
514 auto *ContextImpl = getContext().pImpl;
515 auto BI = Bundles.begin();
516 unsigned CurrentIndex = BeginIndex;
517
518 for (auto &BOI : bundle_op_infos()) {
519 assert(BI != Bundles.end() && "Incorrect allocation?");
520
521 BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
522 BOI.Begin = CurrentIndex;
523 BOI.End = CurrentIndex + BI->input_size();
524 CurrentIndex = BOI.End;
525 BI++;
526 }
527
528 assert(BI == Bundles.end() && "Incorrect allocation?");
529
530 return It;
531}
532
534 /// When there isn't many bundles, we do a simple linear search.
535 /// Else fallback to a binary-search that use the fact that bundles usually
536 /// have similar number of argument to get faster convergence.
538 for (auto &BOI : bundle_op_infos())
539 if (BOI.Begin <= OpIdx && OpIdx < BOI.End)
540 return BOI;
541
542 llvm_unreachable("Did not find operand bundle for operand!");
543 }
544
545 assert(OpIdx >= arg_size() && "the Idx is not in the operand bundles");
547 OpIdx < std::prev(bundle_op_info_end())->End &&
548 "The Idx isn't in the operand bundle");
549
550 /// We need a decimal number below and to prevent using floating point numbers
551 /// we use an intergal value multiplied by this constant.
552 constexpr unsigned NumberScaling = 1024;
553
556 bundle_op_iterator Current = Begin;
557
558 while (Begin != End) {
559 unsigned ScaledOperandPerBundle =
560 NumberScaling * (std::prev(End)->End - Begin->Begin) / (End - Begin);
561 Current = Begin + (((OpIdx - Begin->Begin) * NumberScaling) /
562 ScaledOperandPerBundle);
563 if (Current >= End)
564 Current = std::prev(End);
565 assert(Current < End && Current >= Begin &&
566 "the operand bundle doesn't cover every value in the range");
567 if (OpIdx >= Current->Begin && OpIdx < Current->End)
568 break;
569 if (OpIdx >= Current->End)
570 Begin = Current + 1;
571 else
572 End = Current;
573 }
574
575 assert(OpIdx >= Current->Begin && OpIdx < Current->End &&
576 "the operand bundle doesn't cover every value in the range");
577 return *Current;
578}
579
582 InsertPosition InsertPt) {
583 if (CB->getOperandBundle(ID))
584 return CB;
585
587 CB->getOperandBundlesAsDefs(Bundles);
588 Bundles.push_back(OB);
589 return Create(CB, Bundles, InsertPt);
590}
591
593 InsertPosition InsertPt) {
595 bool CreateNew = false;
596
597 for (unsigned I = 0, E = CB->getNumOperandBundles(); I != E; ++I) {
598 auto Bundle = CB->getOperandBundleAt(I);
599 if (Bundle.getTagID() == ID) {
600 CreateNew = true;
601 continue;
602 }
603 Bundles.emplace_back(Bundle);
604 }
605
606 return CreateNew ? Create(CB, Bundles, InsertPt) : CB;
607}
608
610 // Implementation note: this is a conservative implementation of operand
611 // bundle semantics, where *any* non-assume operand bundle (other than
612 // ptrauth) forces a callsite to be at least readonly.
617 getIntrinsicID() != Intrinsic::assume;
618}
619
628
630 MemoryEffects ME = getAttributes().getMemoryEffects();
631 if (auto *Fn = dyn_cast<Function>(getCalledOperand())) {
632 MemoryEffects FnME = Fn->getMemoryEffects();
633 if (hasOperandBundles()) {
634 // TODO: Add a method to get memory effects for operand bundles instead.
636 FnME |= MemoryEffects::readOnly();
638 FnME |= MemoryEffects::writeOnly();
639 }
640 if (isVolatile()) {
641 // Volatile operations also access inaccessible memory.
643 }
644 ME &= FnME;
645 }
646 return ME;
647}
651
652/// Determine if the function does not access memory.
659
660/// Determine if the function does not access or only reads memory.
667
668/// Determine if the function does not access or only writes memory.
675
676/// Determine if the call can access memmory only using pointers based
677/// on its arguments.
684
685/// Determine if the function may only access memory that is
686/// inaccessible from the IR.
693
694/// Determine if the function may only access memory that is
695/// either inaccessible from the IR or pointed to by its arguments.
703
705 if (OpNo < arg_size()) {
706 // If the argument is passed byval, the callee does not have access to the
707 // original pointer and thus cannot capture it.
708 if (isByValArgument(OpNo))
709 return CaptureInfo::none();
710
712 if (auto *Fn = dyn_cast<Function>(getCalledOperand()))
713 CI &= Fn->getAttributes().getParamAttrs(OpNo).getCaptureInfo();
714 return CI;
715 }
716
717 // Bundles on assumes are captures(none).
718 if (getIntrinsicID() == Intrinsic::assume)
719 return CaptureInfo::none();
720
721 // deopt operand bundles are captures(none)
722 auto &BOI = getBundleOpInfoForOperand(OpNo);
723 auto OBU = operandBundleFromBundleOpInfo(BOI);
724 return OBU.isDeoptOperandBundle() ? CaptureInfo::none() : CaptureInfo::all();
725}
726
728 for (unsigned I = 0, E = arg_size(); I < E; ++I) {
730 continue;
731
733 if (auto *Fn = dyn_cast<Function>(getCalledOperand()))
734 CI &= Fn->getAttributes().getParamAttrs(I).getCaptureInfo();
736 return true;
737 }
738 return false;
739}
740
741//===----------------------------------------------------------------------===//
742// CallInst Implementation
743//===----------------------------------------------------------------------===//
744
// Initialize a call instruction: record the callee's function type, fill in
// argument and operand-bundle operands, and set the name. Operand storage
// must already have been allocated by the caller (checked below).
void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
  this->FTy = FTy;
  // +1 accounts for the callee operand, which is stored last.
  assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
         "NumOperands not set up?");

#ifndef NDEBUG
  // Debug-only signature checks: a vararg callee may receive extra trailing
  // arguments; each fixed argument must match its declared parameter type.
  assert((Args.size() == FTy->getNumParams() ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Calling a function with bad signature!");

  for (unsigned i = 0; i != Args.size(); ++i)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Calling a function with a bad signature!");
#endif

  // Set operands in order of their index to match use-list-order
  // prediction.
  llvm::copy(Args, op_begin());
  setCalledOperand(Func);

  // Bundle inputs occupy the slots between the arguments and the callee
  // operand; the returned iterator must land one slot before op_end().
  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 1 == op_end() && "Should add up!");

  setName(NameStr);
}
773
// Initialize a call that takes no arguments and carries no operand bundles;
// the callee is the sole operand.
void CallInst::init(FunctionType *FTy, Value *Func, const Twine &NameStr) {
  this->FTy = FTy;
  assert(getNumOperands() == 1 && "NumOperands not set up?");
  setCalledOperand(Func);

  assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");

  setName(NameStr);
}
783
// Construct a zero-argument call to Func; delegates operand setup to the
// no-argument init() overload above.
CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
                   AllocInfo AllocInfo, InsertPosition InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Call, AllocInfo,
               InsertBefore) {
  init(Ty, Func, Name);
}
790
791CallInst::CallInst(const CallInst &CI, AllocInfo AllocInfo)
792 : CallBase(CI.Attrs, CI.FTy, CI.getType(), Instruction::Call, AllocInfo) {
794 "Wrong number of operands allocated");
795 setTailCallKind(CI.getTailCallKind());
797
798 std::copy(CI.op_begin(), CI.op_end(), op_begin());
799 std::copy(CI.bundle_op_info_begin(), CI.bundle_op_info_end(),
802}
803
805 InsertPosition InsertPt) {
806 std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());
807
808 auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledOperand(),
809 Args, OpB, CI->getName(), InsertPt);
810 NewCI->setTailCallKind(CI->getTailCallKind());
811 NewCI->setCallingConv(CI->getCallingConv());
812 NewCI->SubclassOptionalData = CI->SubclassOptionalData;
813 NewCI->setAttributes(CI->getAttributes());
814 NewCI->setDebugLoc(CI->getDebugLoc());
815 return NewCI;
816}
817
818// Update profile weight for call instruction by scaling it using the ratio
819// of S/T. The meaning of "branch_weights" meta data for call instruction is
820// transfered to represent call count.
822 if (T == 0) {
823 LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
824 "div by 0. Ignoring. Likely the function "
825 << getParent()->getParent()->getName()
826 << " has 0 entry count, and contains call instructions "
827 "with non-zero prof info.");
828 return;
829 }
830 scaleProfData(*this, S, T);
831}
832
833//===----------------------------------------------------------------------===//
834// InvokeInst Implementation
835//===----------------------------------------------------------------------===//
836
837void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
838 BasicBlock *IfException, ArrayRef<Value *> Args,
840 const Twine &NameStr) {
841 this->FTy = FTy;
842
844 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)) &&
845 "NumOperands not set up?");
846
847#ifndef NDEBUG
848 assert(((Args.size() == FTy->getNumParams()) ||
849 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
850 "Invoking a function with bad signature");
851
852 for (unsigned i = 0, e = Args.size(); i != e; i++)
853 assert((i >= FTy->getNumParams() ||
854 FTy->getParamType(i) == Args[i]->getType()) &&
855 "Invoking a function with a bad signature!");
856#endif
857
858 // Set operands in order of their index to match use-list-order
859 // prediction.
860 llvm::copy(Args, op_begin());
861 setNormalDest(IfNormal);
862 setUnwindDest(IfException);
864
865 auto It = populateBundleOperandInfos(Bundles, Args.size());
866 (void)It;
867 assert(It + 3 == op_end() && "Should add up!");
868
869 setName(NameStr);
870}
871
872InvokeInst::InvokeInst(const InvokeInst &II, AllocInfo AllocInfo)
873 : CallBase(II.Attrs, II.FTy, II.getType(), Instruction::Invoke, AllocInfo) {
874 assert(getNumOperands() == II.getNumOperands() &&
875 "Wrong number of operands allocated");
876 setCallingConv(II.getCallingConv());
877 std::copy(II.op_begin(), II.op_end(), op_begin());
878 std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),
880 SubclassOptionalData = II.SubclassOptionalData;
881}
882
884 InsertPosition InsertPt) {
885 std::vector<Value *> Args(II->arg_begin(), II->arg_end());
886
887 auto *NewII = InvokeInst::Create(
888 II->getFunctionType(), II->getCalledOperand(), II->getNormalDest(),
889 II->getUnwindDest(), Args, OpB, II->getName(), InsertPt);
890 NewII->setCallingConv(II->getCallingConv());
891 NewII->SubclassOptionalData = II->SubclassOptionalData;
892 NewII->setAttributes(II->getAttributes());
893 NewII->setDebugLoc(II->getDebugLoc());
894 return NewII;
895}
896
898 return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHIIt());
899}
900
902 if (T == 0) {
903 LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
904 "div by 0. Ignoring. Likely the function "
905 << getParent()->getParent()->getName()
906 << " has 0 entry count, and contains call instructions "
907 "with non-zero prof info.");
908 return;
909 }
910 scaleProfData(*this, S, T);
911}
912
913//===----------------------------------------------------------------------===//
914// CallBrInst Implementation
915//===----------------------------------------------------------------------===//
916
917void CallBrInst::init(FunctionType *FTy, Value *Fn, BasicBlock *Fallthrough,
918 ArrayRef<BasicBlock *> IndirectDests,
921 const Twine &NameStr) {
922 this->FTy = FTy;
923
924 assert(getNumOperands() == ComputeNumOperands(Args.size(),
925 IndirectDests.size(),
926 CountBundleInputs(Bundles)) &&
927 "NumOperands not set up?");
928
929#ifndef NDEBUG
930 assert(((Args.size() == FTy->getNumParams()) ||
931 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
932 "Calling a function with bad signature");
933
934 for (unsigned i = 0, e = Args.size(); i != e; i++)
935 assert((i >= FTy->getNumParams() ||
936 FTy->getParamType(i) == Args[i]->getType()) &&
937 "Calling a function with a bad signature!");
938#endif
939
940 // Set operands in order of their index to match use-list-order
941 // prediction.
942 llvm::copy(Args, op_begin());
943 NumIndirectDests = IndirectDests.size();
944 setDefaultDest(Fallthrough);
945 for (unsigned i = 0; i != NumIndirectDests; ++i)
946 setIndirectDest(i, IndirectDests[i]);
948
949 auto It = populateBundleOperandInfos(Bundles, Args.size());
950 (void)It;
951 assert(It + 2 + IndirectDests.size() == op_end() && "Should add up!");
952
953 setName(NameStr);
954}
955
956CallBrInst::CallBrInst(const CallBrInst &CBI, AllocInfo AllocInfo)
957 : CallBase(CBI.Attrs, CBI.FTy, CBI.getType(), Instruction::CallBr,
958 AllocInfo) {
960 "Wrong number of operands allocated");
962 std::copy(CBI.op_begin(), CBI.op_end(), op_begin());
963 std::copy(CBI.bundle_op_info_begin(), CBI.bundle_op_info_end(),
966 NumIndirectDests = CBI.NumIndirectDests;
967}
968
969CallBrInst *CallBrInst::Create(CallBrInst *CBI, ArrayRef<OperandBundleDef> OpB,
970 InsertPosition InsertPt) {
971 std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());
972
973 auto *NewCBI = CallBrInst::Create(
974 CBI->getFunctionType(), CBI->getCalledOperand(), CBI->getDefaultDest(),
975 CBI->getIndirectDests(), Args, OpB, CBI->getName(), InsertPt);
976 NewCBI->setCallingConv(CBI->getCallingConv());
977 NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
978 NewCBI->setAttributes(CBI->getAttributes());
979 NewCBI->setDebugLoc(CBI->getDebugLoc());
980 NewCBI->NumIndirectDests = CBI->NumIndirectDests;
981 return NewCBI;
982}
983
984//===----------------------------------------------------------------------===//
985// ReturnInst Implementation
986//===----------------------------------------------------------------------===//
987
988ReturnInst::ReturnInst(const ReturnInst &RI, AllocInfo AllocInfo)
989 : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Ret,
990 AllocInfo) {
992 "Wrong number of operands allocated");
993 if (RI.getNumOperands())
994 Op<0>() = RI.Op<0>();
996}
997
// Construct a 'ret' instruction. retVal may be null for a void return, in
// which case the instruction has no operands.
ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, AllocInfo AllocInfo,
                       InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(C), Instruction::Ret, AllocInfo,
                  InsertBefore) {
  if (retVal)
    Op<0>() = retVal;
}
1005
1006//===----------------------------------------------------------------------===//
1007// ResumeInst Implementation
1008//===----------------------------------------------------------------------===//
1009
// Copy-construct a 'resume', duplicating its single exception-value operand.
ResumeInst::ResumeInst(const ResumeInst &RI)
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Resume,
                  AllocMarker) {
  Op<0>() = RI.Op<0>();
}
1015
// Construct a 'resume' that continues propagation of the exception value Exn.
ResumeInst::ResumeInst(Value *Exn, InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                  AllocMarker, InsertBefore) {
  Op<0>() = Exn;
}
1021
1022//===----------------------------------------------------------------------===//
1023// CleanupReturnInst Implementation
1024//===----------------------------------------------------------------------===//
1025
1026CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI,
1028 : Instruction(CRI.getType(), Instruction::CleanupRet, AllocInfo) {
1030 "Wrong number of operands allocated");
1031 setSubclassData<Instruction::OpaqueField>(
1033 Op<0>() = CRI.Op<0>();
1034 if (CRI.hasUnwindDest())
1035 Op<1>() = CRI.Op<1>();
1036}
1037
1038void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
1039 if (UnwindBB)
1040 setSubclassData<UnwindDestField>(true);
1041
1042 Op<0>() = CleanupPad;
1043 if (UnwindBB)
1044 Op<1>() = UnwindBB;
1045}
1046
1047CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
1049 InsertPosition InsertBefore)
1050 : Instruction(Type::getVoidTy(CleanupPad->getContext()),
1051 Instruction::CleanupRet, AllocInfo, InsertBefore) {
1052 init(CleanupPad, UnwindBB);
1053}
1054
1055//===----------------------------------------------------------------------===//
1056// CatchReturnInst Implementation
1057//===----------------------------------------------------------------------===//
// Set the two fixed operands: the catchpad being exited and the successor
// block that control transfers to.
void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {
  Op<0>() = CatchPad;
  Op<1>() = BB;
}
1062
// Copy-construct a 'catchret', duplicating both operands (catchpad token and
// successor block).
CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)
    : Instruction(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet,
                  AllocMarker) {
  Op<0>() = CRI.Op<0>();
  Op<1>() = CRI.Op<1>();
}
1069
// Construct a 'catchret' exiting CatchPad and branching to BB.
CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                  AllocMarker, InsertBefore) {
  init(CatchPad, BB);
}
1076
1077//===----------------------------------------------------------------------===//
1078// CatchSwitchInst Implementation
1079//===----------------------------------------------------------------------===//
1080
// Construct a catchswitch. NumReservedValues counts handler slots; one extra
// slot is added when an unwind destination is present, and init() receives
// one more again for the parent-pad operand.
CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr,
                                 InsertPosition InsertBefore)
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, AllocMarker,
                  InsertBefore) {
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}
1092
/// Copy constructor: sizes the hung-off use list via init(), then copies
/// every operand after the parent pad from \p CSI.
CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI)
    : Instruction(CSI.getType(), Instruction::CatchSwitch, AllocMarker) {
  init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands());
  setNumHungOffUseOperands(ReservedSpace);
  Use *OL = getOperandList();
  const Use *InOL = CSI.getOperandList();
  // Operand 0 (the parent pad) was already set by init(); copy the rest.
  for (unsigned I = 1, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];
}
1103
1104void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest,
1105 unsigned NumReservedValues) {
1106 assert(ParentPad && NumReservedValues);
1107
1108 ReservedSpace = NumReservedValues;
1109 setNumHungOffUseOperands(UnwindDest ? 2 : 1);
1110 allocHungoffUses(ReservedSpace);
1111
1112 Op<0>() = ParentPad;
1113 if (UnwindDest) {
1115 setUnwindDest(UnwindDest);
1116 }
1117}
1118
1119/// growOperands - grow operands - This grows the operand list in response to a
1120/// push_back style of operation. This grows the number of ops by 2 times.
1121void CatchSwitchInst::growOperands(unsigned Size) {
1122 unsigned NumOperands = getNumOperands();
1123 assert(NumOperands >= 1);
1124 if (ReservedSpace >= NumOperands + Size)
1125 return;
1126 ReservedSpace = (NumOperands + Size / 2) * 2;
1127 growHungoffUses(ReservedSpace);
1128}
1129
1131 unsigned OpNo = getNumOperands();
1132 growOperands(1);
1133 assert(OpNo < ReservedSpace && "Growing didn't work!");
1135 getOperandList()[OpNo] = Handler;
1136}
1137
1139 // Move all subsequent handlers up one.
1140 Use *EndDst = op_end() - 1;
1141 for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
1142 *CurDst = *(CurDst + 1);
1143 // Null out the last handler use.
1144 *EndDst = nullptr;
1145
1147}
1148
1149//===----------------------------------------------------------------------===//
1150// FuncletPadInst Implementation
1151//===----------------------------------------------------------------------===//
/// Shared initializer for catchpad/cleanuppad: fills the leading argument
/// operands, then the trailing parent-pad operand, and names the result.
void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args,
                          const Twine &NameStr) {
  assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?");
  llvm::copy(Args, op_begin()); // args occupy the leading operand slots
  setParentPad(ParentPad);      // parent pad is the final operand
  setName(NameStr);
}
1159
1160FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI, AllocInfo AllocInfo)
1161 : Instruction(FPI.getType(), FPI.getOpcode(), AllocInfo) {
1163 "Wrong number of operands allocated");
1164 std::copy(FPI.op_begin(), FPI.op_end(), op_begin());
1166}
1167
1168FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
1170 const Twine &NameStr,
1171 InsertPosition InsertBefore)
1172 : Instruction(ParentPad->getType(), Op, AllocInfo, InsertBefore) {
1173 init(ParentPad, Args, NameStr);
1174}
1175
1176//===----------------------------------------------------------------------===//
1177// UnreachableInst Implementation
1178//===----------------------------------------------------------------------===//
1179
1181 InsertPosition InsertBefore)
1182 : Instruction(Type::getVoidTy(Context), Instruction::Unreachable,
1183 AllocMarker, InsertBefore) {}
1184
1185//===----------------------------------------------------------------------===//
1186// BranchInst Implementation
1187//===----------------------------------------------------------------------===//
1188
1189void BranchInst::AssertOK() {
1190 if (isConditional())
1191 assert(getCondition()->getType()->isIntegerTy(1) &&
1192 "May only branch on boolean predicates!");
1193}
1194
/// Unconditional branch: the single successor lives in the last operand
/// slot (branch operands are laid out from the end of the use list).
BranchInst::BranchInst(BasicBlock *IfTrue, AllocInfo AllocInfo,
                       InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  AllocInfo, InsertBefore) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}
1202
/// Conditional branch: operands are [cond, false-dest, true-dest], indexed
/// from the end of the use list.
BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       AllocInfo AllocInfo, InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  AllocInfo, InsertBefore) {
  // Assign in order of operand index to make use-list order predictable.
  Op<-3>() = Cond;
  Op<-2>() = IfFalse;
  Op<-1>() = IfTrue;
#ifndef NDEBUG
  AssertOK(); // verifies the condition is i1
#endif
}
1215
1216BranchInst::BranchInst(const BranchInst &BI, AllocInfo AllocInfo)
1217 : Instruction(Type::getVoidTy(BI.getContext()), Instruction::Br,
1218 AllocInfo) {
1220 "Wrong number of operands allocated");
1221 // Assign in order of operand index to make use-list order predictable.
1222 if (BI.getNumOperands() != 1) {
1223 assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
1224 Op<-3>() = BI.Op<-3>();
1225 Op<-2>() = BI.Op<-2>();
1226 }
1227 Op<-1>() = BI.Op<-1>();
1229}
1230
1233 "Cannot swap successors of an unconditional branch");
1234 Op<-1>().swap(Op<-2>());
1235
1236 // Update profile metadata if present and it matches our structural
1237 // expectations.
1239}
1240
1241//===----------------------------------------------------------------------===//
1242// AllocaInst Implementation
1243//===----------------------------------------------------------------------===//
1244
1245static Value *getAISize(LLVMContext &Context, Value *Amt) {
1246 if (!Amt)
1247 Amt = ConstantInt::get(Type::getInt32Ty(Context), 1);
1248 else {
1249 assert(!isa<BasicBlock>(Amt) &&
1250 "Passed basic block into allocation size parameter! Use other ctor");
1251 assert(Amt->getType()->isIntegerTy() &&
1252 "Allocation array size is not an integer!");
1253 }
1254 return Amt;
1255}
1256
1258 assert(Pos.isValid() &&
1259 "Insertion position cannot be null when alignment not provided!");
1260 BasicBlock *BB = Pos.getBasicBlock();
1261 assert(BB->getParent() &&
1262 "BB must be in a Function when alignment not provided!");
1263 const DataLayout &DL = BB->getDataLayout();
1264 return DL.getPrefTypeAlign(Ty);
1265}
1266
// Delegating constructors: default the array size to nullptr (meaning one
// element) and the alignment to the DataLayout's preferred alignment for Ty.
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       InsertPosition InsertBefore)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, InsertPosition InsertBefore)
    : AllocaInst(Ty, AddrSpace, ArraySize,
                 computeAllocaDefaultAlign(Ty, InsertBefore), Name,
                 InsertBefore) {}
1276
1277AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
1278 Align Align, const Twine &Name,
1279 InsertPosition InsertBefore)
1280 : UnaryInstruction(PointerType::get(Ty->getContext(), AddrSpace), Alloca,
1281 getAISize(Ty->getContext(), ArraySize), InsertBefore),
1282 AllocatedType(Ty) {
1284 assert(!Ty->isVoidTy() && "Cannot allocate void!");
1285 setName(Name);
1286}
1287
1290 return !CI->isOne();
1291 return true;
1292}
1293
1294/// isStaticAlloca - Return true if this alloca is in the entry block of the
1295/// function and is a constant size. If so, the code generator will fold it
1296/// into the prolog/epilog code, so it is basically free.
1298 // Must be constant size.
1299 if (!isa<ConstantInt>(getArraySize())) return false;
1300
1301 // Must be in the entry block.
1302 const BasicBlock *Parent = getParent();
1303 return Parent->isEntryBlock() && !isUsedWithInAlloca();
1304}
1305
1306//===----------------------------------------------------------------------===//
1307// LoadInst Implementation
1308//===----------------------------------------------------------------------===//
1309
1310void LoadInst::AssertOK() {
1312 "Ptr must have pointer type.");
1313}
1314
1316 assert(Pos.isValid() &&
1317 "Insertion position cannot be null when alignment not provided!");
1318 BasicBlock *BB = Pos.getBasicBlock();
1319 assert(BB->getParent() &&
1320 "BB must be in a Function when alignment not provided!");
1321 const DataLayout &DL = BB->getDataLayout();
1322 return DL.getABITypeAlign(Ty);
1323}
1324
// Delegating constructors: progressively default volatility (false),
// alignment (ABI alignment for Ty), ordering (non-atomic) and sync scope
// (system).
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
                   InsertPosition InsertBef)
    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   InsertPosition InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile,
               computeLoadStoreDefaultAlign(Ty, InsertBef), InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, InsertPosition InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertBef) {}
1338
1339LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1341 InsertPosition InsertBef)
1342 : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
1345 setAtomic(Order, SSID);
1346 AssertOK();
1347 setName(Name);
1348}
1349
1350//===----------------------------------------------------------------------===//
1351// StoreInst Implementation
1352//===----------------------------------------------------------------------===//
1353
1354void StoreInst::AssertOK() {
1355 assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!");
1357 "Ptr must have pointer type!");
1358}
1359
1361 : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}
1362
1364 InsertPosition InsertBefore)
1365 : StoreInst(val, addr, isVolatile,
1366 computeLoadStoreDefaultAlign(val->getType(), InsertBefore),
1367 InsertBefore) {}
1368
1370 InsertPosition InsertBefore)
1372 SyncScope::System, InsertBefore) {}
1373
1375 AtomicOrdering Order, SyncScope::ID SSID,
1376 InsertPosition InsertBefore)
1377 : Instruction(Type::getVoidTy(val->getContext()), Store, AllocMarker,
1378 InsertBefore) {
1379 Op<0>() = val;
1380 Op<1>() = addr;
1383 setAtomic(Order, SSID);
1384 AssertOK();
1385}
1386
1387//===----------------------------------------------------------------------===//
1388// AtomicCmpXchgInst Implementation
1389//===----------------------------------------------------------------------===//
1390
1391void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
1392 Align Alignment, AtomicOrdering SuccessOrdering,
1393 AtomicOrdering FailureOrdering,
1394 SyncScope::ID SSID) {
1395 Op<0>() = Ptr;
1396 Op<1>() = Cmp;
1397 Op<2>() = NewVal;
1398 setSuccessOrdering(SuccessOrdering);
1399 setFailureOrdering(FailureOrdering);
1400 setSyncScopeID(SSID);
1401 setAlignment(Alignment);
1402
1403 assert(getOperand(0) && getOperand(1) && getOperand(2) &&
1404 "All operands must be non-null!");
1406 "Ptr must have pointer type!");
1407 assert(getOperand(1)->getType() == getOperand(2)->getType() &&
1408 "Cmp type and NewVal type must be same!");
1409}
1410
1412 Align Alignment,
1413 AtomicOrdering SuccessOrdering,
1414 AtomicOrdering FailureOrdering,
1415 SyncScope::ID SSID,
1416 InsertPosition InsertBefore)
1417 : Instruction(
1418 StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
1419 AtomicCmpXchg, AllocMarker, InsertBefore) {
1420 Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
1421}
1422
1423//===----------------------------------------------------------------------===//
1424// AtomicRMWInst Implementation
1425//===----------------------------------------------------------------------===//
1426
1427void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
1428 Align Alignment, AtomicOrdering Ordering,
1429 SyncScope::ID SSID) {
1430 assert(Ordering != AtomicOrdering::NotAtomic &&
1431 "atomicrmw instructions can only be atomic.");
1432 assert(Ordering != AtomicOrdering::Unordered &&
1433 "atomicrmw instructions cannot be unordered.");
1434 Op<0>() = Ptr;
1435 Op<1>() = Val;
1437 setOrdering(Ordering);
1438 setSyncScopeID(SSID);
1439 setAlignment(Alignment);
1440
1441 assert(getOperand(0) && getOperand(1) && "All operands must be non-null!");
1443 "Ptr must have pointer type!");
1444 assert(Ordering != AtomicOrdering::NotAtomic &&
1445 "AtomicRMW instructions must be atomic!");
1446}
1447
1449 Align Alignment, AtomicOrdering Ordering,
1450 SyncScope::ID SSID, InsertPosition InsertBefore)
1451 : Instruction(Val->getType(), AtomicRMW, AllocMarker, InsertBefore) {
1452 Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
1453}
1454
1456 switch (Op) {
1458 return "xchg";
1459 case AtomicRMWInst::Add:
1460 return "add";
1461 case AtomicRMWInst::Sub:
1462 return "sub";
1463 case AtomicRMWInst::And:
1464 return "and";
1466 return "nand";
1467 case AtomicRMWInst::Or:
1468 return "or";
1469 case AtomicRMWInst::Xor:
1470 return "xor";
1471 case AtomicRMWInst::Max:
1472 return "max";
1473 case AtomicRMWInst::Min:
1474 return "min";
1476 return "umax";
1478 return "umin";
1480 return "fadd";
1482 return "fsub";
1484 return "fmax";
1486 return "fmin";
1488 return "fmaximum";
1490 return "fminimum";
1492 return "uinc_wrap";
1494 return "udec_wrap";
1496 return "usub_cond";
1498 return "usub_sat";
1500 return "<invalid operation>";
1501 }
1502
1503 llvm_unreachable("invalid atomicrmw operation");
1504}
1505
1506//===----------------------------------------------------------------------===//
1507// FenceInst Implementation
1508//===----------------------------------------------------------------------===//
1509
1511 SyncScope::ID SSID, InsertPosition InsertBefore)
1512 : Instruction(Type::getVoidTy(C), Fence, AllocMarker, InsertBefore) {
1513 setOrdering(Ordering);
1514 setSyncScopeID(SSID);
1515}
1516
1517//===----------------------------------------------------------------------===//
1518// GetElementPtrInst Implementation
1519//===----------------------------------------------------------------------===//
1520
/// Wire up a GEP's operands: the base pointer in slot 0 followed by the
/// index list, then name the instruction.
void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
                             const Twine &Name) {
  assert(getNumOperands() == 1 + IdxList.size() &&
         "NumOperands not initialized?");
  Op<0>() = Ptr;
  llvm::copy(IdxList, op_begin() + 1); // indices follow the base pointer
  setName(Name);
}
1529
1530GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI,
1532 : Instruction(GEPI.getType(), GetElementPtr, AllocInfo),
1533 SourceElementType(GEPI.SourceElementType),
1534 ResultElementType(GEPI.ResultElementType) {
1535 assert(getNumOperands() == GEPI.getNumOperands() &&
1536 "Wrong number of operands allocated");
1537 std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin());
1539}
1540
1542 if (auto *Struct = dyn_cast<StructType>(Ty)) {
1543 if (!Struct->indexValid(Idx))
1544 return nullptr;
1545 return Struct->getTypeAtIndex(Idx);
1546 }
1547 if (!Idx->getType()->isIntOrIntVectorTy())
1548 return nullptr;
1549 if (auto *Array = dyn_cast<ArrayType>(Ty))
1550 return Array->getElementType();
1551 if (auto *Vector = dyn_cast<VectorType>(Ty))
1552 return Vector->getElementType();
1553 return nullptr;
1554}
1555
1557 if (auto *Struct = dyn_cast<StructType>(Ty)) {
1558 if (Idx >= Struct->getNumElements())
1559 return nullptr;
1560 return Struct->getElementType(Idx);
1561 }
1562 if (auto *Array = dyn_cast<ArrayType>(Ty))
1563 return Array->getElementType();
1564 if (auto *Vector = dyn_cast<VectorType>(Ty))
1565 return Vector->getElementType();
1566 return nullptr;
1567}
1568
1569template <typename IndexTy>
1571 if (IdxList.empty())
1572 return Ty;
1573 for (IndexTy V : IdxList.slice(1)) {
1575 if (!Ty)
1576 return Ty;
1577 }
1578 return Ty;
1579}
1580
1584
1586 ArrayRef<Constant *> IdxList) {
1587 return getIndexedTypeInternal(Ty, IdxList);
1588}
1589
1593
1594/// hasAllZeroIndices - Return true if all of the indices of this GEP are
1595/// zeros. If so, the result pointer and the first operand have the same
1596/// value, just potentially different types.
1598 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
1600 if (!CI->isZero()) return false;
1601 } else {
1602 return false;
1603 }
1604 }
1605 return true;
1606}
1607
1608/// hasAllConstantIndices - Return true if all of the indices of this GEP are
1609/// constant integers. If so, the result pointer and the first operand have
1610/// a constant offset between them.
1612 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
1614 return false;
1615 }
1616 return true;
1617}
1618
1622
1624 GEPNoWrapFlags NW = cast<GEPOperator>(this)->getNoWrapFlags();
1625 if (B)
1627 else
1628 NW = NW.withoutInBounds();
1629 setNoWrapFlags(NW);
1630}
1631
1633 return cast<GEPOperator>(this)->getNoWrapFlags();
1634}
1635
1637 return cast<GEPOperator>(this)->isInBounds();
1638}
1639
1641 return cast<GEPOperator>(this)->hasNoUnsignedSignedWrap();
1642}
1643
1645 return cast<GEPOperator>(this)->hasNoUnsignedWrap();
1646}
1647
1649 APInt &Offset) const {
1650 // Delegate to the generic GEPOperator implementation.
1651 return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset);
1652}
1653
1655 const DataLayout &DL, unsigned BitWidth,
1656 SmallMapVector<Value *, APInt, 4> &VariableOffsets,
1657 APInt &ConstantOffset) const {
1658 // Delegate to the generic GEPOperator implementation.
1659 return cast<GEPOperator>(this)->collectOffset(DL, BitWidth, VariableOffsets,
1660 ConstantOffset);
1661}
1662
1663//===----------------------------------------------------------------------===//
1664// ExtractElementInst Implementation
1665//===----------------------------------------------------------------------===//
1666
/// extractelement: the result type is the input vector's element type.
ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
                                       const Twine &Name,
                                       InsertPosition InsertBef)
    : Instruction(cast<VectorType>(Val->getType())->getElementType(),
                  ExtractElement, AllocMarker, InsertBef) {
  assert(isValidOperands(Val, Index) &&
         "Invalid extractelement instruction operands!");
  Op<0>() = Val;   // the vector
  Op<1>() = Index; // the lane index
  setName(Name);
}
1678
1679bool ExtractElementInst::isValidOperands(const Value *Val, const Value *Index) {
1680 if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy())
1681 return false;
1682 return true;
1683}
1684
1685//===----------------------------------------------------------------------===//
1686// InsertElementInst Implementation
1687//===----------------------------------------------------------------------===//
1688
/// insertelement: the result has the same vector type as \p Vec.
InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
                                     const Twine &Name,
                                     InsertPosition InsertBef)
    : Instruction(Vec->getType(), InsertElement, AllocMarker, InsertBef) {
  assert(isValidOperands(Vec, Elt, Index) &&
         "Invalid insertelement instruction operands!");
  Op<0>() = Vec;   // the vector being updated
  Op<1>() = Elt;   // the new element value
  Op<2>() = Index; // the lane index
  setName(Name);
}
1700
1702 const Value *Index) {
1703 if (!Vec->getType()->isVectorTy())
1704 return false; // First operand of insertelement must be vector type.
1705
1706 if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType())
1707 return false;// Second operand of insertelement must be vector element type.
1708
1709 if (!Index->getType()->isIntegerTy())
1710 return false; // Third operand of insertelement must be i32.
1711 return true;
1712}
1713
1714//===----------------------------------------------------------------------===//
1715// ShuffleVectorInst Implementation
1716//===----------------------------------------------------------------------===//
1717
1719 assert(V && "Cannot create placeholder of nullptr V");
1720 return PoisonValue::get(V->getType());
1721}
1722
1724 InsertPosition InsertBefore)
1726 InsertBefore) {}
1727
1729 const Twine &Name,
1730 InsertPosition InsertBefore)
1732 InsertBefore) {}
1733
1735 const Twine &Name,
1736 InsertPosition InsertBefore)
1737 : Instruction(
1738 VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
1739 cast<VectorType>(Mask->getType())->getElementCount()),
1740 ShuffleVector, AllocMarker, InsertBefore) {
1741 assert(isValidOperands(V1, V2, Mask) &&
1742 "Invalid shuffle vector instruction operands!");
1743
1744 Op<0>() = V1;
1745 Op<1>() = V2;
1746 SmallVector<int, 16> MaskArr;
1747 getShuffleMask(cast<Constant>(Mask), MaskArr);
1748 setShuffleMask(MaskArr);
1749 setName(Name);
1750}
1751
1753 const Twine &Name,
1754 InsertPosition InsertBefore)
1755 : Instruction(
1756 VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
1757 Mask.size(), isa<ScalableVectorType>(V1->getType())),
1758 ShuffleVector, AllocMarker, InsertBefore) {
1759 assert(isValidOperands(V1, V2, Mask) &&
1760 "Invalid shuffle vector instruction operands!");
1761 Op<0>() = V1;
1762 Op<1>() = V2;
1763 setShuffleMask(Mask);
1764 setName(Name);
1765}
1766
1768 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
1769 int NumMaskElts = ShuffleMask.size();
1770 SmallVector<int, 16> NewMask(NumMaskElts);
1771 for (int i = 0; i != NumMaskElts; ++i) {
1772 int MaskElt = getMaskValue(i);
1773 if (MaskElt == PoisonMaskElem) {
1774 NewMask[i] = PoisonMaskElem;
1775 continue;
1776 }
1777 assert(MaskElt >= 0 && MaskElt < 2 * NumOpElts && "Out-of-range mask");
1778 MaskElt = (MaskElt < NumOpElts) ? MaskElt + NumOpElts : MaskElt - NumOpElts;
1779 NewMask[i] = MaskElt;
1780 }
1781 setShuffleMask(NewMask);
1782 Op<0>().swap(Op<1>());
1783}
1784
1786 ArrayRef<int> Mask) {
1787 // V1 and V2 must be vectors of the same type.
1788 if (!isa<VectorType>(V1->getType()) || V1->getType() != V2->getType())
1789 return false;
1790
1791 // Make sure the mask elements make sense.
1792 int V1Size =
1793 cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue();
1794 for (int Elem : Mask)
1795 if (Elem != PoisonMaskElem && Elem >= V1Size * 2)
1796 return false;
1797
1799 if ((Mask[0] != 0 && Mask[0] != PoisonMaskElem) || !all_equal(Mask))
1800 return false;
1801
1802 return true;
1803}
1804
1806 const Value *Mask) {
1807 // V1 and V2 must be vectors of the same type.
1808 if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType())
1809 return false;
1810
1811 // Mask must be vector of i32, and must be the same kind of vector as the
1812 // input vectors
1813 auto *MaskTy = dyn_cast<VectorType>(Mask->getType());
1814 if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32) ||
1816 return false;
1817
1818 // Check to see if Mask is valid.
1820 return true;
1821
1822 // NOTE: Through vector ConstantInt we have the potential to support more
1823 // than just zero splat masks but that requires a LangRef change.
1824 if (isa<ScalableVectorType>(MaskTy))
1825 return false;
1826
1827 unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();
1828
1829 if (const auto *CI = dyn_cast<ConstantInt>(Mask))
1830 return !CI->uge(V1Size * 2);
1831
1832 if (const auto *MV = dyn_cast<ConstantVector>(Mask)) {
1833 for (Value *Op : MV->operands()) {
1834 if (auto *CI = dyn_cast<ConstantInt>(Op)) {
1835 if (CI->uge(V1Size*2))
1836 return false;
1837 } else if (!isa<UndefValue>(Op)) {
1838 return false;
1839 }
1840 }
1841 return true;
1842 }
1843
1844 if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
1845 for (unsigned i = 0, e = cast<FixedVectorType>(MaskTy)->getNumElements();
1846 i != e; ++i)
1847 if (CDS->getElementAsInteger(i) >= V1Size*2)
1848 return false;
1849 return true;
1850 }
1851
1852 return false;
1853}
1854
1856 SmallVectorImpl<int> &Result) {
1857 ElementCount EC = cast<VectorType>(Mask->getType())->getElementCount();
1858
1859 if (isa<ConstantAggregateZero>(Mask) || isa<UndefValue>(Mask)) {
1860 int MaskVal = isa<UndefValue>(Mask) ? -1 : 0;
1861 Result.append(EC.getKnownMinValue(), MaskVal);
1862 return;
1863 }
1864
1865 assert(!EC.isScalable() &&
1866 "Scalable vector shuffle mask must be undef or zeroinitializer");
1867
1868 unsigned NumElts = EC.getFixedValue();
1869
1870 Result.reserve(NumElts);
1871
1872 if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
1873 for (unsigned i = 0; i != NumElts; ++i)
1874 Result.push_back(CDS->getElementAsInteger(i));
1875 return;
1876 }
1877 for (unsigned i = 0; i != NumElts; ++i) {
1878 Constant *C = Mask->getAggregateElement(i);
1879 Result.push_back(isa<UndefValue>(C) ? -1 :
1880 cast<ConstantInt>(C)->getZExtValue());
1881 }
1882}
1883
1885 ShuffleMask.assign(Mask.begin(), Mask.end());
1886 ShuffleMaskForBitcode = convertShuffleMaskForBitcode(Mask, getType());
1887}
1888
1890 Type *ResultTy) {
1891 Type *Int32Ty = Type::getInt32Ty(ResultTy->getContext());
1892 if (isa<ScalableVectorType>(ResultTy)) {
1893 assert(all_equal(Mask) && "Unexpected shuffle");
1894 Type *VecTy = VectorType::get(Int32Ty, Mask.size(), true);
1895 if (Mask[0] == 0)
1896 return Constant::getNullValue(VecTy);
1897 return PoisonValue::get(VecTy);
1898 }
1900 for (int Elem : Mask) {
1901 if (Elem == PoisonMaskElem)
1903 else
1904 MaskConst.push_back(ConstantInt::get(Int32Ty, Elem));
1905 }
1906 return ConstantVector::get(MaskConst);
1907}
1908
1909static bool isSingleSourceMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
1910 assert(!Mask.empty() && "Shuffle mask must contain elements");
1911 bool UsesLHS = false;
1912 bool UsesRHS = false;
1913 for (int I : Mask) {
1914 if (I == -1)
1915 continue;
1916 assert(I >= 0 && I < (NumOpElts * 2) &&
1917 "Out-of-bounds shuffle mask element");
1918 UsesLHS |= (I < NumOpElts);
1919 UsesRHS |= (I >= NumOpElts);
1920 if (UsesLHS && UsesRHS)
1921 return false;
1922 }
1923 // Allow for degenerate case: completely undef mask means neither source is used.
1924 return UsesLHS || UsesRHS;
1925}
1926
1928 // We don't have vector operand size information, so assume operands are the
1929 // same size as the mask.
1930 return isSingleSourceMaskImpl(Mask, NumSrcElts);
1931}
1932
1933static bool isIdentityMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
1934 if (!isSingleSourceMaskImpl(Mask, NumOpElts))
1935 return false;
1936 for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
1937 if (Mask[i] == -1)
1938 continue;
1939 if (Mask[i] != i && Mask[i] != (NumOpElts + i))
1940 return false;
1941 }
1942 return true;
1943}
1944
1946 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
1947 return false;
1948 // We don't have vector operand size information, so assume operands are the
1949 // same size as the mask.
1950 return isIdentityMaskImpl(Mask, NumSrcElts);
1951}
1952
1954 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
1955 return false;
1956 if (!isSingleSourceMask(Mask, NumSrcElts))
1957 return false;
1958
1959 // The number of elements in the mask must be at least 2.
1960 if (NumSrcElts < 2)
1961 return false;
1962
1963 for (int I = 0, E = Mask.size(); I < E; ++I) {
1964 if (Mask[I] == -1)
1965 continue;
1966 if (Mask[I] != (NumSrcElts - 1 - I) &&
1967 Mask[I] != (NumSrcElts + NumSrcElts - 1 - I))
1968 return false;
1969 }
1970 return true;
1971}
1972
1974 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
1975 return false;
1976 if (!isSingleSourceMask(Mask, NumSrcElts))
1977 return false;
1978 for (int I = 0, E = Mask.size(); I < E; ++I) {
1979 if (Mask[I] == -1)
1980 continue;
1981 if (Mask[I] != 0 && Mask[I] != NumSrcElts)
1982 return false;
1983 }
1984 return true;
1985}
1986
1988 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
1989 return false;
1990 // Select is differentiated from identity. It requires using both sources.
1991 if (isSingleSourceMask(Mask, NumSrcElts))
1992 return false;
1993 for (int I = 0, E = Mask.size(); I < E; ++I) {
1994 if (Mask[I] == -1)
1995 continue;
1996 if (Mask[I] != I && Mask[I] != (NumSrcElts + I))
1997 return false;
1998 }
1999 return true;
2000}
2001
2003 // Example masks that will return true:
2004 // v1 = <a, b, c, d>
2005 // v2 = <e, f, g, h>
2006 // trn1 = shufflevector v1, v2 <0, 4, 2, 6> = <a, e, c, g>
2007 // trn2 = shufflevector v1, v2 <1, 5, 3, 7> = <b, f, d, h>
2008
2009 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2010 return false;
2011 // 1. The number of elements in the mask must be a power-of-2 and at least 2.
2012 int Sz = Mask.size();
2013 if (Sz < 2 || !isPowerOf2_32(Sz))
2014 return false;
2015
2016 // 2. The first element of the mask must be either a 0 or a 1.
2017 if (Mask[0] != 0 && Mask[0] != 1)
2018 return false;
2019
2020 // 3. The difference between the first 2 elements must be equal to the
2021 // number of elements in the mask.
2022 if ((Mask[1] - Mask[0]) != NumSrcElts)
2023 return false;
2024
2025 // 4. The difference between consecutive even-numbered and odd-numbered
2026 // elements must be equal to 2.
2027 for (int I = 2; I < Sz; ++I) {
2028 int MaskEltVal = Mask[I];
2029 if (MaskEltVal == -1)
2030 return false;
2031 int MaskEltPrevVal = Mask[I - 2];
2032 if (MaskEltVal - MaskEltPrevVal != 2)
2033 return false;
2034 }
2035 return true;
2036}
2037
2039 int &Index) {
2040 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2041 return false;
2042 // Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
2043 int StartIndex = -1;
2044 for (int I = 0, E = Mask.size(); I != E; ++I) {
2045 int MaskEltVal = Mask[I];
2046 if (MaskEltVal == -1)
2047 continue;
2048
2049 if (StartIndex == -1) {
2050 // Don't support a StartIndex that begins in the second input, or if the
2051 // first non-undef index would access below the StartIndex.
2052 if (MaskEltVal < I || NumSrcElts <= (MaskEltVal - I))
2053 return false;
2054
2055 StartIndex = MaskEltVal - I;
2056 continue;
2057 }
2058
2059 // Splice is sequential starting from StartIndex.
2060 if (MaskEltVal != (StartIndex + I))
2061 return false;
2062 }
2063
2064 if (StartIndex == -1)
2065 return false;
2066
2067 // NOTE: This accepts StartIndex == 0 (COPY).
2068 Index = StartIndex;
2069 return true;
2070}
2071
2073 int NumSrcElts, int &Index) {
2074 // Must extract from a single source.
2075 if (!isSingleSourceMaskImpl(Mask, NumSrcElts))
2076 return false;
2077
2078 // Must be smaller (else this is an Identity shuffle).
2079 if (NumSrcElts <= (int)Mask.size())
2080 return false;
2081
2082 // Find start of extraction, accounting that we may start with an UNDEF.
2083 int SubIndex = -1;
2084 for (int i = 0, e = Mask.size(); i != e; ++i) {
2085 int M = Mask[i];
2086 if (M < 0)
2087 continue;
2088 int Offset = (M % NumSrcElts) - i;
2089 if (0 <= SubIndex && SubIndex != Offset)
2090 return false;
2091 SubIndex = Offset;
2092 }
2093
2094 if (0 <= SubIndex && SubIndex + (int)Mask.size() <= NumSrcElts) {
2095 Index = SubIndex;
2096 return true;
2097 }
2098 return false;
2099}
2100
2102 int NumSrcElts, int &NumSubElts,
2103 int &Index) {
2104 int NumMaskElts = Mask.size();
2105
2106 // Don't try to match if we're shuffling to a smaller size.
2107 if (NumMaskElts < NumSrcElts)
2108 return false;
2109
2110 // TODO: We don't recognize self-insertion/widening.
2111 if (isSingleSourceMaskImpl(Mask, NumSrcElts))
2112 return false;
2113
2114 // Determine which mask elements are attributed to which source.
2115 APInt UndefElts = APInt::getZero(NumMaskElts);
2116 APInt Src0Elts = APInt::getZero(NumMaskElts);
2117 APInt Src1Elts = APInt::getZero(NumMaskElts);
2118 bool Src0Identity = true;
2119 bool Src1Identity = true;
2120
2121 for (int i = 0; i != NumMaskElts; ++i) {
2122 int M = Mask[i];
2123 if (M < 0) {
2124 UndefElts.setBit(i);
2125 continue;
2126 }
2127 if (M < NumSrcElts) {
2128 Src0Elts.setBit(i);
2129 Src0Identity &= (M == i);
2130 continue;
2131 }
2132 Src1Elts.setBit(i);
2133 Src1Identity &= (M == (i + NumSrcElts));
2134 }
2135 assert((Src0Elts | Src1Elts | UndefElts).isAllOnes() &&
2136 "unknown shuffle elements");
2137 assert(!Src0Elts.isZero() && !Src1Elts.isZero() &&
2138 "2-source shuffle not found");
2139
2140 // Determine lo/hi span ranges.
2141 // TODO: How should we handle undefs at the start of subvector insertions?
2142 int Src0Lo = Src0Elts.countr_zero();
2143 int Src1Lo = Src1Elts.countr_zero();
2144 int Src0Hi = NumMaskElts - Src0Elts.countl_zero();
2145 int Src1Hi = NumMaskElts - Src1Elts.countl_zero();
2146
2147 // If src0 is in place, see if the src1 elements is inplace within its own
2148 // span.
2149 if (Src0Identity) {
2150 int NumSub1Elts = Src1Hi - Src1Lo;
2151 ArrayRef<int> Sub1Mask = Mask.slice(Src1Lo, NumSub1Elts);
2152 if (isIdentityMaskImpl(Sub1Mask, NumSrcElts)) {
2153 NumSubElts = NumSub1Elts;
2154 Index = Src1Lo;
2155 return true;
2156 }
2157 }
2158
2159 // If src1 is in place, see if the src0 elements is inplace within its own
2160 // span.
2161 if (Src1Identity) {
2162 int NumSub0Elts = Src0Hi - Src0Lo;
2163 ArrayRef<int> Sub0Mask = Mask.slice(Src0Lo, NumSub0Elts);
2164 if (isIdentityMaskImpl(Sub0Mask, NumSrcElts)) {
2165 NumSubElts = NumSub0Elts;
2166 Index = Src0Lo;
2167 return true;
2168 }
2169 }
2170
2171 return false;
2172}
2173
2175 // FIXME: Not currently possible to express a shuffle mask for a scalable
2176 // vector for this case.
2178 return false;
2179
2180 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2181 int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2182 if (NumMaskElts <= NumOpElts)
2183 return false;
2184
2185 // The first part of the mask must choose elements from exactly 1 source op.
2187 if (!isIdentityMaskImpl(Mask, NumOpElts))
2188 return false;
2189
2190 // All extending must be with undef elements.
2191 for (int i = NumOpElts; i < NumMaskElts; ++i)
2192 if (Mask[i] != -1)
2193 return false;
2194
2195 return true;
2196}
2197
2199 // FIXME: Not currently possible to express a shuffle mask for a scalable
2200 // vector for this case.
2202 return false;
2203
2204 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2205 int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2206 if (NumMaskElts >= NumOpElts)
2207 return false;
2208
2209 return isIdentityMaskImpl(getShuffleMask(), NumOpElts);
2210}
2211
2213 // Vector concatenation is differentiated from identity with padding.
2215 return false;
2216
2217 // FIXME: Not currently possible to express a shuffle mask for a scalable
2218 // vector for this case.
2220 return false;
2221
2222 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2223 int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2224 if (NumMaskElts != NumOpElts * 2)
2225 return false;
2226
2227 // Use the mask length rather than the operands' vector lengths here. We
2228 // already know that the shuffle returns a vector twice as long as the inputs,
2229 // and neither of the inputs are undef vectors. If the mask picks consecutive
2230 // elements from both inputs, then this is a concatenation of the inputs.
2231 return isIdentityMaskImpl(getShuffleMask(), NumMaskElts);
2232}
2233
2235 int ReplicationFactor, int VF) {
2236 assert(Mask.size() == (unsigned)ReplicationFactor * VF &&
2237 "Unexpected mask size.");
2238
2239 for (int CurrElt : seq(VF)) {
2240 ArrayRef<int> CurrSubMask = Mask.take_front(ReplicationFactor);
2241 assert(CurrSubMask.size() == (unsigned)ReplicationFactor &&
2242 "Run out of mask?");
2243 Mask = Mask.drop_front(ReplicationFactor);
2244 if (!all_of(CurrSubMask, [CurrElt](int MaskElt) {
2245 return MaskElt == PoisonMaskElem || MaskElt == CurrElt;
2246 }))
2247 return false;
2248 }
2249 assert(Mask.empty() && "Did not consume the whole mask?");
2250
2251 return true;
2252}
2253
2255 int &ReplicationFactor, int &VF) {
2256 // undef-less case is trivial.
2257 if (!llvm::is_contained(Mask, PoisonMaskElem)) {
2258 ReplicationFactor =
2259 Mask.take_while([](int MaskElt) { return MaskElt == 0; }).size();
2260 if (ReplicationFactor == 0 || Mask.size() % ReplicationFactor != 0)
2261 return false;
2262 VF = Mask.size() / ReplicationFactor;
2263 return isReplicationMaskWithParams(Mask, ReplicationFactor, VF);
2264 }
2265
2266 // However, if the mask contains undef's, we have to enumerate possible tuples
2267 // and pick one. There are bounds on replication factor: [1, mask size]
2268 // (where RF=1 is an identity shuffle, RF=mask size is a broadcast shuffle)
2269 // Additionally, mask size is a replication factor multiplied by vector size,
2270 // which further significantly reduces the search space.
2271
2272 // Before doing that, let's perform basic correctness checking first.
2273 int Largest = -1;
2274 for (int MaskElt : Mask) {
2275 if (MaskElt == PoisonMaskElem)
2276 continue;
2277 // Elements must be in non-decreasing order.
2278 if (MaskElt < Largest)
2279 return false;
2280 Largest = std::max(Largest, MaskElt);
2281 }
2282
2283 // Prefer larger replication factor if all else equal.
2284 for (int PossibleReplicationFactor :
2285 reverse(seq_inclusive<unsigned>(1, Mask.size()))) {
2286 if (Mask.size() % PossibleReplicationFactor != 0)
2287 continue;
2288 int PossibleVF = Mask.size() / PossibleReplicationFactor;
2289 if (!isReplicationMaskWithParams(Mask, PossibleReplicationFactor,
2290 PossibleVF))
2291 continue;
2292 ReplicationFactor = PossibleReplicationFactor;
2293 VF = PossibleVF;
2294 return true;
2295 }
2296
2297 return false;
2298}
2299
2300bool ShuffleVectorInst::isReplicationMask(int &ReplicationFactor,
2301 int &VF) const {
2302 // Not possible to express a shuffle mask for a scalable vector for this
2303 // case.
2305 return false;
2306
2307 VF = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2308 if (ShuffleMask.size() % VF != 0)
2309 return false;
2310 ReplicationFactor = ShuffleMask.size() / VF;
2311
2312 return isReplicationMaskWithParams(ShuffleMask, ReplicationFactor, VF);
2313}
2314
2316 if (VF <= 0 || Mask.size() < static_cast<unsigned>(VF) ||
2317 Mask.size() % VF != 0)
2318 return false;
2319 for (unsigned K = 0, Sz = Mask.size(); K < Sz; K += VF) {
2320 ArrayRef<int> SubMask = Mask.slice(K, VF);
2321 if (all_of(SubMask, [](int Idx) { return Idx == PoisonMaskElem; }))
2322 continue;
2323 SmallBitVector Used(VF, false);
2324 for (int Idx : SubMask) {
2325 if (Idx != PoisonMaskElem && Idx < VF)
2326 Used.set(Idx);
2327 }
2328 if (!Used.all())
2329 return false;
2330 }
2331 return true;
2332}
2333
2334/// Return true if this shuffle mask is a replication mask.
2336 // Not possible to express a shuffle mask for a scalable vector for this
2337 // case.
2339 return false;
2340 if (!isSingleSourceMask(ShuffleMask, VF))
2341 return false;
2342
2343 return isOneUseSingleSourceMask(ShuffleMask, VF);
2344}
2345
2346bool ShuffleVectorInst::isInterleave(unsigned Factor) {
2348 // shuffle_vector can only interleave fixed length vectors - for scalable
2349 // vectors, see the @llvm.vector.interleave2 intrinsic
2350 if (!OpTy)
2351 return false;
2352 unsigned OpNumElts = OpTy->getNumElements();
2353
2354 return isInterleaveMask(ShuffleMask, Factor, OpNumElts * 2);
2355}
2356
2358 ArrayRef<int> Mask, unsigned Factor, unsigned NumInputElts,
2359 SmallVectorImpl<unsigned> &StartIndexes) {
2360 unsigned NumElts = Mask.size();
2361 if (NumElts % Factor)
2362 return false;
2363
2364 unsigned LaneLen = NumElts / Factor;
2365 if (!isPowerOf2_32(LaneLen))
2366 return false;
2367
2368 StartIndexes.resize(Factor);
2369
2370 // Check whether each element matches the general interleaved rule.
2371 // Ignore undef elements, as long as the defined elements match the rule.
2372 // Outer loop processes all factors (x, y, z in the above example)
2373 unsigned I = 0, J;
2374 for (; I < Factor; I++) {
2375 unsigned SavedLaneValue;
2376 unsigned SavedNoUndefs = 0;
2377
2378 // Inner loop processes consecutive accesses (x, x+1... in the example)
2379 for (J = 0; J < LaneLen - 1; J++) {
2380 // Lane computes x's position in the Mask
2381 unsigned Lane = J * Factor + I;
2382 unsigned NextLane = Lane + Factor;
2383 int LaneValue = Mask[Lane];
2384 int NextLaneValue = Mask[NextLane];
2385
2386 // If both are defined, values must be sequential
2387 if (LaneValue >= 0 && NextLaneValue >= 0 &&
2388 LaneValue + 1 != NextLaneValue)
2389 break;
2390
2391 // If the next value is undef, save the current one as reference
2392 if (LaneValue >= 0 && NextLaneValue < 0) {
2393 SavedLaneValue = LaneValue;
2394 SavedNoUndefs = 1;
2395 }
2396
2397 // Undefs are allowed, but defined elements must still be consecutive:
2398 // i.e.: x,..., undef,..., x + 2,..., undef,..., undef,..., x + 5, ....
2399 // Verify this by storing the last non-undef followed by an undef
2400 // Check that following non-undef masks are incremented with the
2401 // corresponding distance.
2402 if (SavedNoUndefs > 0 && LaneValue < 0) {
2403 SavedNoUndefs++;
2404 if (NextLaneValue >= 0 &&
2405 SavedLaneValue + SavedNoUndefs != (unsigned)NextLaneValue)
2406 break;
2407 }
2408 }
2409
2410 if (J < LaneLen - 1)
2411 return false;
2412
2413 int StartMask = 0;
2414 if (Mask[I] >= 0) {
2415 // Check that the start of the I range (J=0) is greater than 0
2416 StartMask = Mask[I];
2417 } else if (Mask[(LaneLen - 1) * Factor + I] >= 0) {
2418 // StartMask defined by the last value in lane
2419 StartMask = Mask[(LaneLen - 1) * Factor + I] - J;
2420 } else if (SavedNoUndefs > 0) {
2421 // StartMask defined by some non-zero value in the j loop
2422 StartMask = SavedLaneValue - (LaneLen - 1 - SavedNoUndefs);
2423 }
2424 // else StartMask remains set to 0, i.e. all elements are undefs
2425
2426 if (StartMask < 0)
2427 return false;
2428 // We must stay within the vectors; This case can happen with undefs.
2429 if (StartMask + LaneLen > NumInputElts)
2430 return false;
2431
2432 StartIndexes[I] = StartMask;
2433 }
2434
2435 return true;
2436}
2437
2438/// Check if the mask is a DE-interleave mask of the given factor
2439/// \p Factor like:
2440/// <Index, Index+Factor, ..., Index+(NumElts-1)*Factor>
2442 unsigned Factor,
2443 unsigned &Index) {
2444 // Check all potential start indices from 0 to (Factor - 1).
2445 for (unsigned Idx = 0; Idx < Factor; Idx++) {
2446 unsigned I = 0;
2447
2448 // Check that elements are in ascending order by Factor. Ignore undef
2449 // elements.
2450 for (; I < Mask.size(); I++)
2451 if (Mask[I] >= 0 && static_cast<unsigned>(Mask[I]) != Idx + I * Factor)
2452 break;
2453
2454 if (I == Mask.size()) {
2455 Index = Idx;
2456 return true;
2457 }
2458 }
2459
2460 return false;
2461}
2462
2463/// Try to lower a vector shuffle as a bit rotation.
2464///
2465/// Look for a repeated rotation pattern in each sub group.
2466/// Returns an element-wise left bit rotation amount or -1 if failed.
2467static int matchShuffleAsBitRotate(ArrayRef<int> Mask, int NumSubElts) {
2468 int NumElts = Mask.size();
2469 assert((NumElts % NumSubElts) == 0 && "Illegal shuffle mask");
2470
2471 int RotateAmt = -1;
2472 for (int i = 0; i != NumElts; i += NumSubElts) {
2473 for (int j = 0; j != NumSubElts; ++j) {
2474 int M = Mask[i + j];
2475 if (M < 0)
2476 continue;
2477 if (M < i || M >= i + NumSubElts)
2478 return -1;
2479 int Offset = (NumSubElts - (M - (i + j))) % NumSubElts;
2480 if (0 <= RotateAmt && Offset != RotateAmt)
2481 return -1;
2482 RotateAmt = Offset;
2483 }
2484 }
2485 return RotateAmt;
2486}
2487
2489 ArrayRef<int> Mask, unsigned EltSizeInBits, unsigned MinSubElts,
2490 unsigned MaxSubElts, unsigned &NumSubElts, unsigned &RotateAmt) {
2491 for (NumSubElts = MinSubElts; NumSubElts <= MaxSubElts; NumSubElts *= 2) {
2492 int EltRotateAmt = matchShuffleAsBitRotate(Mask, NumSubElts);
2493 if (EltRotateAmt < 0)
2494 continue;
2495 RotateAmt = EltRotateAmt * EltSizeInBits;
2496 return true;
2497 }
2498
2499 return false;
2500}
2501
2502//===----------------------------------------------------------------------===//
2503// InsertValueInst Class
2504//===----------------------------------------------------------------------===//
2505
2506void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2507 const Twine &Name) {
2508 assert(getNumOperands() == 2 && "NumOperands not initialized?");
2509
2510 // There's no fundamental reason why we require at least one index
2511 // (other than weirdness with &*IdxBegin being invalid; see
2512 // getelementptr's init routine for example). But there's no
2513 // present need to support it.
2514 assert(!Idxs.empty() && "InsertValueInst must have at least one index");
2515
2517 Val->getType() && "Inserted value must match indexed type!");
2518 Op<0>() = Agg;
2519 Op<1>() = Val;
2520
2521 Indices.append(Idxs.begin(), Idxs.end());
2522 setName(Name);
2523}
2524
2525InsertValueInst::InsertValueInst(const InsertValueInst &IVI)
2526 : Instruction(IVI.getType(), InsertValue, AllocMarker),
2527 Indices(IVI.Indices) {
2528 Op<0>() = IVI.getOperand(0);
2529 Op<1>() = IVI.getOperand(1);
2531}
2532
2533//===----------------------------------------------------------------------===//
2534// ExtractValueInst Class
2535//===----------------------------------------------------------------------===//
2536
2537void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) {
2538 assert(getNumOperands() == 1 && "NumOperands not initialized?");
2539
2540 // There's no fundamental reason why we require at least one index.
2541 // But there's no present need to support it.
2542 assert(!Idxs.empty() && "ExtractValueInst must have at least one index");
2543
2544 Indices.append(Idxs.begin(), Idxs.end());
2545 setName(Name);
2546}
2547
2548ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)
2549 : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0),
2550 (BasicBlock *)nullptr),
2551 Indices(EVI.Indices) {
2553}
2554
2555// getIndexedType - Returns the type of the element that would be extracted
2556// with an extractvalue instruction with the specified parameters.
2557//
2558// A null type is returned if the indices are invalid for the specified
2559// pointer type.
2560//
2562 ArrayRef<unsigned> Idxs) {
2563 for (unsigned Index : Idxs) {
2564 // We can't use CompositeType::indexValid(Index) here.
2565 // indexValid() always returns true for arrays because getelementptr allows
2566 // out-of-bounds indices. Since we don't allow those for extractvalue and
2567 // insertvalue we need to check array indexing manually.
2568 // Since the only other types we can index into are struct types it's just
2569 // as easy to check those manually as well.
2570 if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
2571 if (Index >= AT->getNumElements())
2572 return nullptr;
2573 Agg = AT->getElementType();
2574 } else if (StructType *ST = dyn_cast<StructType>(Agg)) {
2575 if (Index >= ST->getNumElements())
2576 return nullptr;
2577 Agg = ST->getElementType(Index);
2578 } else {
2579 // Not a valid type to index into.
2580 return nullptr;
2581 }
2582 }
2583 return Agg;
2584}
2585
2586//===----------------------------------------------------------------------===//
2587// UnaryOperator Class
2588//===----------------------------------------------------------------------===//
2589
2591 const Twine &Name, InsertPosition InsertBefore)
2592 : UnaryInstruction(Ty, iType, S, InsertBefore) {
2593 Op<0>() = S;
2594 setName(Name);
2595 AssertOK();
2596}
2597
2599 InsertPosition InsertBefore) {
2600 return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore);
2601}
2602
2603void UnaryOperator::AssertOK() {
2604 Value *LHS = getOperand(0);
2605 (void)LHS; // Silence warnings.
2606#ifndef NDEBUG
2607 switch (getOpcode()) {
2608 case FNeg:
2609 assert(getType() == LHS->getType() &&
2610 "Unary operation should return same type as operand!");
2611 assert(getType()->isFPOrFPVectorTy() &&
2612 "Tried to create a floating-point operation on a "
2613 "non-floating-point type!");
2614 break;
2615 default: llvm_unreachable("Invalid opcode provided");
2616 }
2617#endif
2618}
2619
2620//===----------------------------------------------------------------------===//
2621// BinaryOperator Class
2622//===----------------------------------------------------------------------===//
2623
2625 const Twine &Name, InsertPosition InsertBefore)
2626 : Instruction(Ty, iType, AllocMarker, InsertBefore) {
2627 Op<0>() = S1;
2628 Op<1>() = S2;
2629 setName(Name);
2630 AssertOK();
2631}
2632
2633void BinaryOperator::AssertOK() {
2634 Value *LHS = getOperand(0), *RHS = getOperand(1);
2635 (void)LHS; (void)RHS; // Silence warnings.
2636 assert(LHS->getType() == RHS->getType() &&
2637 "Binary operator operand types must match!");
2638#ifndef NDEBUG
2639 switch (getOpcode()) {
2640 case Add: case Sub:
2641 case Mul:
2642 assert(getType() == LHS->getType() &&
2643 "Arithmetic operation should return same type as operands!");
2644 assert(getType()->isIntOrIntVectorTy() &&
2645 "Tried to create an integer operation on a non-integer type!");
2646 break;
2647 case FAdd: case FSub:
2648 case FMul:
2649 assert(getType() == LHS->getType() &&
2650 "Arithmetic operation should return same type as operands!");
2651 assert(getType()->isFPOrFPVectorTy() &&
2652 "Tried to create a floating-point operation on a "
2653 "non-floating-point type!");
2654 break;
2655 case UDiv:
2656 case SDiv:
2657 assert(getType() == LHS->getType() &&
2658 "Arithmetic operation should return same type as operands!");
2659 assert(getType()->isIntOrIntVectorTy() &&
2660 "Incorrect operand type (not integer) for S/UDIV");
2661 break;
2662 case FDiv:
2663 assert(getType() == LHS->getType() &&
2664 "Arithmetic operation should return same type as operands!");
2665 assert(getType()->isFPOrFPVectorTy() &&
2666 "Incorrect operand type (not floating point) for FDIV");
2667 break;
2668 case URem:
2669 case SRem:
2670 assert(getType() == LHS->getType() &&
2671 "Arithmetic operation should return same type as operands!");
2672 assert(getType()->isIntOrIntVectorTy() &&
2673 "Incorrect operand type (not integer) for S/UREM");
2674 break;
2675 case FRem:
2676 assert(getType() == LHS->getType() &&
2677 "Arithmetic operation should return same type as operands!");
2678 assert(getType()->isFPOrFPVectorTy() &&
2679 "Incorrect operand type (not floating point) for FREM");
2680 break;
2681 case Shl:
2682 case LShr:
2683 case AShr:
2684 assert(getType() == LHS->getType() &&
2685 "Shift operation should return same type as operands!");
2686 assert(getType()->isIntOrIntVectorTy() &&
2687 "Tried to create a shift operation on a non-integral type!");
2688 break;
2689 case And: case Or:
2690 case Xor:
2691 assert(getType() == LHS->getType() &&
2692 "Logical operation should return same type as operands!");
2693 assert(getType()->isIntOrIntVectorTy() &&
2694 "Tried to create a logical operation on a non-integral type!");
2695 break;
2696 default: llvm_unreachable("Invalid opcode provided");
2697 }
2698#endif
2699}
2700
2702 const Twine &Name,
2703 InsertPosition InsertBefore) {
2704 assert(S1->getType() == S2->getType() &&
2705 "Cannot create binary operator with two operands of differing type!");
2706 return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
2707}
2708
2710 InsertPosition InsertBefore) {
2711 Value *Zero = ConstantInt::get(Op->getType(), 0);
2712 return new BinaryOperator(Instruction::Sub, Zero, Op, Op->getType(), Name,
2713 InsertBefore);
2714}
2715
2717 InsertPosition InsertBefore) {
2718 Value *Zero = ConstantInt::get(Op->getType(), 0);
2719 return BinaryOperator::CreateNSWSub(Zero, Op, Name, InsertBefore);
2720}
2721
2723 InsertPosition InsertBefore) {
2724 Constant *C = Constant::getAllOnesValue(Op->getType());
2725 return new BinaryOperator(Instruction::Xor, Op, C,
2726 Op->getType(), Name, InsertBefore);
2727}
2728
2729// Exchange the two operands to this instruction. This instruction is safe to
2730// use on any binary instruction and does not modify the semantics of the
2731// instruction.
2733 if (!isCommutative())
2734 return true; // Can't commute operands
2735 Op<0>().swap(Op<1>());
2736 return false;
2737}
2738
2739//===----------------------------------------------------------------------===//
2740// FPMathOperator Class
2741//===----------------------------------------------------------------------===//
2742
2744 const MDNode *MD =
2745 cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath);
2746 if (!MD)
2747 return 0.0;
2749 return Accuracy->getValueAPF().convertToFloat();
2750}
2751
2752//===----------------------------------------------------------------------===//
2753// CastInst Class
2754//===----------------------------------------------------------------------===//
2755
2756// Just determine if this cast only deals with integral->integral conversion.
2758 switch (getOpcode()) {
2759 default: return false;
2760 case Instruction::ZExt:
2761 case Instruction::SExt:
2762 case Instruction::Trunc:
2763 return true;
2764 case Instruction::BitCast:
2765 return getOperand(0)->getType()->isIntegerTy() &&
2766 getType()->isIntegerTy();
2767 }
2768}
2769
2770/// This function determines if the CastInst does not require any bits to be
2771/// changed in order to effect the cast. Essentially, it identifies cases where
2772/// no code gen is necessary for the cast, hence the name no-op cast. For
2773/// example, the following are all no-op casts:
2774/// # bitcast i32* %x to i8*
2775/// # bitcast <2 x i32> %x to <4 x i16>
2776/// # ptrtoint i32* %x to i32 ; on 32-bit plaforms only
2777/// Determine if the described cast is a no-op.
2779 Type *SrcTy,
2780 Type *DestTy,
2781 const DataLayout &DL) {
2782 assert(castIsValid(Opcode, SrcTy, DestTy) && "method precondition");
2783 switch (Opcode) {
2784 default: llvm_unreachable("Invalid CastOp");
2785 case Instruction::Trunc:
2786 case Instruction::ZExt:
2787 case Instruction::SExt:
2788 case Instruction::FPTrunc:
2789 case Instruction::FPExt:
2790 case Instruction::UIToFP:
2791 case Instruction::SIToFP:
2792 case Instruction::FPToUI:
2793 case Instruction::FPToSI:
2794 case Instruction::AddrSpaceCast:
2795 // TODO: Target informations may give a more accurate answer here.
2796 return false;
2797 case Instruction::BitCast:
2798 return true; // BitCast never modifies bits.
2799 case Instruction::PtrToAddr:
2800 case Instruction::PtrToInt:
2801 return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==
2802 DestTy->getScalarSizeInBits();
2803 case Instruction::IntToPtr:
2804 return DL.getIntPtrType(DestTy)->getScalarSizeInBits() ==
2805 SrcTy->getScalarSizeInBits();
2806 }
2807}
2808
2810 return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), DL);
2811}
2812
2813/// This function determines if a pair of casts can be eliminated and what
2814/// opcode should be used in the elimination. This assumes that there are two
2815/// instructions like this:
2816/// * %F = firstOpcode SrcTy %x to MidTy
2817/// * %S = secondOpcode MidTy %F to DstTy
2818/// The function returns a resultOpcode so these two casts can be replaced with:
2819/// * %Replacement = resultOpcode %SrcTy %x to DstTy
2820/// If no such cast is permitted, the function returns 0.
2822 Instruction::CastOps secondOp,
2823 Type *SrcTy, Type *MidTy, Type *DstTy,
2824 const DataLayout *DL) {
2825 // Define the 144 possibilities for these two cast instructions. The values
2826 // in this matrix determine what to do in a given situation and select the
2827 // case in the switch below. The rows correspond to firstOp, the columns
2828 // correspond to secondOp. In looking at the table below, keep in mind
2829 // the following cast properties:
2830 //
2831 // Size Compare Source Destination
2832 // Operator Src ? Size Type Sign Type Sign
2833 // -------- ------------ ------------------- ---------------------
2834 // TRUNC > Integer Any Integral Any
2835 // ZEXT < Integral Unsigned Integer Any
2836 // SEXT < Integral Signed Integer Any
2837 // FPTOUI n/a FloatPt n/a Integral Unsigned
2838 // FPTOSI n/a FloatPt n/a Integral Signed
2839 // UITOFP n/a Integral Unsigned FloatPt n/a
2840 // SITOFP n/a Integral Signed FloatPt n/a
2841 // FPTRUNC > FloatPt n/a FloatPt n/a
2842 // FPEXT < FloatPt n/a FloatPt n/a
2843 // PTRTOINT n/a Pointer n/a Integral Unsigned
2844 // PTRTOADDR n/a Pointer n/a Integral Unsigned
2845 // INTTOPTR n/a Integral Unsigned Pointer n/a
2846 // BITCAST = FirstClass n/a FirstClass n/a
2847 // ADDRSPCST n/a Pointer n/a Pointer n/a
2848 //
2849 // NOTE: some transforms are safe, but we consider them to be non-profitable.
2850 // For example, we could merge "fptoui double to i32" + "zext i32 to i64",
2851 // into "fptoui double to i64", but this loses information about the range
2852 // of the produced value (we no longer know the top-part is all zeros).
2853 // Further this conversion is often much more expensive for typical hardware,
2854 // and causes issues when building libgcc. We disallow fptosi+sext for the
2855 // same reason.
2856 const unsigned numCastOps =
2857 Instruction::CastOpsEnd - Instruction::CastOpsBegin;
2858 // clang-format off
2859 static const uint8_t CastResults[numCastOps][numCastOps] = {
2860 // T F F U S F F P P I B A -+
2861 // R Z S P P I I T P 2 2 N T S |
2862 // U E E 2 2 2 2 R E I A T C C +- secondOp
2863 // N X X U S F F N X N D 2 V V |
2864 // C T T I I P P C T T R P T T -+
2865 { 1, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // Trunc -+
2866 { 8, 1, 9,99,99, 2,17,99,99,99,99, 2, 3, 0}, // ZExt |
2867 { 8, 0, 1,99,99, 0, 2,99,99,99,99, 0, 3, 0}, // SExt |
2868 { 0, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // FPToUI |
2869 { 0, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // FPToSI |
2870 { 99,99,99, 0, 0,99,99, 0, 0,99,99,99, 4, 0}, // UIToFP +- firstOp
2871 { 99,99,99, 0, 0,99,99, 0, 0,99,99,99, 4, 0}, // SIToFP |
2872 { 99,99,99, 0, 0,99,99, 0, 0,99,99,99, 4, 0}, // FPTrunc |
2873 { 99,99,99, 2, 2,99,99, 8, 2,99,99,99, 4, 0}, // FPExt |
2874 { 1, 0, 0,99,99, 0, 0,99,99,99,99, 7, 3, 0}, // PtrToInt |
2875 { 0, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // PtrToAddr |
2876 { 99,99,99,99,99,99,99,99,99,11,11,99,15, 0}, // IntToPtr |
2877 { 5, 5, 5, 0, 0, 5, 5, 0, 0,16,16, 5, 1,14}, // BitCast |
2878 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+
2879 };
2880 // clang-format on
2881
2882 // TODO: This logic could be encoded into the table above and handled in the
2883 // switch below.
2884 // If either of the casts are a bitcast from scalar to vector, disallow the
2885 // merging. However, any pair of bitcasts are allowed.
2886 bool IsFirstBitcast = (firstOp == Instruction::BitCast);
2887 bool IsSecondBitcast = (secondOp == Instruction::BitCast);
2888 bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;
2889
2890 // Check if any of the casts convert scalars <-> vectors.
2891 if ((IsFirstBitcast && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
2892 (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
2893 if (!AreBothBitcasts)
2894 return 0;
2895
2896 int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
2897 [secondOp-Instruction::CastOpsBegin];
2898 switch (ElimCase) {
2899 case 0:
2900 // Categorically disallowed.
2901 return 0;
2902 case 1:
2903 // Allowed, use first cast's opcode.
2904 return firstOp;
2905 case 2:
2906 // Allowed, use second cast's opcode.
2907 return secondOp;
2908 case 3:
2909 // No-op cast in second op implies firstOp as long as the DestTy
2910 // is integer and we are not converting between a vector and a
2911 // non-vector type.
2912 if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
2913 return firstOp;
2914 return 0;
2915 case 4:
2916 // No-op cast in second op implies firstOp as long as the DestTy
2917 // matches MidTy.
2918 if (DstTy == MidTy)
2919 return firstOp;
2920 return 0;
2921 case 5:
2922 // No-op cast in first op implies secondOp as long as the SrcTy
2923 // is an integer.
2924 if (SrcTy->isIntegerTy())
2925 return secondOp;
2926 return 0;
2927 case 7: {
2928 // Disable inttoptr/ptrtoint optimization if enabled.
2929 if (DisableI2pP2iOpt)
2930 return 0;
2931
2932 // Cannot simplify if address spaces are different!
2933 if (SrcTy != DstTy)
2934 return 0;
2935
2936 // Cannot simplify if the intermediate integer size is smaller than the
2937 // pointer size.
2938 unsigned MidSize = MidTy->getScalarSizeInBits();
2939 if (!DL || MidSize < DL->getPointerTypeSizeInBits(SrcTy))
2940 return 0;
2941
2942 return Instruction::BitCast;
2943 }
2944 case 8: {
2945 // ext, trunc -> bitcast, if the SrcTy and DstTy are the same
2946 // ext, trunc -> ext, if sizeof(SrcTy) < sizeof(DstTy)
2947 // ext, trunc -> trunc, if sizeof(SrcTy) > sizeof(DstTy)
2948 unsigned SrcSize = SrcTy->getScalarSizeInBits();
2949 unsigned DstSize = DstTy->getScalarSizeInBits();
2950 if (SrcTy == DstTy)
2951 return Instruction::BitCast;
2952 if (SrcSize < DstSize)
2953 return firstOp;
2954 if (SrcSize > DstSize)
2955 return secondOp;
2956 return 0;
2957 }
2958 case 9:
2959 // zext, sext -> zext, because sext can't sign extend after zext
2960 return Instruction::ZExt;
2961 case 11: {
2962 // inttoptr, ptrtoint/ptrtoaddr -> integer cast
2963 if (!DL)
2964 return 0;
2965 unsigned MidSize = secondOp == Instruction::PtrToAddr
2966 ? DL->getAddressSizeInBits(MidTy)
2967 : DL->getPointerTypeSizeInBits(MidTy);
2968 unsigned SrcSize = SrcTy->getScalarSizeInBits();
2969 unsigned DstSize = DstTy->getScalarSizeInBits();
2970 // If the middle size is smaller than both source and destination,
2971 // an additional masking operation would be required.
2972 if (MidSize < SrcSize && MidSize < DstSize)
2973 return 0;
2974 if (DstSize < SrcSize)
2975 return Instruction::Trunc;
2976 if (DstSize > SrcSize)
2977 return Instruction::ZExt;
2978 return Instruction::BitCast;
2979 }
2980 case 12:
2981 // addrspacecast, addrspacecast -> bitcast, if SrcAS == DstAS
2982 // addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS
2983 if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
2984 return Instruction::AddrSpaceCast;
2985 return Instruction::BitCast;
2986 case 13:
2987 // FIXME: this state can be merged with (1), but the following assert
2988 // is useful to check the correcteness of the sequence due to semantic
2989 // change of bitcast.
2990 assert(
2991 SrcTy->isPtrOrPtrVectorTy() &&
2992 MidTy->isPtrOrPtrVectorTy() &&
2993 DstTy->isPtrOrPtrVectorTy() &&
2994 SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() &&
2995 MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
2996 "Illegal addrspacecast, bitcast sequence!");
2997 // Allowed, use first cast's opcode
2998 return firstOp;
2999 case 14:
3000 // bitcast, addrspacecast -> addrspacecast
3001 return Instruction::AddrSpaceCast;
3002 case 15:
3003 // FIXME: this state can be merged with (1), but the following assert
3004 // is useful to check the correcteness of the sequence due to semantic
3005 // change of bitcast.
3006 assert(
3007 SrcTy->isIntOrIntVectorTy() &&
3008 MidTy->isPtrOrPtrVectorTy() &&
3009 DstTy->isPtrOrPtrVectorTy() &&
3010 MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
3011 "Illegal inttoptr, bitcast sequence!");
3012 // Allowed, use first cast's opcode
3013 return firstOp;
3014 case 16:
3015 // FIXME: this state can be merged with (2), but the following assert
3016 // is useful to check the correcteness of the sequence due to semantic
3017 // change of bitcast.
3018 assert(
3019 SrcTy->isPtrOrPtrVectorTy() &&
3020 MidTy->isPtrOrPtrVectorTy() &&
3021 DstTy->isIntOrIntVectorTy() &&
3022 SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&
3023 "Illegal bitcast, ptrtoint sequence!");
3024 // Allowed, use second cast's opcode
3025 return secondOp;
3026 case 17:
3027 // (sitofp (zext x)) -> (uitofp x)
3028 return Instruction::UIToFP;
3029 case 99:
3030 // Cast combination can't happen (error in input). This is for all cases
3031 // where the MidTy is not the same for the two cast instructions.
3032 llvm_unreachable("Invalid Cast Combination");
3033 default:
3034 llvm_unreachable("Error in CastResults table!!!");
3035 }
3036}
3037
3039 const Twine &Name, InsertPosition InsertBefore) {
3040 assert(castIsValid(op, S, Ty) && "Invalid cast!");
3041 // Construct and return the appropriate CastInst subclass
3042 switch (op) {
3043 case Trunc: return new TruncInst (S, Ty, Name, InsertBefore);
3044 case ZExt: return new ZExtInst (S, Ty, Name, InsertBefore);
3045 case SExt: return new SExtInst (S, Ty, Name, InsertBefore);
3046 case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertBefore);
3047 case FPExt: return new FPExtInst (S, Ty, Name, InsertBefore);
3048 case UIToFP: return new UIToFPInst (S, Ty, Name, InsertBefore);
3049 case SIToFP: return new SIToFPInst (S, Ty, Name, InsertBefore);
3050 case FPToUI: return new FPToUIInst (S, Ty, Name, InsertBefore);
3051 case FPToSI: return new FPToSIInst (S, Ty, Name, InsertBefore);
3052 case PtrToAddr: return new PtrToAddrInst (S, Ty, Name, InsertBefore);
3053 case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertBefore);
3054 case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertBefore);
3055 case BitCast:
3056 return new BitCastInst(S, Ty, Name, InsertBefore);
3057 case AddrSpaceCast:
3058 return new AddrSpaceCastInst(S, Ty, Name, InsertBefore);
3059 default:
3060 llvm_unreachable("Invalid opcode provided");
3061 }
3062}
3063
3065 InsertPosition InsertBefore) {
3066 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3067 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3068 return Create(Instruction::ZExt, S, Ty, Name, InsertBefore);
3069}
3070
3072 InsertPosition InsertBefore) {
3073 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3074 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3075 return Create(Instruction::SExt, S, Ty, Name, InsertBefore);
3076}
3077
3079 InsertPosition InsertBefore) {
3080 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3081 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3082 return Create(Instruction::Trunc, S, Ty, Name, InsertBefore);
3083}
3084
3085/// Create a BitCast or a PtrToInt cast instruction
3087 InsertPosition InsertBefore) {
3088 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3089 assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
3090 "Invalid cast");
3091 assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
3092 assert((!Ty->isVectorTy() ||
3093 cast<VectorType>(Ty)->getElementCount() ==
3094 cast<VectorType>(S->getType())->getElementCount()) &&
3095 "Invalid cast");
3096
3097 if (Ty->isIntOrIntVectorTy())
3098 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3099
3100 return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore);
3101}
3102
3104 Value *S, Type *Ty, const Twine &Name, InsertPosition InsertBefore) {
3105 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3106 assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
3107
3108 if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
3109 return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);
3110
3111 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3112}
3113
3115 const Twine &Name,
3116 InsertPosition InsertBefore) {
3117 if (S->getType()->isPointerTy() && Ty->isIntegerTy())
3118 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3119 if (S->getType()->isIntegerTy() && Ty->isPointerTy())
3120 return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore);
3121
3122 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3123}
3124
3126 const Twine &Name,
3127 InsertPosition InsertBefore) {
3128 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
3129 "Invalid integer cast");
3130 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3131 unsigned DstBits = Ty->getScalarSizeInBits();
3132 Instruction::CastOps opcode =
3133 (SrcBits == DstBits ? Instruction::BitCast :
3134 (SrcBits > DstBits ? Instruction::Trunc :
3135 (isSigned ? Instruction::SExt : Instruction::ZExt)));
3136 return Create(opcode, C, Ty, Name, InsertBefore);
3137}
3138
3140 InsertPosition InsertBefore) {
3141 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
3142 "Invalid cast");
3143 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3144 unsigned DstBits = Ty->getScalarSizeInBits();
3145 assert((C->getType() == Ty || SrcBits != DstBits) && "Invalid cast");
3146 Instruction::CastOps opcode =
3147 (SrcBits == DstBits ? Instruction::BitCast :
3148 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
3149 return Create(opcode, C, Ty, Name, InsertBefore);
3150}
3151
3152bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) {
3153 if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
3154 return false;
3155
3156 if (SrcTy == DestTy)
3157 return true;
3158
3159 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
3160 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) {
3161 if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
3162 // An element by element cast. Valid if casting the elements is valid.
3163 SrcTy = SrcVecTy->getElementType();
3164 DestTy = DestVecTy->getElementType();
3165 }
3166 }
3167 }
3168
3169 if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) {
3170 if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) {
3171 return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace();
3172 }
3173 }
3174
3175 TypeSize SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
3176 TypeSize DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
3177
3178 // Could still have vectors of pointers if the number of elements doesn't
3179 // match
3180 if (SrcBits.getKnownMinValue() == 0 || DestBits.getKnownMinValue() == 0)
3181 return false;
3182
3183 if (SrcBits != DestBits)
3184 return false;
3185
3186 return true;
3187}
3188
3190 const DataLayout &DL) {
3191 // ptrtoint and inttoptr are not allowed on non-integral pointers
3192 if (auto *PtrTy = dyn_cast<PointerType>(SrcTy))
3193 if (auto *IntTy = dyn_cast<IntegerType>(DestTy))
3194 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
3195 !DL.isNonIntegralPointerType(PtrTy));
3196 if (auto *PtrTy = dyn_cast<PointerType>(DestTy))
3197 if (auto *IntTy = dyn_cast<IntegerType>(SrcTy))
3198 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
3199 !DL.isNonIntegralPointerType(PtrTy));
3200
3201 return isBitCastable(SrcTy, DestTy);
3202}
3203
3204// Provide a way to get a "cast" where the cast opcode is inferred from the
3205// types and size of the operand. This, basically, is a parallel of the
3206// logic in the castIsValid function below. This axiom should hold:
3207// castIsValid( getCastOpcode(Val, Ty), Val, Ty)
3208// should not assert in castIsValid. In other words, this produces a "correct"
3209// casting opcode for the arguments passed to it.
3212 const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) {
3213 Type *SrcTy = Src->getType();
3214
3215 assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() &&
3216 "Only first class types are castable!");
3217
3218 if (SrcTy == DestTy)
3219 return BitCast;
3220
3221 // FIXME: Check address space sizes here
3222 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
3223 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
3224 if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
3225 // An element by element cast. Find the appropriate opcode based on the
3226 // element types.
3227 SrcTy = SrcVecTy->getElementType();
3228 DestTy = DestVecTy->getElementType();
3229 }
3230
3231 // Get the bit sizes, we'll need these
3232 // FIXME: This doesn't work for scalable vector types with different element
3233 // counts that don't call getElementType above.
3234 unsigned SrcBits =
3235 SrcTy->getPrimitiveSizeInBits().getFixedValue(); // 0 for ptr
3236 unsigned DestBits =
3237 DestTy->getPrimitiveSizeInBits().getFixedValue(); // 0 for ptr
3238
3239 // Run through the possibilities ...
3240 if (DestTy->isIntegerTy()) { // Casting to integral
3241 if (SrcTy->isIntegerTy()) { // Casting from integral
3242 if (DestBits < SrcBits)
3243 return Trunc; // int -> smaller int
3244 else if (DestBits > SrcBits) { // its an extension
3245 if (SrcIsSigned)
3246 return SExt; // signed -> SEXT
3247 else
3248 return ZExt; // unsigned -> ZEXT
3249 } else {
3250 return BitCast; // Same size, No-op cast
3251 }
3252 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
3253 if (DestIsSigned)
3254 return FPToSI; // FP -> sint
3255 else
3256 return FPToUI; // FP -> uint
3257 } else if (SrcTy->isVectorTy()) {
3258 assert(DestBits == SrcBits &&
3259 "Casting vector to integer of different width");
3260 return BitCast; // Same size, no-op cast
3261 } else {
3262 assert(SrcTy->isPointerTy() &&
3263 "Casting from a value that is not first-class type");
3264 return PtrToInt; // ptr -> int
3265 }
3266 } else if (DestTy->isFloatingPointTy()) { // Casting to floating pt
3267 if (SrcTy->isIntegerTy()) { // Casting from integral
3268 if (SrcIsSigned)
3269 return SIToFP; // sint -> FP
3270 else
3271 return UIToFP; // uint -> FP
3272 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
3273 if (DestBits < SrcBits) {
3274 return FPTrunc; // FP -> smaller FP
3275 } else if (DestBits > SrcBits) {
3276 return FPExt; // FP -> larger FP
3277 } else {
3278 return BitCast; // same size, no-op cast
3279 }
3280 } else if (SrcTy->isVectorTy()) {
3281 assert(DestBits == SrcBits &&
3282 "Casting vector to floating point of different width");
3283 return BitCast; // same size, no-op cast
3284 }
3285 llvm_unreachable("Casting pointer or non-first class to float");
3286 } else if (DestTy->isVectorTy()) {
3287 assert(DestBits == SrcBits &&
3288 "Illegal cast to vector (wrong type or size)");
3289 return BitCast;
3290 } else if (DestTy->isPointerTy()) {
3291 if (SrcTy->isPointerTy()) {
3292 if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace())
3293 return AddrSpaceCast;
3294 return BitCast; // ptr -> ptr
3295 } else if (SrcTy->isIntegerTy()) {
3296 return IntToPtr; // int -> ptr
3297 }
3298 llvm_unreachable("Casting pointer to other than pointer or int");
3299 }
3300 llvm_unreachable("Casting to type that is not first-class");
3301}
3302
3303//===----------------------------------------------------------------------===//
3304// CastInst SubClass Constructors
3305//===----------------------------------------------------------------------===//
3306
3307/// Check that the construction parameters for a CastInst are correct. This
3308/// could be broken out into the separate constructors but it is useful to have
3309/// it in one place and to eliminate the redundant code for getting the sizes
3310/// of the types involved.
3311bool
3313 if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() ||
3314 SrcTy->isAggregateType() || DstTy->isAggregateType())
3315 return false;
3316
3317 // Get the size of the types in bits, and whether we are dealing
3318 // with vector types, we'll need this later.
3319 bool SrcIsVec = isa<VectorType>(SrcTy);
3320 bool DstIsVec = isa<VectorType>(DstTy);
3321 unsigned SrcScalarBitSize = SrcTy->getScalarSizeInBits();
3322 unsigned DstScalarBitSize = DstTy->getScalarSizeInBits();
3323
3324 // If these are vector types, get the lengths of the vectors (using zero for
3325 // scalar types means that checking that vector lengths match also checks that
3326 // scalars are not being converted to vectors or vectors to scalars).
3327 ElementCount SrcEC = SrcIsVec ? cast<VectorType>(SrcTy)->getElementCount()
3329 ElementCount DstEC = DstIsVec ? cast<VectorType>(DstTy)->getElementCount()
3331
3332 // Switch on the opcode provided
3333 switch (op) {
3334 default: return false; // This is an input error
3335 case Instruction::Trunc:
3336 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3337 SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
3338 case Instruction::ZExt:
3339 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3340 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3341 case Instruction::SExt:
3342 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3343 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3344 case Instruction::FPTrunc:
3345 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
3346 SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
3347 case Instruction::FPExt:
3348 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
3349 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3350 case Instruction::UIToFP:
3351 case Instruction::SIToFP:
3352 return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() &&
3353 SrcEC == DstEC;
3354 case Instruction::FPToUI:
3355 case Instruction::FPToSI:
3356 return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() &&
3357 SrcEC == DstEC;
3358 case Instruction::PtrToAddr:
3359 case Instruction::PtrToInt:
3360 if (SrcEC != DstEC)
3361 return false;
3362 return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy();
3363 case Instruction::IntToPtr:
3364 if (SrcEC != DstEC)
3365 return false;
3366 return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy();
3367 case Instruction::BitCast: {
3368 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
3369 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
3370
3371 // BitCast implies a no-op cast of type only. No bits change.
3372 // However, you can't cast pointers to anything but pointers.
3373 if (!SrcPtrTy != !DstPtrTy)
3374 return false;
3375
3376 // For non-pointer cases, the cast is okay if the source and destination bit
3377 // widths are identical.
3378 if (!SrcPtrTy)
3379 return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits();
3380
3381 // If both are pointers then the address spaces must match.
3382 if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace())
3383 return false;
3384
3385 // A vector of pointers must have the same number of elements.
3386 if (SrcIsVec && DstIsVec)
3387 return SrcEC == DstEC;
3388 if (SrcIsVec)
3389 return SrcEC == ElementCount::getFixed(1);
3390 if (DstIsVec)
3391 return DstEC == ElementCount::getFixed(1);
3392
3393 return true;
3394 }
3395 case Instruction::AddrSpaceCast: {
3396 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
3397 if (!SrcPtrTy)
3398 return false;
3399
3400 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
3401 if (!DstPtrTy)
3402 return false;
3403
3404 if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())
3405 return false;
3406
3407 return SrcEC == DstEC;
3408 }
3409 }
3410}
3411
3413 InsertPosition InsertBefore)
3414 : CastInst(Ty, Trunc, S, Name, InsertBefore) {
3415 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
3416}
3417
/// Construct a zero-extension cast of \p S to \p Ty; asserts that the
/// operand/type pair forms a legal ZExt (see castIsValid: integer or
/// integer-vector widening with matching element counts).
ZExtInst::ZExtInst(Value *S, Type *Ty, const Twine &Name,
                   InsertPosition InsertBefore)
    : CastInst(Ty, ZExt, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
}
3423
/// Construct a sign-extension cast of \p S to \p Ty; asserts that the
/// operand/type pair forms a legal SExt (see castIsValid: integer or
/// integer-vector widening with matching element counts).
SExtInst::SExtInst(Value *S, Type *Ty, const Twine &Name,
                   InsertPosition InsertBefore)
    : CastInst(Ty, SExt, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
}
3429
3431 InsertPosition InsertBefore)
3432 : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
3433 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
3434}
3435
3437 InsertPosition InsertBefore)
3438 : CastInst(Ty, FPExt, S, Name, InsertBefore) {
3439 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
3440}
3441
3443 InsertPosition InsertBefore)
3444 : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
3445 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
3446}
3447
3449 InsertPosition InsertBefore)
3450 : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
3451 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
3452}
3453
3455 InsertPosition InsertBefore)
3456 : CastInst(Ty, FPToUI, S, Name, InsertBefore) {
3457 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
3458}
3459
3461 InsertPosition InsertBefore)
3462 : CastInst(Ty, FPToSI, S, Name, InsertBefore) {
3463 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
3464}
3465
3467 InsertPosition InsertBefore)
3468 : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {
3469 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
3470}
3471
3473 InsertPosition InsertBefore)
3474 : CastInst(Ty, PtrToAddr, S, Name, InsertBefore) {
3475 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToAddr");
3476}
3477
3479 InsertPosition InsertBefore)
3480 : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
3481 assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
3482}
3483
3485 InsertPosition InsertBefore)
3486 : CastInst(Ty, BitCast, S, Name, InsertBefore) {
3487 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
3488}
3489
3491 InsertPosition InsertBefore)
3492 : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) {
3493 assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
3494}
3495
3496//===----------------------------------------------------------------------===//
3497// CmpInst Classes
3498//===----------------------------------------------------------------------===//
3499
3501 Value *RHS, const Twine &Name, InsertPosition InsertBefore,
3502 Instruction *FlagsSource)
3503 : Instruction(ty, op, AllocMarker, InsertBefore) {
3504 Op<0>() = LHS;
3505 Op<1>() = RHS;
3506 setPredicate(predicate);
3507 setName(Name);
3508 if (FlagsSource)
3509 copyIRFlags(FlagsSource);
3510}
3511
3513 const Twine &Name, InsertPosition InsertBefore) {
3514 if (Op == Instruction::ICmp) {
3515 if (InsertBefore.isValid())
3516 return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate),
3517 S1, S2, Name);
3518 else
3519 return new ICmpInst(CmpInst::Predicate(predicate),
3520 S1, S2, Name);
3521 }
3522
3523 if (InsertBefore.isValid())
3524 return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
3525 S1, S2, Name);
3526 else
3527 return new FCmpInst(CmpInst::Predicate(predicate),
3528 S1, S2, Name);
3529}
3530
3532 Value *S2,
3533 const Instruction *FlagsSource,
3534 const Twine &Name,
3535 InsertPosition InsertBefore) {
3536 CmpInst *Inst = Create(Op, Pred, S1, S2, Name, InsertBefore);
3537 Inst->copyIRFlags(FlagsSource);
3538 return Inst;
3539}
3540
3542 if (ICmpInst *IC = dyn_cast<ICmpInst>(this))
3543 IC->swapOperands();
3544 else
3545 cast<FCmpInst>(this)->swapOperands();
3546}
3547
3549 if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
3550 return IC->isCommutative();
3551 return cast<FCmpInst>(this)->isCommutative();
3552}
3553
3556 return ICmpInst::isEquality(P);
3558 return FCmpInst::isEquality(P);
3559 llvm_unreachable("Unsupported predicate kind");
3560}
3561
3562// Returns true if either operand of CmpInst is a provably non-zero
3563// floating-point constant.
3564static bool hasNonZeroFPOperands(const CmpInst *Cmp) {
3565 auto *LHS = dyn_cast<Constant>(Cmp->getOperand(0));
3566 auto *RHS = dyn_cast<Constant>(Cmp->getOperand(1));
3567 if (auto *Const = LHS ? LHS : RHS) {
3568 using namespace llvm::PatternMatch;
3569 return match(Const, m_NonZeroNotDenormalFP());
3570 }
3571 return false;
3572}
3573
3574// Floating-point equality is not an equivalence when comparing +0.0 with
3575// -0.0, when comparing NaN with another value, or when flushing
3576// denormals-to-zero.
3577bool CmpInst::isEquivalence(bool Invert) const {
3578 switch (Invert ? getInversePredicate() : getPredicate()) {
3580 return true;
3582 if (!hasNoNaNs())
3583 return false;
3584 [[fallthrough]];
3586 return hasNonZeroFPOperands(this);
3587 default:
3588 return false;
3589 }
3590}
3591
3593 switch (pred) {
3594 default: llvm_unreachable("Unknown cmp predicate!");
3595 case ICMP_EQ: return ICMP_NE;
3596 case ICMP_NE: return ICMP_EQ;
3597 case ICMP_UGT: return ICMP_ULE;
3598 case ICMP_ULT: return ICMP_UGE;
3599 case ICMP_UGE: return ICMP_ULT;
3600 case ICMP_ULE: return ICMP_UGT;
3601 case ICMP_SGT: return ICMP_SLE;
3602 case ICMP_SLT: return ICMP_SGE;
3603 case ICMP_SGE: return ICMP_SLT;
3604 case ICMP_SLE: return ICMP_SGT;
3605
3606 case FCMP_OEQ: return FCMP_UNE;
3607 case FCMP_ONE: return FCMP_UEQ;
3608 case FCMP_OGT: return FCMP_ULE;
3609 case FCMP_OLT: return FCMP_UGE;
3610 case FCMP_OGE: return FCMP_ULT;
3611 case FCMP_OLE: return FCMP_UGT;
3612 case FCMP_UEQ: return FCMP_ONE;
3613 case FCMP_UNE: return FCMP_OEQ;
3614 case FCMP_UGT: return FCMP_OLE;
3615 case FCMP_ULT: return FCMP_OGE;
3616 case FCMP_UGE: return FCMP_OLT;
3617 case FCMP_ULE: return FCMP_OGT;
3618 case FCMP_ORD: return FCMP_UNO;
3619 case FCMP_UNO: return FCMP_ORD;
3620 case FCMP_TRUE: return FCMP_FALSE;
3621 case FCMP_FALSE: return FCMP_TRUE;
3622 }
3623}
3624
3626 switch (Pred) {
3627 default: return "unknown";
3628 case FCmpInst::FCMP_FALSE: return "false";
3629 case FCmpInst::FCMP_OEQ: return "oeq";
3630 case FCmpInst::FCMP_OGT: return "ogt";
3631 case FCmpInst::FCMP_OGE: return "oge";
3632 case FCmpInst::FCMP_OLT: return "olt";
3633 case FCmpInst::FCMP_OLE: return "ole";
3634 case FCmpInst::FCMP_ONE: return "one";
3635 case FCmpInst::FCMP_ORD: return "ord";
3636 case FCmpInst::FCMP_UNO: return "uno";
3637 case FCmpInst::FCMP_UEQ: return "ueq";
3638 case FCmpInst::FCMP_UGT: return "ugt";
3639 case FCmpInst::FCMP_UGE: return "uge";
3640 case FCmpInst::FCMP_ULT: return "ult";
3641 case FCmpInst::FCMP_ULE: return "ule";
3642 case FCmpInst::FCMP_UNE: return "une";
3643 case FCmpInst::FCMP_TRUE: return "true";
3644 case ICmpInst::ICMP_EQ: return "eq";
3645 case ICmpInst::ICMP_NE: return "ne";
3646 case ICmpInst::ICMP_SGT: return "sgt";
3647 case ICmpInst::ICMP_SGE: return "sge";
3648 case ICmpInst::ICMP_SLT: return "slt";
3649 case ICmpInst::ICMP_SLE: return "sle";
3650 case ICmpInst::ICMP_UGT: return "ugt";
3651 case ICmpInst::ICMP_UGE: return "uge";
3652 case ICmpInst::ICMP_ULT: return "ult";
3653 case ICmpInst::ICMP_ULE: return "ule";
3654 }
3655}
3656
3658 OS << CmpInst::getPredicateName(Pred);
3659 return OS;
3660}
3661
3663 switch (pred) {
3664 default: llvm_unreachable("Unknown icmp predicate!");
3665 case ICMP_EQ: case ICMP_NE:
3666 case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE:
3667 return pred;
3668 case ICMP_UGT: return ICMP_SGT;
3669 case ICMP_ULT: return ICMP_SLT;
3670 case ICMP_UGE: return ICMP_SGE;
3671 case ICMP_ULE: return ICMP_SLE;
3672 }
3673}
3674
3676 switch (pred) {
3677 default: llvm_unreachable("Unknown icmp predicate!");
3678 case ICMP_EQ: case ICMP_NE:
3679 case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE:
3680 return pred;
3681 case ICMP_SGT: return ICMP_UGT;
3682 case ICMP_SLT: return ICMP_ULT;
3683 case ICMP_SGE: return ICMP_UGE;
3684 case ICMP_SLE: return ICMP_ULE;
3685 }
3686}
3687
3689 switch (pred) {
3690 default: llvm_unreachable("Unknown cmp predicate!");
3691 case ICMP_EQ: case ICMP_NE:
3692 return pred;
3693 case ICMP_SGT: return ICMP_SLT;
3694 case ICMP_SLT: return ICMP_SGT;
3695 case ICMP_SGE: return ICMP_SLE;
3696 case ICMP_SLE: return ICMP_SGE;
3697 case ICMP_UGT: return ICMP_ULT;
3698 case ICMP_ULT: return ICMP_UGT;
3699 case ICMP_UGE: return ICMP_ULE;
3700 case ICMP_ULE: return ICMP_UGE;
3701
3702 case FCMP_FALSE: case FCMP_TRUE:
3703 case FCMP_OEQ: case FCMP_ONE:
3704 case FCMP_UEQ: case FCMP_UNE:
3705 case FCMP_ORD: case FCMP_UNO:
3706 return pred;
3707 case FCMP_OGT: return FCMP_OLT;
3708 case FCMP_OLT: return FCMP_OGT;
3709 case FCMP_OGE: return FCMP_OLE;
3710 case FCMP_OLE: return FCMP_OGE;
3711 case FCMP_UGT: return FCMP_ULT;
3712 case FCMP_ULT: return FCMP_UGT;
3713 case FCMP_UGE: return FCMP_ULE;
3714 case FCMP_ULE: return FCMP_UGE;
3715 }
3716}
3717
3719 switch (pred) {
3720 case ICMP_SGE:
3721 case ICMP_SLE:
3722 case ICMP_UGE:
3723 case ICMP_ULE:
3724 case FCMP_OGE:
3725 case FCMP_OLE:
3726 case FCMP_UGE:
3727 case FCMP_ULE:
3728 return true;
3729 default:
3730 return false;
3731 }
3732}
3733
3735 switch (pred) {
3736 case ICMP_SGT:
3737 case ICMP_SLT:
3738 case ICMP_UGT:
3739 case ICMP_ULT:
3740 case FCMP_OGT:
3741 case FCMP_OLT:
3742 case FCMP_UGT:
3743 case FCMP_ULT:
3744 return true;
3745 default:
3746 return false;
3747 }
3748}
3749
3751 switch (pred) {
3752 case ICMP_SGE:
3753 return ICMP_SGT;
3754 case ICMP_SLE:
3755 return ICMP_SLT;
3756 case ICMP_UGE:
3757 return ICMP_UGT;
3758 case ICMP_ULE:
3759 return ICMP_ULT;
3760 case FCMP_OGE:
3761 return FCMP_OGT;
3762 case FCMP_OLE:
3763 return FCMP_OLT;
3764 case FCMP_UGE:
3765 return FCMP_UGT;
3766 case FCMP_ULE:
3767 return FCMP_ULT;
3768 default:
3769 return pred;
3770 }
3771}
3772
3774 switch (pred) {
3775 case ICMP_SGT:
3776 return ICMP_SGE;
3777 case ICMP_SLT:
3778 return ICMP_SLE;
3779 case ICMP_UGT:
3780 return ICMP_UGE;
3781 case ICMP_ULT:
3782 return ICMP_ULE;
3783 case FCMP_OGT:
3784 return FCMP_OGE;
3785 case FCMP_OLT:
3786 return FCMP_OLE;
3787 case FCMP_UGT:
3788 return FCMP_UGE;
3789 case FCMP_ULT:
3790 return FCMP_ULE;
3791 default:
3792 return pred;
3793 }
3794}
3795
3797 assert(CmpInst::isRelational(pred) && "Call only with relational predicate!");
3798
3799 if (isStrictPredicate(pred))
3800 return getNonStrictPredicate(pred);
3801 if (isNonStrictPredicate(pred))
3802 return getStrictPredicate(pred);
3803
3804 llvm_unreachable("Unknown predicate!");
3805}
3806
3808 switch (predicate) {
3809 default: return false;
3811 case ICmpInst::ICMP_UGE: return true;
3812 }
3813}
3814
3816 switch (predicate) {
3817 default: return false;
3819 case ICmpInst::ICMP_SGE: return true;
3820 }
3821}
3822
3823bool ICmpInst::compare(const APInt &LHS, const APInt &RHS,
3824 ICmpInst::Predicate Pred) {
3825 assert(ICmpInst::isIntPredicate(Pred) && "Only for integer predicates!");
3826 switch (Pred) {
3828 return LHS.eq(RHS);
3830 return LHS.ne(RHS);
3832 return LHS.ugt(RHS);
3834 return LHS.uge(RHS);
3836 return LHS.ult(RHS);
3838 return LHS.ule(RHS);
3840 return LHS.sgt(RHS);
3842 return LHS.sge(RHS);
3844 return LHS.slt(RHS);
3846 return LHS.sle(RHS);
3847 default:
3848 llvm_unreachable("Unexpected non-integer predicate.");
3849 };
3850}
3851
3852bool FCmpInst::compare(const APFloat &LHS, const APFloat &RHS,
3853 FCmpInst::Predicate Pred) {
3854 APFloat::cmpResult R = LHS.compare(RHS);
3855 switch (Pred) {
3856 default:
3857 llvm_unreachable("Invalid FCmp Predicate");
3859 return false;
3861 return true;
3862 case FCmpInst::FCMP_UNO:
3863 return R == APFloat::cmpUnordered;
3864 case FCmpInst::FCMP_ORD:
3865 return R != APFloat::cmpUnordered;
3866 case FCmpInst::FCMP_UEQ:
3867 return R == APFloat::cmpUnordered || R == APFloat::cmpEqual;
3868 case FCmpInst::FCMP_OEQ:
3869 return R == APFloat::cmpEqual;
3870 case FCmpInst::FCMP_UNE:
3871 return R != APFloat::cmpEqual;
3872 case FCmpInst::FCMP_ONE:
3874 case FCmpInst::FCMP_ULT:
3875 return R == APFloat::cmpUnordered || R == APFloat::cmpLessThan;
3876 case FCmpInst::FCMP_OLT:
3877 return R == APFloat::cmpLessThan;
3878 case FCmpInst::FCMP_UGT:
3880 case FCmpInst::FCMP_OGT:
3881 return R == APFloat::cmpGreaterThan;
3882 case FCmpInst::FCMP_ULE:
3883 return R != APFloat::cmpGreaterThan;
3884 case FCmpInst::FCMP_OLE:
3885 return R == APFloat::cmpLessThan || R == APFloat::cmpEqual;
3886 case FCmpInst::FCMP_UGE:
3887 return R != APFloat::cmpLessThan;
3888 case FCmpInst::FCMP_OGE:
3889 return R == APFloat::cmpGreaterThan || R == APFloat::cmpEqual;
3890 }
3891}
3892
/// Evaluate an integer comparison over KnownBits operands by dispatching to
/// the matching KnownBits helper. An empty optional presumably means the
/// known bits are insufficient to decide the comparison — confirm against
/// KnownBits.h.
std::optional<bool> ICmpInst::compare(const KnownBits &LHS,
                                      const KnownBits &RHS,
                                      ICmpInst::Predicate Pred) {
  switch (Pred) {
  case ICmpInst::ICMP_EQ:
    return KnownBits::eq(LHS, RHS);
  case ICmpInst::ICMP_NE:
    return KnownBits::ne(LHS, RHS);
  case ICmpInst::ICMP_UGE:
    return KnownBits::uge(LHS, RHS);
  case ICmpInst::ICMP_UGT:
    return KnownBits::ugt(LHS, RHS);
  case ICmpInst::ICMP_ULE:
    return KnownBits::ule(LHS, RHS);
  case ICmpInst::ICMP_ULT:
    return KnownBits::ult(LHS, RHS);
  case ICmpInst::ICMP_SGE:
    return KnownBits::sge(LHS, RHS);
  case ICmpInst::ICMP_SGT:
    return KnownBits::sgt(LHS, RHS);
  case ICmpInst::ICMP_SLE:
    return KnownBits::sle(LHS, RHS);
  case ICmpInst::ICMP_SLT:
    return KnownBits::slt(LHS, RHS);
  default:
    // Only integer predicates are meaningful here.
    llvm_unreachable("Unexpected non-integer predicate.");
  }
}
3921
3923 if (CmpInst::isEquality(pred))
3924 return pred;
3925 if (isSigned(pred))
3926 return getUnsignedPredicate(pred);
3927 if (isUnsigned(pred))
3928 return getSignedPredicate(pred);
3929
3930 llvm_unreachable("Unknown predicate!");
3931}
3932
3934 switch (predicate) {
3935 default: return false;
3938 case FCmpInst::FCMP_ORD: return true;
3939 }
3940}
3941
3943 switch (predicate) {
3944 default: return false;
3947 case FCmpInst::FCMP_UNO: return true;
3948 }
3949}
3950
3952 switch(predicate) {
3953 default: return false;
3954 case ICMP_EQ: case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE:
3955 case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true;
3956 }
3957}
3958
3960 switch(predicate) {
3961 case ICMP_NE: case ICMP_UGT: case ICMP_ULT: case ICMP_SGT: case ICMP_SLT:
3962 case FCMP_FALSE: case FCMP_ONE: case FCMP_OGT: case FCMP_OLT: return true;
3963 default: return false;
3964 }
3965}
3966
3968 // If the predicates match, then we know the first condition implies the
3969 // second is true.
3970 if (CmpPredicate::getMatching(Pred1, Pred2))
3971 return true;
3972
3973 if (Pred1.hasSameSign() && CmpInst::isSigned(Pred2))
3975 else if (Pred2.hasSameSign() && CmpInst::isSigned(Pred1))
3977
3978 switch (Pred1) {
3979 default:
3980 break;
3981 case CmpInst::ICMP_EQ:
3982 // A == B implies A >=u B, A <=u B, A >=s B, and A <=s B are true.
3983 return Pred2 == CmpInst::ICMP_UGE || Pred2 == CmpInst::ICMP_ULE ||
3984 Pred2 == CmpInst::ICMP_SGE || Pred2 == CmpInst::ICMP_SLE;
3985 case CmpInst::ICMP_UGT: // A >u B implies A != B and A >=u B are true.
3986 return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_UGE;
3987 case CmpInst::ICMP_ULT: // A <u B implies A != B and A <=u B are true.
3988 return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_ULE;
3989 case CmpInst::ICMP_SGT: // A >s B implies A != B and A >=s B are true.
3990 return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_SGE;
3991 case CmpInst::ICMP_SLT: // A <s B implies A != B and A <=s B are true.
3992 return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_SLE;
3993 }
3994 return false;
3995}
3996
3998 CmpPredicate Pred2) {
3999 return isImpliedTrueByMatchingCmp(Pred1,
4001}
4002
4004 CmpPredicate Pred2) {
4005 if (isImpliedTrueByMatchingCmp(Pred1, Pred2))
4006 return true;
4007 if (isImpliedFalseByMatchingCmp(Pred1, Pred2))
4008 return false;
4009 return std::nullopt;
4010}
4011
4012//===----------------------------------------------------------------------===//
4013// CmpPredicate Implementation
4014//===----------------------------------------------------------------------===//
4015
4016std::optional<CmpPredicate> CmpPredicate::getMatching(CmpPredicate A,
4017 CmpPredicate B) {
4018 if (A.Pred == B.Pred)
4019 return A.HasSameSign == B.HasSameSign ? A : CmpPredicate(A.Pred);
4021 return {};
4022 if (A.HasSameSign &&
4024 return B.Pred;
4025 if (B.HasSameSign &&
4027 return A.Pred;
4028 return {};
4029}
4030
4034
4036 if (auto *ICI = dyn_cast<ICmpInst>(Cmp))
4037 return ICI->getCmpPredicate();
4038 return Cmp->getPredicate();
4039}
4040
4044
4046 return getSwapped(get(Cmp));
4047}
4048
4049//===----------------------------------------------------------------------===//
4050// SwitchInst Implementation
4051//===----------------------------------------------------------------------===//
4052
4053void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) {
4054 assert(Value && Default && NumReserved);
4055 ReservedSpace = NumReserved;
4057 allocHungoffUses(ReservedSpace);
4058
4059 Op<0>() = Value;
4060 Op<1>() = Default;
4061}
4062
4063/// SwitchInst ctor - Create a new switch instruction, specifying a value to
4064/// switch on and a default destination. The number of additional cases can
4065/// be specified here to make memory allocation more efficient. This
4066/// constructor can also autoinsert before another instruction.
SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
                       InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
                  AllocMarker, InsertBefore) {
  // Reserve room for the condition, the default destination, and the
  // expected number of cases.
  init(Value, Default, 2 + NumCases);
}
4073
/// Copy constructor: clones the condition, default destination, all case
/// successors, and the parallel array of case values.
SwitchInst::SwitchInst(const SwitchInst &SI)
    : Instruction(SI.getType(), Instruction::Switch, AllocMarker) {
  init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands());
  setNumHungOffUseOperands(SI.getNumOperands());
  Use *OL = getOperandList();
  ConstantInt **VL = case_values();
  const Use *InOL = SI.getOperandList();
  ConstantInt *const *InVL = SI.case_values();
  // Operands 0 and 1 (condition, default) were set by init(); copy each
  // per-case successor operand together with its associated case value,
  // which is stored separately at index i - 2.
  for (unsigned i = 2, E = SI.getNumOperands(); i != E; ++i) {
    OL[i] = InOL[i];
    VL[i - 2] = InVL[i - 2];
  }
  SubclassOptionalData = SI.SubclassOptionalData;
}
4088
4089/// addCase - Add an entry to the switch instruction...
4090///
4092 unsigned NewCaseIdx = getNumCases();
4093 unsigned OpNo = getNumOperands();
4094 if (OpNo + 1 > ReservedSpace)
4095 growOperands(); // Get more space!
4096 // Initialize some new operands.
4097 assert(OpNo < ReservedSpace && "Growing didn't work!");
4098 setNumHungOffUseOperands(OpNo + 1);
4099 CaseHandle Case(this, NewCaseIdx);
4100 Case.setValue(OnVal);
4101 Case.setSuccessor(Dest);
4102}
4103
4104/// removeCase - This method removes the specified case and its successor
4105/// from the switch instruction.
4107 unsigned idx = I->getCaseIndex();
4108
4109 assert(2 + idx < getNumOperands() && "Case index out of range!!!");
4110
4111 unsigned NumOps = getNumOperands();
4112 Use *OL = getOperandList();
4113 ConstantInt **VL = case_values();
4114
4115 // Overwrite this case with the end of the list.
4116 if (2 + idx + 1 != NumOps) {
4117 OL[2 + idx] = OL[NumOps - 1];
4118 VL[idx] = VL[NumOps - 2 - 1];
4119 }
4120
4121 // Nuke the last value.
4122 OL[NumOps - 1].set(nullptr);
4123 VL[NumOps - 2 - 1] = nullptr;
4125
4126 return CaseIt(this, idx);
4127}
4128
4129/// growOperands - grow operands - This grows the operand list in response
4130/// to a push_back style of operation. This grows the number of ops by 3 times.
4131///
4132void SwitchInst::growOperands() {
4133 unsigned e = getNumOperands();
4134 unsigned NumOps = e*3;
4135
4136 ReservedSpace = NumOps;
4137 growHungoffUses(ReservedSpace, /*WithExtraValues=*/true);
4138}
4139
4141 MDNode *ProfileData = getBranchWeightMDNode(SI);
4142 if (!ProfileData)
4143 return;
4144
4145 if (getNumBranchWeights(*ProfileData) != SI.getNumSuccessors()) {
4146 llvm_unreachable("number of prof branch_weights metadata operands does "
4147 "not correspond to number of succesors");
4148 }
4149
4151 if (!extractBranchWeights(ProfileData, Weights))
4152 return;
4153 this->Weights = std::move(Weights);
4154}
4155
4158 if (Weights) {
4159 assert(SI.getNumSuccessors() == Weights->size() &&
4160 "num of prof branch_weights must accord with num of successors");
4161 Changed = true;
4162 // Copy the last case to the place of the removed one and shrink.
4163 // This is tightly coupled with the way SwitchInst::removeCase() removes
4164 // the cases in SwitchInst::removeCase(CaseIt).
4165 (*Weights)[I->getCaseIndex() + 1] = Weights->back();
4166 Weights->pop_back();
4167 }
4168 return SI.removeCase(I);
4169}
4170
4172 auto *DestBlock = I->getCaseSuccessor();
4173 if (Weights) {
4174 auto Weight = getSuccessorWeight(I->getCaseIndex() + 1);
4175 (*Weights)[0] = Weight.value();
4176 }
4177
4178 SI.setDefaultDest(DestBlock);
4179}
4180
4182 ConstantInt *OnVal, BasicBlock *Dest,
4184 SI.addCase(OnVal, Dest);
4185
4186 if (!Weights && W && *W) {
4187 Changed = true;
4188 Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
4189 (*Weights)[SI.getNumSuccessors() - 1] = *W;
4190 } else if (Weights) {
4191 Changed = true;
4192 Weights->push_back(W.value_or(0));
4193 }
4194 if (Weights)
4195 assert(SI.getNumSuccessors() == Weights->size() &&
4196 "num of prof branch_weights must accord with num of successors");
4197}
4198
4201 // Instruction is erased. Mark as unchanged to not touch it in the destructor.
4202 Changed = false;
4203 if (Weights)
4204 Weights->resize(0);
4205 return SI.eraseFromParent();
4206}
4207
4210 if (!Weights)
4211 return std::nullopt;
4212 return (*Weights)[idx];
4213}
4214
4217 if (!W)
4218 return;
4219
4220 if (!Weights && *W)
4221 Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
4222
4223 if (Weights) {
4224 auto &OldW = (*Weights)[idx];
4225 if (*W != OldW) {
4226 Changed = true;
4227 OldW = *W;
4228 }
4229 }
4230}
4231
4234 unsigned idx) {
4235 if (MDNode *ProfileData = getBranchWeightMDNode(SI))
4236 if (ProfileData->getNumOperands() == SI.getNumSuccessors() + 1)
4237 return mdconst::extract<ConstantInt>(ProfileData->getOperand(idx + 1))
4238 ->getValue()
4239 .getZExtValue();
4240
4241 return std::nullopt;
4242}
4243
4244//===----------------------------------------------------------------------===//
4245// IndirectBrInst Implementation
4246//===----------------------------------------------------------------------===//
4247
4248void IndirectBrInst::init(Value *Address, unsigned NumDests) {
4249 assert(Address && Address->getType()->isPointerTy() &&
4250 "Address of indirectbr must be a pointer");
4251 ReservedSpace = 1+NumDests;
4253 allocHungoffUses(ReservedSpace);
4254
4255 Op<0>() = Address;
4256}
4257
4258
4259/// growOperands - grow operands - This grows the operand list in response
4260/// to a push_back style of operation. This grows the number of ops by 2 times.
4261///
4262void IndirectBrInst::growOperands() {
4263 unsigned e = getNumOperands();
4264 unsigned NumOps = e*2;
4265
4266 ReservedSpace = NumOps;
4267 growHungoffUses(ReservedSpace);
4268}
4269
4270IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
4271 InsertPosition InsertBefore)
4272 : Instruction(Type::getVoidTy(Address->getContext()),
4273 Instruction::IndirectBr, AllocMarker, InsertBefore) {
4274 init(Address, NumCases);
4275}
4276
4277IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI)
4278 : Instruction(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr,
4279 AllocMarker) {
4280 NumUserOperands = IBI.NumUserOperands;
4281 allocHungoffUses(IBI.getNumOperands());
4282 Use *OL = getOperandList();
4283 const Use *InOL = IBI.getOperandList();
4284 for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i)
4285 OL[i] = InOL[i];
4286 SubclassOptionalData = IBI.SubclassOptionalData;
4287}
4288
4289/// addDestination - Add a destination.
4290///
4292 unsigned OpNo = getNumOperands();
4293 if (OpNo+1 > ReservedSpace)
4294 growOperands(); // Get more space!
4295 // Initialize some new operands.
4296 assert(OpNo < ReservedSpace && "Growing didn't work!");
4298 getOperandList()[OpNo] = DestBB;
4299}
4300
4301/// removeDestination - This method removes the specified successor from the
4302/// indirectbr instruction.
4304 assert(idx < getNumOperands()-1 && "Successor index out of range!");
4305
4306 unsigned NumOps = getNumOperands();
4307 Use *OL = getOperandList();
4308
4309 // Replace this value with the last one.
4310 OL[idx+1] = OL[NumOps-1];
4311
4312 // Nuke the last value.
4313 OL[NumOps-1].set(nullptr);
4315}
4316
4317//===----------------------------------------------------------------------===//
4318// FreezeInst Implementation
4319//===----------------------------------------------------------------------===//
4320
4321FreezeInst::FreezeInst(Value *S, const Twine &Name, InsertPosition InsertBefore)
4322 : UnaryInstruction(S->getType(), Freeze, S, InsertBefore) {
4323 setName(Name);
4324}
4325
4326//===----------------------------------------------------------------------===//
4327// cloneImpl() implementations
4328//===----------------------------------------------------------------------===//
4329
4330// Define these methods here so vtables don't get emitted into every translation
4331// unit that uses these classes.
4332
4333GetElementPtrInst *GetElementPtrInst::cloneImpl() const {
4335 return new (AllocMarker) GetElementPtrInst(*this, AllocMarker);
4336}
4337
4341
4345
4347 return new FCmpInst(getPredicate(), Op<0>(), Op<1>());
4348}
4349
4351 return new ICmpInst(getPredicate(), Op<0>(), Op<1>());
4352}
4353
4354ExtractValueInst *ExtractValueInst::cloneImpl() const {
4355 return new ExtractValueInst(*this);
4356}
4357
4358InsertValueInst *InsertValueInst::cloneImpl() const {
4359 return new InsertValueInst(*this);
4360}
4361
4364 getOperand(0), getAlign());
4365 Result->setUsedWithInAlloca(isUsedWithInAlloca());
4366 Result->setSwiftError(isSwiftError());
4367 return Result;
4368}
4369
4371 return new LoadInst(getType(), getOperand(0), Twine(), isVolatile(),
4373}
4374
4379
4384 Result->setVolatile(isVolatile());
4385 Result->setWeak(isWeak());
4386 return Result;
4387}
4388
4390 AtomicRMWInst *Result =
4393 Result->setVolatile(isVolatile());
4394 return Result;
4395}
4396
4400
4402 return new TruncInst(getOperand(0), getType());
4403}
4404
4406 return new ZExtInst(getOperand(0), getType());
4407}
4408
4410 return new SExtInst(getOperand(0), getType());
4411}
4412
4414 return new FPTruncInst(getOperand(0), getType());
4415}
4416
4418 return new FPExtInst(getOperand(0), getType());
4419}
4420
4422 return new UIToFPInst(getOperand(0), getType());
4423}
4424
4426 return new SIToFPInst(getOperand(0), getType());
4427}
4428
4430 return new FPToUIInst(getOperand(0), getType());
4431}
4432
4434 return new FPToSIInst(getOperand(0), getType());
4435}
4436
4438 return new PtrToIntInst(getOperand(0), getType());
4439}
4440
4444
4446 return new IntToPtrInst(getOperand(0), getType());
4447}
4448
4450 return new BitCastInst(getOperand(0), getType());
4451}
4452
4456
4457CallInst *CallInst::cloneImpl() const {
4458 if (hasOperandBundles()) {
4462 return new (AllocMarker) CallInst(*this, AllocMarker);
4463 }
4465 return new (AllocMarker) CallInst(*this, AllocMarker);
4466}
4467
4468SelectInst *SelectInst::cloneImpl() const {
4470}
4471
4473 return new VAArgInst(getOperand(0), getType());
4474}
4475
4476ExtractElementInst *ExtractElementInst::cloneImpl() const {
4478}
4479
4480InsertElementInst *InsertElementInst::cloneImpl() const {
4482}
4483
4487
4488PHINode *PHINode::cloneImpl() const { return new (AllocMarker) PHINode(*this); }
4489
4490LandingPadInst *LandingPadInst::cloneImpl() const {
4491 return new LandingPadInst(*this);
4492}
4493
4494ReturnInst *ReturnInst::cloneImpl() const {
4496 return new (AllocMarker) ReturnInst(*this, AllocMarker);
4497}
4498
4499BranchInst *BranchInst::cloneImpl() const {
4501 return new (AllocMarker) BranchInst(*this, AllocMarker);
4502}
4503
4504SwitchInst *SwitchInst::cloneImpl() const { return new SwitchInst(*this); }
4505
4506IndirectBrInst *IndirectBrInst::cloneImpl() const {
4507 return new IndirectBrInst(*this);
4508}
4509
4510InvokeInst *InvokeInst::cloneImpl() const {
4511 if (hasOperandBundles()) {
4515 return new (AllocMarker) InvokeInst(*this, AllocMarker);
4516 }
4518 return new (AllocMarker) InvokeInst(*this, AllocMarker);
4519}
4520
4521CallBrInst *CallBrInst::cloneImpl() const {
4522 if (hasOperandBundles()) {
4526 return new (AllocMarker) CallBrInst(*this, AllocMarker);
4527 }
4529 return new (AllocMarker) CallBrInst(*this, AllocMarker);
4530}
4531
4532ResumeInst *ResumeInst::cloneImpl() const {
4533 return new (AllocMarker) ResumeInst(*this);
4534}
4535
4536CleanupReturnInst *CleanupReturnInst::cloneImpl() const {
4538 return new (AllocMarker) CleanupReturnInst(*this, AllocMarker);
4539}
4540
4541CatchReturnInst *CatchReturnInst::cloneImpl() const {
4542 return new (AllocMarker) CatchReturnInst(*this);
4543}
4544
4545CatchSwitchInst *CatchSwitchInst::cloneImpl() const {
4546 return new CatchSwitchInst(*this);
4547}
4548
4549FuncletPadInst *FuncletPadInst::cloneImpl() const {
4551 return new (AllocMarker) FuncletPadInst(*this, AllocMarker);
4552}
4553
4555 LLVMContext &Context = getContext();
4556 return new UnreachableInst(Context);
4557}
4558
4559bool UnreachableInst::shouldLowerToTrap(bool TrapUnreachable,
4560 bool NoTrapAfterNoreturn) const {
4561 if (!TrapUnreachable)
4562 return false;
4563
4564 // We may be able to ignore unreachable behind a noreturn call.
4566 Call && Call->doesNotReturn()) {
4567 if (NoTrapAfterNoreturn)
4568 return false;
4569 // Do not emit an additional trap instruction.
4570 if (Call->isNonContinuableTrap())
4571 return false;
4572 }
4573
4574 if (getFunction()->hasFnAttribute(Attribute::Naked))
4575 return false;
4576
4577 return true;
4578}
4579
4581 return new FreezeInst(getOperand(0));
4582}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
constexpr LLT S1
Rewrite undef for PHI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Atomic ordering constants.
@ FnAttr
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define LLVM_ABI
Definition Compiler.h:213
This file contains the declarations for the subclasses of Constant, which represent the different fla...
@ Default
#define op(i)
Module.h This file contains the declarations for the Module class.
static Align computeLoadStoreDefaultAlign(Type *Ty, InsertPosition Pos)
static bool isImpliedFalseByMatchingCmp(CmpPredicate Pred1, CmpPredicate Pred2)
static Value * createPlaceholderForShuffleVector(Value *V)
static Align computeAllocaDefaultAlign(Type *Ty, InsertPosition Pos)
static cl::opt< bool > DisableI2pP2iOpt("disable-i2p-p2i-opt", cl::init(false), cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"))
static bool hasNonZeroFPOperands(const CmpInst *Cmp)
static int matchShuffleAsBitRotate(ArrayRef< int > Mask, int NumSubElts)
Try to lower a vector shuffle as a bit rotation.
static Type * getIndexedTypeInternal(Type *Ty, ArrayRef< IndexTy > IdxList)
static bool isReplicationMaskWithParams(ArrayRef< int > Mask, int ReplicationFactor, int VF)
static bool isIdentityMaskImpl(ArrayRef< int > Mask, int NumOpElts)
static bool isSingleSourceMaskImpl(ArrayRef< int > Mask, int NumOpElts)
static Value * getAISize(LLVMContext &Context, Value *Amt)
static bool isImpliedTrueByMatchingCmp(CmpPredicate Pred1, CmpPredicate Pred2)
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
This file contains the declarations for metadata subclasses.
#define T
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
#define P(N)
PowerPC Reduce CR logical Operation
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
static unsigned getNumElements(Type *Ty)
This file implements the SmallBitVector class.
This file defines the SmallVector class.
#define LLVM_DEBUG(...)
Definition Debug.h:114
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Definition VPlanSLP.cpp:247
Value * RHS
Value * LHS
cmpResult
IEEE-754R 5.11: Floating Point Comparison Relations.
Definition APFloat.h:334
LLVM_ABI float convertToFloat() const
Converts this APFloat to host float value.
Definition APFloat.cpp:6143
Class for arbitrary precision integers.
Definition APInt.h:78
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
Definition APInt.h:1331
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition APInt.h:381
unsigned countr_zero() const
Count the number of trailing zero bits.
Definition APInt.h:1640
unsigned countl_zero() const
The APInt version of std::countl_zero.
Definition APInt.h:1599
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
Definition APInt.h:201
This class represents a conversion between pointers from one address space to another.
LLVM_ABI AddrSpaceCastInst * cloneImpl() const
Clone an identical AddrSpaceCastInst.
LLVM_ABI AddrSpaceCastInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI std::optional< TypeSize > getAllocationSizeInBits(const DataLayout &DL) const
Get allocation size in bits.
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
LLVM_ABI AllocaInst * cloneImpl() const
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
unsigned getAddressSpace() const
Return the address space for the allocation.
LLVM_ABI std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
LLVM_ABI bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
void setAlignment(Align Align)
const Value * getArraySize() const
Get the number of elements allocated.
LLVM_ABI AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, const Twine &Name, InsertPosition InsertBefore)
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
iterator end() const
Definition ArrayRef.h:131
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
iterator begin() const
Definition ArrayRef.h:130
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Definition ArrayRef.h:186
Class to represent array types.
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this cmpxchg instruction.
bool isVolatile() const
Return true if this is a cmpxchg from a volatile memory location.
void setFailureOrdering(AtomicOrdering Ordering)
Sets the failure ordering constraint of this cmpxchg instruction.
AtomicOrdering getFailureOrdering() const
Returns the failure ordering constraint of this cmpxchg instruction.
void setSuccessOrdering(AtomicOrdering Ordering)
Sets the success ordering constraint of this cmpxchg instruction.
LLVM_ABI AtomicCmpXchgInst * cloneImpl() const
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
friend class Instruction
Iterator for Instructions in a `BasicBlock.
bool isWeak() const
Return true if this cmpxchg may spuriously fail.
void setAlignment(Align Align)
AtomicOrdering getSuccessOrdering() const
Returns the success ordering constraint of this cmpxchg instruction.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this cmpxchg instruction.
LLVM_ABI AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment, AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, SyncScope::ID SSID, InsertPosition InsertBefore=nullptr)
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
LLVM_ABI AtomicRMWInst * cloneImpl() const
bool isVolatile() const
Return true if this is a RMW on a volatile memory location.
BinOp
This enumeration lists the possible modifications atomicrmw can make.
@ Add
*p = old + v
@ FAdd
*p = old + v
@ USubCond
Subtract only if no unsigned overflow.
@ FMinimum
*p = minimum(old, v) minimum matches the behavior of llvm.minimum.
@ Min
*p = old <signed v ? old : v
@ Sub
*p = old - v
@ And
*p = old & v
@ Xor
*p = old ^ v
@ USubSat
*p = usub.sat(old, v) usub.sat matches the behavior of llvm.usub.sat.
@ FMaximum
*p = maximum(old, v) maximum matches the behavior of llvm.maximum.
@ FSub
*p = old - v
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ FMin
*p = minnum(old, v) minnum matches the behavior of llvm.minnum.
@ UMax
*p = old >unsigned v ? old : v
@ FMax
*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.
@ UDecWrap
Decrement one until a minimum value or zero.
@ Nand
*p = ~(old & v)
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this rmw instruction.
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this rmw instruction.
void setOperation(BinOp Operation)
friend class Instruction
Iterator for Instructions in a `BasicBlock.
BinOp getOperation() const
LLVM_ABI AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment, AtomicOrdering Ordering, SyncScope::ID SSID, InsertPosition InsertBefore=nullptr)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this rmw instruction.
void setAlignment(Align Align)
static LLVM_ABI StringRef getOperationName(BinOp Op)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
LLVM_ABI CaptureInfo getCaptureInfo() const
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:69
LLVM_ABI const ConstantRange & getRange() const
Returns the value of the range attribute.
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition Attributes.h:88
static LLVM_ABI Attribute getWithMemoryEffects(LLVMContext &Context, MemoryEffects ME)
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:223
LLVM Basic Block Representation.
Definition BasicBlock.h:62
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this basic block belongs to.
static LLVM_ABI BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
BinaryOps getOpcode() const
Definition InstrTypes.h:374
LLVM_ABI bool swapOperands()
Exchange the two operands to this instruction.
static LLVM_ABI BinaryOperator * CreateNot(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
friend class Instruction
Iterator for Instructions in a `BasicBlock.
Definition InstrTypes.h:181
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
LLVM_ABI BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty, const Twine &Name, InsertPosition InsertBefore)
static LLVM_ABI BinaryOperator * CreateNSWNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
LLVM_ABI BinaryOperator * cloneImpl() const
This class represents a no-op cast from one type to another.
LLVM_ABI BitCastInst * cloneImpl() const
Clone an identical BitCastInst.
LLVM_ABI BitCastInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
Conditional or Unconditional Branch instruction.
LLVM_ABI void swapSuccessors()
Swap the successors of this branch instruction.
LLVM_ABI BranchInst * cloneImpl() const
bool isConditional() const
Value * getCondition() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
LLVM_ABI FPClassTest getParamNoFPClass(unsigned i) const
Extract a test mask for disallowed floating-point value classes for the parameter.
bool isInlineAsm() const
Check if this call is an inline asm statement.
LLVM_ABI BundleOpInfo & getBundleOpInfoForOperand(unsigned OpIdx)
Return the BundleOpInfo for the operand at index OpIdx.
void setCallingConv(CallingConv::ID CC)
LLVM_ABI FPClassTest getRetNoFPClass() const
Extract a test mask for disallowed floating-point value classes for the return value.
bundle_op_iterator bundle_op_info_begin()
Return the start of the list of BundleOpInfo instances associated with this OperandBundleUser.
LLVM_ABI bool paramHasNonNullAttr(unsigned ArgNo, bool AllowUndefOrPoison) const
Return true if this argument has the nonnull attribute on either the CallBase instruction or the call...
LLVM_ABI MemoryEffects getMemoryEffects() const
void addFnAttr(Attribute::AttrKind Kind)
Adds the attribute to the function.
LLVM_ABI bool doesNotAccessMemory() const
Determine if the call does not access memory.
LLVM_ABI void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
LLVM_ABI void setOnlyAccessesArgMemory()
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
OperandBundleUse operandBundleFromBundleOpInfo(const BundleOpInfo &BOI) const
Simple helper function to map a BundleOpInfo to an OperandBundleUse.
LLVM_ABI void setOnlyAccessesInaccessibleMemOrArgMem()
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
LLVM_ABI void setDoesNotAccessMemory()
AttributeSet getParamAttributes(unsigned ArgNo) const
Return the param attributes for this call.
bool hasRetAttr(Attribute::AttrKind Kind) const
Determine whether the return value has the given attribute.
LLVM_ABI bool onlyAccessesInaccessibleMemory() const
Determine if the function may only access memory that is inaccessible from the IR.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
CallingConv::ID getCallingConv() const
bundle_op_iterator bundle_op_info_end()
Return the end of the list of BundleOpInfo instances associated with this OperandBundleUser.
LLVM_ABI unsigned getNumSubclassExtraOperandsDynamic() const
Get the number of extra operands for instructions that don't have a fixed number of extra operands.
BundleOpInfo * bundle_op_iterator
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
LLVM_ABI bool isMustTailCall() const
Tests if this call site must be tail call optimized.
LLVM_ABI bool isIndirectCall() const
Return true if the callsite is an indirect call.
LLVM_ABI bool onlyReadsMemory() const
Determine if the call does not access or only reads memory.
bool isByValArgument(unsigned ArgNo) const
Determine whether this argument is passed by value.
iterator_range< bundle_op_iterator > bundle_op_infos()
Return the range [bundle_op_info_begin, bundle_op_info_end).
LLVM_ABI void setOnlyReadsMemory()
static LLVM_ABI CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle OB added.
LLVM_ABI bool onlyAccessesInaccessibleMemOrArgMem() const
Determine if the function may only access memory that is either inaccessible from the IR or pointed t...
LLVM_ABI CaptureInfo getCaptureInfo(unsigned OpNo) const
Return which pointer components this operand may capture.
LLVM_ABI bool hasArgumentWithAdditionalReturnCaptureComponents() const
Returns whether the call has an argument that has an attribute like captures(ret: address,...
CallBase(AttributeList const &A, FunctionType *FT, ArgsTy &&... Args)
Value * getCalledOperand() const
LLVM_ABI void setOnlyWritesMemory()
LLVM_ABI op_iterator populateBundleOperandInfos(ArrayRef< OperandBundleDef > Bundles, const unsigned BeginIndex)
Populate the BundleOpInfo instances and the Use& vector from Bundles.
AttributeList Attrs
parameter attributes for callable
bool hasOperandBundlesOtherThan(ArrayRef< uint32_t > IDs) const
Return true if this operand bundle user contains operand bundles with tags other than those specified...
LLVM_ABI std::optional< ConstantRange > getRange() const
If this return value has a range attribute, return the value range of the argument.
LLVM_ABI bool isReturnNonNull() const
Return true if the return value is known to be not null.
Value * getArgOperand(unsigned i) const
FunctionType * FTy
uint64_t getRetDereferenceableBytes() const
Extract the number of dereferenceable bytes for a call or parameter (0=unknown).
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
FunctionType * getFunctionType() const
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
static unsigned CountBundleInputs(ArrayRef< OperandBundleDef > Bundles)
Return the total number of values used in Bundles.
LLVM_ABI Value * getArgOperandWithAttribute(Attribute::AttrKind Kind) const
If one of the arguments has the specified attribute, returns its operand value.
LLVM_ABI void setOnlyAccessesInaccessibleMemory()
static LLVM_ABI CallBase * Create(CallBase *CB, ArrayRef< OperandBundleDef > Bundles, InsertPosition InsertPt=nullptr)
Create a clone of CB with a different set of operand bundles and insert it before InsertPt.
LLVM_ABI bool onlyWritesMemory() const
Determine if the call does not access or only writes memory.
LLVM_ABI bool hasClobberingOperandBundles() const
Return true if this operand bundle user has operand bundles that may write to the heap.
void setCalledOperand(Value *V)
static LLVM_ABI CallBase * removeOperandBundle(CallBase *CB, uint32_t ID, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle ID removed.
LLVM_ABI bool hasReadingOperandBundles() const
Return true if this operand bundle user has operand bundles that may read from the heap.
LLVM_ABI bool onlyAccessesArgMemory() const
Determine if the call can access memmory only using pointers based on its arguments.
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
LLVM_ABI void setMemoryEffects(MemoryEffects ME)
bool hasOperandBundles() const
Return true if this User has any operand bundles.
LLVM_ABI bool isTailCall() const
Tests if this call site is marked as a tail call.
LLVM_ABI Function * getCaller()
Helper to get the caller (the parent function).
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
SmallVector< BasicBlock *, 16 > getIndirectDests() const
void setDefaultDest(BasicBlock *B)
void setIndirectDest(unsigned i, BasicBlock *B)
BasicBlock * getDefaultDest() const
static CallBrInst * Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, ArrayRef< BasicBlock * > IndirectDests, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
LLVM_ABI CallBrInst * cloneImpl() const
This class represents a function call, abstracting a target machine's calling convention.
LLVM_ABI void updateProfWeight(uint64_t S, uint64_t T)
Updates profile metadata by scaling it by S / T.
TailCallKind getTailCallKind() const
LLVM_ABI CallInst * cloneImpl() const
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Represents which components of the pointer may be captured in which location.
Definition ModRef.h:359
CaptureComponents getOtherComponents() const
Get components potentially captured through locations other than the return value.
Definition ModRef.h:391
static CaptureInfo none()
Create CaptureInfo that does not capture any components of the pointer.
Definition ModRef.h:372
static CaptureInfo all()
Create CaptureInfo that may capture all components of the pointer.
Definition ModRef.h:375
CaptureComponents getRetComponents() const
Get components potentially captured by the return value.
Definition ModRef.h:387
static LLVM_ABI Instruction::CastOps getCastOpcode(const Value *Val, bool SrcIsSigned, Type *Ty, bool DstIsSigned)
Returns the opcode necessary to cast Val into Ty using usual casting rules.
static LLVM_ABI CastInst * CreatePointerBitCastOrAddrSpaceCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast or an AddrSpaceCast cast instruction.
Instruction::CastOps getOpcode() const
Return the opcode of this CastInst.
Definition InstrTypes.h:610
static LLVM_ABI unsigned isEliminableCastPair(Instruction::CastOps firstOpcode, Instruction::CastOps secondOpcode, Type *SrcTy, Type *MidTy, Type *DstTy, const DataLayout *DL)
Determine how a pair of casts can be eliminated, if they can be at all.
static LLVM_ABI CastInst * CreateIntegerCast(Value *S, Type *Ty, bool isSigned, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a ZExt, BitCast, or Trunc for int -> int casts.
static LLVM_ABI CastInst * CreateFPCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create an FPExt, BitCast, or FPTrunc for fp -> fp casts.
CastInst(Type *Ty, unsigned iType, Value *S, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics for subclasses.
Definition InstrTypes.h:451
static LLVM_ABI bool isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy, const DataLayout &DL)
Check whether a bitcast, inttoptr, or ptrtoint cast between these types is valid and a no-op.
static LLVM_ABI bool isBitCastable(Type *SrcTy, Type *DestTy)
Check whether a bitcast between these types is valid.
static LLVM_ABI CastInst * CreateTruncOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a Trunc or BitCast cast instruction.
static LLVM_ABI CastInst * CreatePointerCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast, AddrSpaceCast or a PtrToInt cast instruction.
static LLVM_ABI CastInst * CreateBitOrPointerCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast, a PtrToInt, or an IntToPtr cast instruction.
static LLVM_ABI bool isNoopCast(Instruction::CastOps Opcode, Type *SrcTy, Type *DstTy, const DataLayout &DL)
A no-op cast is one that can be effected without changing any bits.
static LLVM_ABI CastInst * CreateZExtOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a ZExt or BitCast cast instruction.
static LLVM_ABI CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's constructor.
LLVM_ABI bool isIntegerCast() const
There are several places where we need to know if a cast instruction only deals with integer source and destination types.
static LLVM_ABI CastInst * CreateSExtOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a SExt or BitCast cast instruction.
static LLVM_ABI bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
LLVM_ABI CatchReturnInst * cloneImpl() const
void setUnwindDest(BasicBlock *UnwindDest)
LLVM_ABI void addHandler(BasicBlock *Dest)
Add an entry to the switch instruction... Note: This action invalidates handler_end().
LLVM_ABI CatchSwitchInst * cloneImpl() const
mapped_iterator< op_iterator, DerefFnTy > handler_iterator
Value * getParentPad() const
void setParentPad(Value *ParentPad)
BasicBlock * getUnwindDest() const
LLVM_ABI void removeHandler(handler_iterator HI)
LLVM_ABI CleanupReturnInst * cloneImpl() const
This class is the base class for the comparison instructions.
Definition InstrTypes.h:664
Predicate getStrictPredicate() const
For example, SGE -> SGT, SLE -> SLT, ULE -> ULT, UGE -> UGT.
Definition InstrTypes.h:858
bool isEquality() const
Determine if this is an equals/not equals predicate.
Definition InstrTypes.h:915
void setPredicate(Predicate P)
Set the predicate for this instruction to the specified value.
Definition InstrTypes.h:768
bool isFalseWhenEqual() const
This is just a convenience.
Definition InstrTypes.h:948
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition InstrTypes.h:679
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
Definition InstrTypes.h:693
@ ICMP_SLT
signed less than
Definition InstrTypes.h:705
@ ICMP_SLE
signed less or equal
Definition InstrTypes.h:706
@ FCMP_OLT
0 1 0 0 True if ordered and less than
Definition InstrTypes.h:682
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
Definition InstrTypes.h:691
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
Definition InstrTypes.h:680
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
Definition InstrTypes.h:681
@ ICMP_UGE
unsigned greater or equal
Definition InstrTypes.h:700
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_SGT
signed greater than
Definition InstrTypes.h:703
@ FCMP_ULT
1 1 0 0 True if unordered or less than
Definition InstrTypes.h:690
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
Definition InstrTypes.h:684
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
Definition InstrTypes.h:687
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
Definition InstrTypes.h:688
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
Definition InstrTypes.h:683
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
Definition InstrTypes.h:685
@ ICMP_NE
not equal
Definition InstrTypes.h:698
@ ICMP_SGE
signed greater or equal
Definition InstrTypes.h:704
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Definition InstrTypes.h:692
@ ICMP_ULE
unsigned less or equal
Definition InstrTypes.h:702
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
Definition InstrTypes.h:689
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
Definition InstrTypes.h:678
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition InstrTypes.h:686
LLVM_ABI bool isEquivalence(bool Invert=false) const
Determine if one operand of this compare can always be replaced by the other operand,...
bool isSigned() const
Definition InstrTypes.h:930
static LLVM_ABI bool isEquality(Predicate pred)
Determine if this is an equals/not equals predicate.
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition InstrTypes.h:827
bool isTrueWhenEqual() const
This is just a convenience.
Definition InstrTypes.h:942
static LLVM_ABI CmpInst * Create(OtherOps Op, Predicate Pred, Value *S1, Value *S2, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Construct a compare instruction, given the opcode, the predicate and the two operands.
static bool isFPPredicate(Predicate P)
Definition InstrTypes.h:770
Predicate getNonStrictPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
Definition InstrTypes.h:871
static LLVM_ABI CmpInst * CreateWithCopiedFlags(OtherOps Op, Predicate Pred, Value *S1, Value *S2, const Instruction *FlagsSource, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Construct a compare instruction, given the opcode, the predicate, the two operands and the instruction to copy the flags from.
bool isNonStrictPredicate() const
Definition InstrTypes.h:852
LLVM_ABI void swapOperands()
This is just a convenience that dispatches to the subclasses.
static bool isRelational(Predicate P)
Return true if the predicate is relational (not EQ or NE).
Definition InstrTypes.h:923
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition InstrTypes.h:789
static LLVM_ABI StringRef getPredicateName(Predicate P)
Predicate getPredicate() const
Return the predicate for this instruction.
Definition InstrTypes.h:765
bool isStrictPredicate() const
Definition InstrTypes.h:843
static LLVM_ABI bool isUnordered(Predicate predicate)
Determine if the predicate is an unordered operation.
Predicate getFlippedStrictnessPredicate() const
For predicate of kind "is X or equal to 0" returns the predicate "is X".
Definition InstrTypes.h:893
static bool isIntPredicate(Predicate P)
Definition InstrTypes.h:776
static LLVM_ABI bool isOrdered(Predicate predicate)
Determine if the predicate is an ordered operation.
LLVM_ABI CmpInst(Type *ty, Instruction::OtherOps op, Predicate pred, Value *LHS, Value *RHS, const Twine &Name="", InsertPosition InsertBefore=nullptr, Instruction *FlagsSource=nullptr)
bool isUnsigned() const
Definition InstrTypes.h:936
LLVM_ABI bool isCommutative() const
This is just a convenience that dispatches to the subclasses.
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign information.
static LLVM_ABI std::optional< CmpPredicate > getMatching(CmpPredicate A, CmpPredicate B)
Compares two CmpPredicates taking samesign into account and returns the canonicalized CmpPredicate if...
CmpPredicate()
Default constructor.
static LLVM_ABI CmpPredicate get(const CmpInst *Cmp)
Do a ICmpInst::getCmpPredicate() or CmpInst::getPredicate(), as appropriate.
LLVM_ABI CmpInst::Predicate getPreferredSignedPredicate() const
Attempts to return a signed CmpInst::Predicate from the CmpPredicate.
bool hasSameSign() const
Query samesign information, for optimizations.
static LLVM_ABI CmpPredicate getSwapped(CmpPredicate P)
Get the swapped predicate of a CmpPredicate.
ConstantFP - Floating Point Values [float, double].
Definition Constants.h:285
const APFloat & getValueAPF() const
Definition Constants.h:328
This is the shared class of boolean and integer constants.
Definition Constants.h:87
LLVM_ABI ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
static LLVM_ABI Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition TypeSize.h:309
LLVM_ABI ExtractElementInst * cloneImpl() const
static ExtractElementInst * Create(Value *Vec, Value *Idx, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
This instruction extracts a struct member or array element value from an aggregate value.
static LLVM_ABI Type * getIndexedType(Type *Agg, ArrayRef< unsigned > Idxs)
Returns the type of the element that would be extracted with an extractvalue instruction with the specified indices.
LLVM_ABI ExtractValueInst * cloneImpl() const
This instruction compares its operands according to the predicate given to the constructor.
bool isEquality() const
static LLVM_ABI bool compare(const APFloat &LHS, const APFloat &RHS, FCmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
LLVM_ABI FCmpInst * cloneImpl() const
Clone an identical FCmpInst.
FCmpInst(InsertPosition InsertBefore, Predicate pred, Value *LHS, Value *RHS, const Twine &NameStr="")
Constructor with insertion semantics.
This class represents an extension of floating point types.
LLVM_ABI FPExtInst * cloneImpl() const
Clone an identical FPExtInst.
LLVM_ABI FPExtInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI float getFPAccuracy() const
Get the maximum error permitted by this operation in ULPs.
This class represents a cast from floating point to signed integer.
LLVM_ABI FPToSIInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI FPToSIInst * cloneImpl() const
Clone an identical FPToSIInst.
This class represents a cast from floating point to unsigned integer.
LLVM_ABI FPToUIInst * cloneImpl() const
Clone an identical FPToUIInst.
LLVM_ABI FPToUIInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
This class represents a truncation of floating point types.
LLVM_ABI FPTruncInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI FPTruncInst * cloneImpl() const
Clone an identical FPTruncInst.
LLVM_ABI FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System, InsertPosition InsertBefore=nullptr)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this fence instruction.
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this fence instruction.
LLVM_ABI FenceInst * cloneImpl() const
friend class Instruction
Iterator for Instructions in a `BasicBlock`.
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this fence instruction.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Class to represent fixed width SIMD vectors.
unsigned getNumElements() const
LLVM_ABI FreezeInst(Value *S, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
LLVM_ABI FreezeInst * cloneImpl() const
Clone an identical FreezeInst.
void setParentPad(Value *ParentPad)
Value * getParentPad() const
Convenience accessors.
LLVM_ABI FuncletPadInst * cloneImpl() const
Class to represent function types.
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Type * getParamType(unsigned i) const
Parameter type accessors.
bool isVarArg() const
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags inBounds()
GEPNoWrapFlags withoutInBounds() const
unsigned getRaw() const
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
LLVM_ABI bool isInBounds() const
Determine whether the GEP has the inbounds flag.
LLVM_ABI bool hasNoUnsignedSignedWrap() const
Determine whether the GEP has the nusw flag.
static LLVM_ABI Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
LLVM_ABI bool hasAllZeroIndices() const
Return true if all of the indices of this GEP are zeros.
LLVM_ABI bool hasNoUnsignedWrap() const
Determine whether the GEP has the nuw flag.
LLVM_ABI bool hasAllConstantIndices() const
Return true if all of the indices of this GEP are constant integers.
LLVM_ABI void setIsInBounds(bool b=true)
Set or clear the inbounds flag on this GEP instruction.
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
LLVM_ABI bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const
Accumulate the constant address offset of this GEP if possible.
LLVM_ABI GetElementPtrInst * cloneImpl() const
LLVM_ABI bool collectOffset(const DataLayout &DL, unsigned BitWidth, SmallMapVector< Value *, APInt, 4 > &VariableOffsets, APInt &ConstantOffset) const
LLVM_ABI void setNoWrapFlags(GEPNoWrapFlags NW)
Set nowrap flags for GEP instruction.
LLVM_ABI GEPNoWrapFlags getNoWrapFlags() const
Get the nowrap flags for the GEP instruction.
Module * getParent()
Get the module that this global value is contained inside of...
This instruction compares its operands according to the predicate given to the constructor.
ICmpInst(InsertPosition InsertBefore, Predicate pred, Value *LHS, Value *RHS, const Twine &NameStr="")
Constructor with insertion semantics.
static LLVM_ABI bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
LLVM_ABI ICmpInst * cloneImpl() const
Clone an identical ICmpInst.
Predicate getFlippedSignednessPredicate() const
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
static CmpPredicate getInverseCmpPredicate(CmpPredicate Pred)
bool isEquality() const
Return true if this predicate is either EQ or NE.
static LLVM_ABI Predicate getFlippedSignednessPredicate(Predicate Pred)
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
static LLVM_ABI std::optional< bool > isImpliedByMatchingCmp(CmpPredicate Pred1, CmpPredicate Pred2)
Determine if Pred1 implies Pred2 is true, false, or if nothing can be inferred about the implication,...
Predicate getUnsignedPredicate() const
For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
Indirect Branch Instruction.
LLVM_ABI void addDestination(BasicBlock *Dest)
Add a destination.
LLVM_ABI void removeDestination(unsigned i)
This method removes the specified successor from the indirectbr instruction.
LLVM_ABI IndirectBrInst * cloneImpl() const
LLVM_ABI InsertElementInst * cloneImpl() const
static InsertElementInst * Create(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *NewElt, const Value *Idx)
Return true if an insertelement instruction can be formed with the specified operands.
bool isValid() const
Definition Instruction.h:62
BasicBlock * getBasicBlock()
Definition Instruction.h:63
This instruction inserts a struct field or array element value into an aggregate value.
LLVM_ABI InsertValueInst * cloneImpl() const
BitfieldElement::Type getSubclassData() const
LLVM_ABI bool hasNoNaNs() const LLVM_READONLY
Determine whether the no-NaNs flag is set.
LLVM_ABI void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
LLVM_ABI void swapProfMetadata()
If the instruction has "branch_weights" MD_prof metadata and the MDNode has three operands (including...
LLVM_ABI bool isVolatile() const LLVM_READONLY
Return true if this instruction has a volatile memory access.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Bitfield::Element< uint16_t, 0, 15 > OpaqueField
Instruction(const Instruction &)=delete
friend class BasicBlock
Various leaf nodes.
void setSubclassData(typename BitfieldElement::Type Value)
This class represents a cast from an integer to a pointer.
LLVM_ABI IntToPtrInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI IntToPtrInst * cloneImpl() const
Clone an identical IntToPtrInst.
Invoke instruction.
BasicBlock * getUnwindDest() const
void setNormalDest(BasicBlock *B)
LLVM_ABI InvokeInst * cloneImpl() const
LLVM_ABI LandingPadInst * getLandingPadInst() const
Get the landingpad instruction from the landing pad block (the unwind destination).
void setUnwindDest(BasicBlock *B)
LLVM_ABI void updateProfWeight(uint64_t S, uint64_t T)
Updates profile metadata by scaling it by S / T.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
LLVMContextImpl *const pImpl
Definition LLVMContext.h:70
The landingpad instruction holds all of the information necessary to generate correct exception handling.
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
LLVM_ABI LandingPadInst * cloneImpl() const
static LLVM_ABI LandingPadInst * Create(Type *RetTy, unsigned NumReservedClauses, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedClauses is a hint for the number of incoming clauses that this landingpad will have (use 0 if you really have no idea).
LLVM_ABI void addClause(Constant *ClauseVal)
Add a catch or filter clause to the landing pad.
void setCleanup(bool V)
Indicate that this landingpad instruction is a cleanup.
void setAlignment(Align Align)
bool isVolatile() const
Return true if this is a load from a volatile memory location.
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this load instruction.
LLVM_ABI LoadInst * cloneImpl() const
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
void setVolatile(bool V)
Specify whether this is a volatile load or not.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
LLVM_ABI LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, InsertPosition InsertBefore)
Align getAlign() const
Return the alignment of the access that is being performed.
Metadata node.
Definition Metadata.h:1078
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1442
static MemoryEffectsBase readOnly()
Definition ModRef.h:130
bool onlyWritesMemory() const
Whether this function only (at most) writes memory.
Definition ModRef.h:226
bool doesNotAccessMemory() const
Whether this function accesses no memory.
Definition ModRef.h:220
static MemoryEffectsBase argMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Definition ModRef.h:140
static MemoryEffectsBase inaccessibleMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Definition ModRef.h:146
bool onlyAccessesInaccessibleMem() const
Whether this function only (at most) accesses inaccessible memory.
Definition ModRef.h:239
bool onlyAccessesArgPointees() const
Whether this function only (at most) accesses argument memory.
Definition ModRef.h:229
bool onlyReadsMemory() const
Whether this function only (at most) reads memory.
Definition ModRef.h:223
static MemoryEffectsBase writeOnly()
Definition ModRef.h:135
static MemoryEffectsBase inaccessibleOrArgMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Definition ModRef.h:163
static MemoryEffectsBase none()
Definition ModRef.h:125
bool onlyAccessesInaccessibleOrArgMem() const
Whether this function only (at most) accesses argument and inaccessible memory.
Definition ModRef.h:250
StringRef getTag() const
void allocHungoffUses(unsigned N)
const_block_iterator block_begin() const
LLVM_ABI void removeIncomingValueIf(function_ref< bool(unsigned)> Predicate, bool DeletePHIIfEmpty=true)
Remove all incoming values for which the predicate returns true.
void setIncomingBlock(unsigned i, BasicBlock *BB)
LLVM_ABI Value * removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty=true)
Remove an incoming value.
LLVM_ABI bool hasConstantOrUndefValue() const
Whether the specified PHI node always merges together the same value, assuming undefs are equal to a unique non-undef value.
void setIncomingValue(unsigned i, Value *V)
const_block_iterator block_end() const
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
LLVM_ABI Value * hasConstantValue() const
If the specified PHI node always merges together the same value, return the value,...
LLVM_ABI PHINode * cloneImpl() const
unsigned getNumIncomingValues() const
Return the number of incoming edges.
Class to represent pointers.
unsigned getAddressSpace() const
Return the address space of the Pointer type.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
This class represents a cast from a pointer to an address (non-capturing ptrtoint).
PtrToAddrInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
PtrToAddrInst * cloneImpl() const
Clone an identical PtrToAddrInst.
This class represents a cast from a pointer to an integer.
LLVM_ABI PtrToIntInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI PtrToIntInst * cloneImpl() const
Clone an identical PtrToIntInst.
Resume the propagation of an exception.
LLVM_ABI ResumeInst * cloneImpl() const
Return a value (possibly void), from a function.
LLVM_ABI ReturnInst * cloneImpl() const
This class represents a sign extension of integer types.
LLVM_ABI SExtInst * cloneImpl() const
Clone an identical SExtInst.
LLVM_ABI SExtInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
This class represents a cast from signed integer to floating point.
LLVM_ABI SIToFPInst * cloneImpl() const
Clone an identical SIToFPInst.
LLVM_ABI SIToFPInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
Class to represent scalable SIMD vectors.
LLVM_ABI SelectInst * cloneImpl() const
static LLVM_ABI const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, const Instruction *MDFrom=nullptr)
static LLVM_ABI bool isZeroEltSplatMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses all elements with the same value as the first element of exa...
ArrayRef< int > getShuffleMask() const
static LLVM_ABI bool isSpliceMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is a splice mask, concatenating the two inputs together and then ext...
int getMaskValue(unsigned Elt) const
Return the shuffle mask value of this instruction for the given element index.
LLVM_ABI ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI bool isValidOperands(const Value *V1, const Value *V2, const Value *Mask)
Return true if a shufflevector instruction can be formed with the specified operands.
static LLVM_ABI bool isSelectMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from its source vectors without lane crossings.
static LLVM_ABI bool isBitRotateMask(ArrayRef< int > Mask, unsigned EltSizeInBits, unsigned MinSubElts, unsigned MaxSubElts, unsigned &NumSubElts, unsigned &RotateAmt)
Checks if the shuffle is a bit rotation of the first operand across multiple subelements,...
VectorType * getType() const
Overload to return most specific vector type.
LLVM_ABI bool isIdentityWithExtract() const
Return true if this shuffle extracts the first N elements of exactly one source vector.
static LLVM_ABI bool isOneUseSingleSourceMask(ArrayRef< int > Mask, int VF)
Return true if this shuffle mask represents "clustered" mask of size VF, i.e.
LLVM_ABI bool isIdentityWithPadding() const
Return true if this shuffle lengthens exactly one source vector with undefs in the high elements.
static LLVM_ABI bool isSingleSourceMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from exactly one source vector.
LLVM_ABI bool isConcat() const
Return true if this shuffle concatenates its 2 source vectors.
static LLVM_ABI bool isDeInterleaveMaskOfFactor(ArrayRef< int > Mask, unsigned Factor, unsigned &Index)
Check if the mask is a DE-interleave mask of the given factor Factor like: <Index,...
LLVM_ABI ShuffleVectorInst * cloneImpl() const
static LLVM_ABI bool isIdentityMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from exactly one source vector without lane crossings.
static LLVM_ABI bool isExtractSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is an extract subvector mask.
LLVM_ABI void setShuffleMask(ArrayRef< int > Mask)
friend class Instruction
Iterator for Instructions in a `BasicBlock`.
LLVM_ABI bool isInterleave(unsigned Factor)
Return if this shuffle interleaves its two input vectors together.
static LLVM_ABI bool isReverseMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask swaps the order of elements from exactly one source vector.
static LLVM_ABI bool isTransposeMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask is a transpose mask.
LLVM_ABI void commute()
Swap the operands and adjust the mask to preserve the semantics of the instruction.
static LLVM_ABI bool isInsertSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &NumSubElts, int &Index)
Return true if this shuffle mask is an insert subvector mask.
static LLVM_ABI Constant * convertShuffleMaskForBitcode(ArrayRef< int > Mask, Type *ResultTy)
static LLVM_ABI bool isReplicationMask(ArrayRef< int > Mask, int &ReplicationFactor, int &VF)
Return true if this shuffle mask replicates each of the VF elements in a vector ReplicationFactor tim...
static LLVM_ABI bool isInterleaveMask(ArrayRef< int > Mask, unsigned Factor, unsigned NumInputElts, SmallVectorImpl< unsigned > &StartIndexes)
Return true if the mask interleaves one or more input vectors together.
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is small.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this store instruction.
Align getAlign() const
void setVolatile(bool V)
Specify whether this is a volatile store or not.
void setAlignment(Align Align)
friend class Instruction
Iterator for Instructions in a `BasicBlock`.
LLVM_ABI StoreInst * cloneImpl() const
LLVM_ABI StoreInst(Value *Val, Value *Ptr, InsertPosition InsertBefore)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this store instruction.
bool isVolatile() const
Return true if this is a store to a volatile memory location.
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this store instruction.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
Class to represent struct types.
LLVM_ABI void setSuccessorWeight(unsigned idx, CaseWeightOpt W)
LLVM_ABI Instruction::InstListType::iterator eraseFromParent()
Delegate the call to the underlying SwitchInst::eraseFromParent() and mark this object to not touch t...
LLVM_ABI void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W)
Delegate the call to the underlying SwitchInst::addCase() and set the specified branch weight for the...
LLVM_ABI CaseWeightOpt getSuccessorWeight(unsigned idx)
LLVM_ABI void replaceDefaultDest(SwitchInst::CaseIt I)
Replace the default destination by given case.
std::optional< uint32_t > CaseWeightOpt
LLVM_ABI SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I)
Delegate the call to the underlying SwitchInst::removeCase() and remove correspondent branch weight.
void setValue(ConstantInt *V) const
Sets the new value for current case.
void setSuccessor(BasicBlock *S) const
Sets the new successor for current case.
Multiway switch.
void allocHungoffUses(unsigned N)
LLVM_ABI SwitchInst * cloneImpl() const
LLVM_ABI void addCase(ConstantInt *OnVal, BasicBlock *Dest)
Add an entry to the switch instruction.
CaseIteratorImpl< CaseHandle > CaseIt
ConstantInt *const * case_values() const
unsigned getNumCases() const
Return the number of 'cases' in this switch instruction, excluding the default case.
LLVM_ABI CaseIt removeCase(CaseIt I)
This method removes the specified case and its successor from the switch instruction.
This class represents a truncation of integer types.
LLVM_ABI TruncInst * cloneImpl() const
Clone an identical TruncInst.
LLVM_ABI TruncInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition TypeSize.h:343
static constexpr TypeSize get(ScalarTy Quantity, bool Scalable)
Definition TypeSize.h:340
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:296
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition Type.h:246
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
LLVM_ABI bool isFirstClassType() const
Return true if the type is "first class", meaning it is a valid type for a Value.
Definition Type.cpp:249
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:197
bool isAggregateType() const
Return true if the type is an aggregate type.
Definition Type.h:304
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition Type.h:128
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:230
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
Definition Type.cpp:293
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:184
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition Type.h:270
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
bool isTokenTy() const
Return true if this is 'token'.
Definition Type.h:234
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition Type.h:225
This class represents a cast unsigned integer to floating point.
LLVM_ABI UIToFPInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI UIToFPInst * cloneImpl() const
Clone an identical UIToFPInst.
UnaryInstruction(Type *Ty, unsigned iType, Value *V, InsertPosition InsertBefore=nullptr)
Definition InstrTypes.h:62
static LLVM_ABI UnaryOperator * Create(UnaryOps Op, Value *S, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a unary instruction, given the opcode and an operand.
LLVM_ABI UnaryOperator(UnaryOps iType, Value *S, Type *Ty, const Twine &Name, InsertPosition InsertBefore)
LLVM_ABI UnaryOperator * cloneImpl() const
UnaryOps getOpcode() const
Definition InstrTypes.h:154
LLVM_ABI UnreachableInst(LLVMContext &C, InsertPosition InsertBefore=nullptr)
LLVM_ABI bool shouldLowerToTrap(bool TrapUnreachable, bool NoTrapAfterNoreturn) const
friend class Instruction
Iterator for Instructions in a `BasicBlock.
LLVM_ABI UnreachableInst * cloneImpl() const
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
LLVM_ABI void set(Value *Val)
Definition Value.h:905
Use * op_iterator
Definition User.h:280
const Use * getOperandList() const
Definition User.h:226
op_iterator op_begin()
Definition User.h:285
LLVM_ABI void allocHungoffUses(unsigned N, bool WithExtraValues=false)
Allocate the array of Uses, followed by a pointer (with bottom bit set) to the User.
Definition User.cpp:54
const Use & getOperandUse(unsigned i) const
Definition User.h:246
void setNumHungOffUseOperands(unsigned NumOps)
Subclasses with hung off uses need to manage the operand count themselves.
Definition User.h:266
Use & Op()
Definition User.h:197
LLVM_ABI void growHungoffUses(unsigned N, bool WithExtraValues=false)
Grow the number of hung off uses.
Definition User.cpp:71
Value * getOperand(unsigned i) const
Definition User.h:233
unsigned getNumOperands() const
Definition User.h:255
op_iterator op_end()
Definition User.h:287
VAArgInst(Value *List, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
LLVM_ABI VAArgInst * cloneImpl() const
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI Value(Type *Ty, unsigned scid)
Definition Value.cpp:53
unsigned char SubclassOptionalData
Hold subclass data that can be dropped.
Definition Value.h:85
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
Definition Value.cpp:397
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:553
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.cpp:1106
unsigned NumUserOperands
Definition Value.h:109
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
Base class of all SIMD vector types.
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
This class represents zero extension of integer types.
LLVM_ABI ZExtInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI ZExtInst * cloneImpl() const
Clone an identical ZExtInst.
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
An efficient, type-erasing, non-owning reference to a callable.
typename base_list_type::iterator iterator
Definition ilist.h:121
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
CallInst * Call
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
bool match(Val *V, const Pattern &P)
cstfp_pred_ty< is_non_zero_not_denormal_fp > m_NonZeroNotDenormalFP()
Match a floating-point non-zero that is not a denormal.
initializer< Ty > init(const Ty &Val)
@ Switch
The "resume-switch" lowering, where there are separate resume and destroy functions that are shared b...
Definition CoroShape.h:31
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:667
NodeAddr< UseNode * > Use
Definition RDFGraph.h:385
Context & getContext() const
Definition BasicBlock.h:99
This is an optimization pass for GlobalISel generic memory operations.
@ Offset
Definition DWP.cpp:532
auto seq_inclusive(T Begin, T End)
Iterate over an integral type from Begin to End inclusive.
Definition Sequence.h:325
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1737
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition STLExtras.h:1667
unsigned getPointerAddressSpace(const Type *T)
Definition SPIRVUtils.h:367
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
FunctionAddr VTableAddr uintptr_t uintptr_t Int32Ty
Definition InstrProf.h:296
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
LLVM_ABI MDNode * getBranchWeightMDNode(const Instruction &I)
Get the branch weights metadata node.
MemoryEffectsBase< IRMemLocation > MemoryEffects
Summary of how a function affects memory in the program.
Definition ModRef.h:301
std::enable_if_t< std::is_unsigned_v< T >, std::optional< T > > checkedMulUnsigned(T LHS, T RHS)
Multiply two unsigned integers LHS and RHS.
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
auto reverse(ContainerTy &&C)
Definition STLExtras.h:406
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
bool isPointerTy(const Type *T)
Definition SPIRVUtils.h:361
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
constexpr int PoisonMaskElem
LLVM_ABI unsigned getNumBranchWeights(const MDNode &ProfileData)
AtomicOrdering
Atomic ordering for LLVM's memory model.
OperandBundleDefT< Value * > OperandBundleDef
Definition AutoUpgrade.h:34
@ Mul
Product of integers.
@ Xor
Bitwise or logical XOR of integers.
@ FMul
Product of floats.
@ Sub
Subtraction of integers.
@ Add
Sum of integers.
@ FAdd
Sum of floats.
DWARFExpression::Operation Op
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
OutputIt copy(R &&Range, OutputIt Out)
Definition STLExtras.h:1883
constexpr unsigned BitWidth
LLVM_ABI bool extractBranchWeights(const MDNode *ProfileData, SmallVectorImpl< uint32_t > &Weights)
Extract branch weights from MD_prof metadata.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1945
bool capturesAnything(CaptureComponents CC)
Definition ModRef.h:324
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list.
Definition STLExtras.h:2156
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
Definition Sequence.h:305
@ Default
The result values are uniform if and only if all operands are uniform.
Definition Uniformity.h:20
LLVM_ABI void scaleProfData(Instruction &I, uint64_t S, uint64_t T)
Scaling the profile data attached to 'I' using the ratio of S/T.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
Summary of memprof metadata on allocations.
Used to keep track of an operand bundle.
uint32_t End
The index in the Use& vector where operands for this operand bundle ends.
uint32_t Begin
The index in the Use& vector where operands for this operand bundle starts.
Incoming for lane maks phi as machine instruction, incoming register Reg and incoming block Block are...
static LLVM_ABI std::optional< bool > eq(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_EQ result.
static LLVM_ABI std::optional< bool > ne(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_NE result.
static LLVM_ABI std::optional< bool > sge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SGE result.
static LLVM_ABI std::optional< bool > ugt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGT result.
static LLVM_ABI std::optional< bool > slt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SLT result.
static LLVM_ABI std::optional< bool > ult(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_ULT result.
static LLVM_ABI std::optional< bool > ule(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_ULE result.
static LLVM_ABI std::optional< bool > sle(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SLE result.
static LLVM_ABI std::optional< bool > sgt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SGT result.
static LLVM_ABI std::optional< bool > uge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGE result.
Matching combinators.
A MapVector that performs no allocations if smaller than a certain size.
Definition MapVector.h:276
Indicates this User has operands co-allocated.
Definition User.h:60
Indicates this User has operands and a descriptor co-allocated .
Definition User.h:66