TargetTransformInfo.cpp (LLVM 23.0.0git)
//===- llvm/Analysis/TargetTransformInfo.cpp ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfoImpl.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include <optional>
#include <utility>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "tti"

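// These flags override the corresponding target hooks from the command line of
// any tool that links this library; illustrative invocation (assuming the opt
// driver and its cost-model printing pass):
//   opt -passes='print<cost-model>' -cache-line-size=64 -disable-output in.ll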
static cl::opt<bool> EnableReduxCost("costmodel-reduxcost", cl::init(false),
                                     cl::Hidden,
                                     cl::desc("Recognize reduction patterns."));

static cl::opt<unsigned> CacheLineSize(
    "cache-line-size", cl::init(0), cl::Hidden,
    cl::desc("Use this to override the target cache line size when "
             "specified by the user."));

static cl::opt<unsigned> MinPageSize(
    "min-page-size", cl::init(0), cl::Hidden,
    cl::desc("Use this to override the target's minimum page size."));

static cl::opt<unsigned> PredictableBranchThreshold(
    "predictable-branch-threshold", cl::init(99), cl::Hidden,
    cl::desc(
        "Use this to override the target's predictable branch threshold (%)."));

namespace {
/// No-op implementation of the TTI interface using the utility base
/// classes.
///
/// This is used when no target specific information is available.
struct NoTTIImpl : TargetTransformInfoImplCRTPBase<NoTTIImpl> {
  explicit NoTTIImpl(const DataLayout &DL)
      : TargetTransformInfoImplCRTPBase<NoTTIImpl>(DL) {}
};
} // namespace

TargetTransformInfo::TargetTransformInfo(
    std::unique_ptr<const TargetTransformInfoImplBase> Impl)
    : TTIImpl(std::move(Impl)) {}

bool HardwareLoopInfo::canAnalyze(LoopInfo &LI) {
  // If the loop has irreducible control flow, it cannot be converted to a
  // hardware loop.
  LoopBlocksRPO RPOT(L);
  RPOT.perform(&LI);
  if (containsIrreducibleCFG<const BasicBlock *>(RPOT, LI))
    return false;
  return true;
}

IntrinsicCostAttributes::IntrinsicCostAttributes(
    Intrinsic::ID Id, const CallBase &CI, InstructionCost ScalarizationCost,
    bool TypeBasedOnly)
    : II(dyn_cast<IntrinsicInst>(&CI)), RetTy(CI.getType()), IID(Id),
      ScalarizationCost(ScalarizationCost) {

  if (const auto *FPMO = dyn_cast<FPMathOperator>(&CI))
    FMF = FPMO->getFastMathFlags();

  if (!TypeBasedOnly)
    Arguments.insert(Arguments.begin(), CI.arg_begin(), CI.arg_end());
  FunctionType *FTy = CI.getCalledFunction()->getFunctionType();
  ParamTys.insert(ParamTys.begin(), FTy->param_begin(), FTy->param_end());
}

IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id, Type *RTy,
                                                 ArrayRef<Type *> Tys,
                                                 FastMathFlags Flags,
                                                 const IntrinsicInst *I,
                                                 InstructionCost ScalarCost)
    : II(I), RetTy(RTy), IID(Id), FMF(Flags), ScalarizationCost(ScalarCost) {
  ParamTys.insert(ParamTys.begin(), Tys.begin(), Tys.end());
}

IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id, Type *Ty,
                                                 ArrayRef<const Value *> Args)
    : RetTy(Ty), IID(Id) {

  Arguments.insert(Arguments.begin(), Args.begin(), Args.end());
  ParamTys.reserve(Arguments.size());
  for (const Value *Argument : Arguments)
    ParamTys.push_back(Argument->getType());
}

IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id, Type *RTy,
                                                 ArrayRef<const Value *> Args,
                                                 ArrayRef<Type *> Tys,
                                                 FastMathFlags Flags,
                                                 const IntrinsicInst *I,
                                                 InstructionCost ScalarCost)
    : II(I), RetTy(RTy), IID(Id), FMF(Flags), ScalarizationCost(ScalarCost) {
  ParamTys.insert(ParamTys.begin(), Tys.begin(), Tys.end());
  Arguments.insert(Arguments.begin(), Args.begin(), Args.end());
}

HardwareLoopInfo::HardwareLoopInfo(Loop *L) : L(L) {
  // Match default options:
  // - hardware-loop-counter-bitwidth = 32
  // - hardware-loop-decrement = 1
  CountType = Type::getInt32Ty(L->getHeader()->getContext());
  LoopDecrement = ConstantInt::get(CountType, 1);
}

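// An exiting block makes the loop a hardware-loop candidate only if it yields
// a loop-invariant, non-zero exit count that fits in CountType, is not nested
// in an inner loop, executes on every iteration, and terminates in a
// conditional branch; each check below filters on one of these conditions.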
bool HardwareLoopInfo::isHardwareLoopCandidate(ScalarEvolution &SE,
                                               LoopInfo &LI, DominatorTree &DT,
                                               bool ForceNestedLoop,
                                               bool ForceHardwareLoopPHI) {
  SmallVector<BasicBlock *, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  for (BasicBlock *BB : ExitingBlocks) {
    // If we pass the updated counter back through a phi, we need to know
    // which latch the updated value will be coming from.
    if (!L->isLoopLatch(BB)) {
      if (ForceHardwareLoopPHI || CounterInReg)
        continue;
    }

    const SCEV *EC = SE.getExitCount(L, BB);
    if (isa<SCEVCouldNotCompute>(EC))
      continue;
    if (const SCEVConstant *ConstEC = dyn_cast<SCEVConstant>(EC)) {
      if (ConstEC->getValue()->isZero())
        continue;
    } else if (!SE.isLoopInvariant(EC, L))
      continue;

    if (SE.getTypeSizeInBits(EC->getType()) > CountType->getBitWidth())
      continue;

    // If this exiting block is contained in a nested loop, it is not eligible
    // for insertion of the branch-and-decrement since the inner loop would
    // end up messing up the value in the CTR.
    if (!IsNestingLegal && LI.getLoopFor(BB) != L && !ForceNestedLoop)
      continue;

    // We now have a loop-invariant count of loop iterations (which is not the
    // constant zero) for which we know that this loop will not exit via this
    // exiting block.

    // We need to make sure that this block will run on every loop iteration.
    // For this to be true, we must dominate all blocks with backedges. Such
    // blocks are in-loop predecessors to the header block.
    bool NotAlways = false;
    for (BasicBlock *Pred : predecessors(L->getHeader())) {
      if (!L->contains(Pred))
        continue;

      if (!DT.dominates(BB, Pred)) {
        NotAlways = true;
        break;
      }
    }

    if (NotAlways)
      continue;

    // Make sure this block ends with a conditional branch.
    Instruction *TI = BB->getTerminator();
    if (!TI)
      continue;

    if (CondBrInst *BI = dyn_cast<CondBrInst>(TI))
      ExitBranch = BI;
    else
      continue;

    // Note that this block may not be the loop latch block, even if the loop
    // has a latch block.
    ExitBlock = BB;
    ExitCount = EC;
    break;
  }

  if (!ExitBlock)
    return false;
  return true;
}

TargetTransformInfo::TargetTransformInfo(const DataLayout &DL)
    : TTIImpl(std::make_unique<NoTTIImpl>(DL)) {}

TargetTransformInfo::~TargetTransformInfo() = default;

TargetTransformInfo::TargetTransformInfo(TargetTransformInfo &&Arg)
    : TTIImpl(std::move(Arg.TTIImpl)) {}

TargetTransformInfo &
TargetTransformInfo::operator=(TargetTransformInfo &&RHS) {
  TTIImpl = std::move(RHS.TTIImpl);
  return *this;
}

unsigned TargetTransformInfo::getInliningThresholdMultiplier() const {
  return TTIImpl->getInliningThresholdMultiplier();
}

unsigned
TargetTransformInfo::getInliningCostBenefitAnalysisSavingsMultiplier() const {
  return TTIImpl->getInliningCostBenefitAnalysisSavingsMultiplier();
}

unsigned
TargetTransformInfo::getInliningCostBenefitAnalysisProfitableMultiplier()
    const {
  return TTIImpl->getInliningCostBenefitAnalysisProfitableMultiplier();
}

int TargetTransformInfo::getInliningLastCallToStaticBonus() const {
  return TTIImpl->getInliningLastCallToStaticBonus();
}

unsigned
TargetTransformInfo::adjustInliningThreshold(const CallBase *CB) const {
  return TTIImpl->adjustInliningThreshold(CB);
}

unsigned TargetTransformInfo::getCallerAllocaCost(const CallBase *CB,
                                                  const AllocaInst *AI) const {
  return TTIImpl->getCallerAllocaCost(CB, AI);
}

int TargetTransformInfo::getInlinerVectorBonusPercent() const {
  return TTIImpl->getInlinerVectorBonusPercent();
}

InstructionCost TargetTransformInfo::getGEPCost(
    Type *PointeeType, const Value *Ptr, ArrayRef<const Value *> Operands,
    Type *AccessType, TTI::TargetCostKind CostKind) const {
  return TTIImpl->getGEPCost(PointeeType, Ptr, Operands, AccessType, CostKind);
}

InstructionCost TargetTransformInfo::getPointersChainCost(
    ArrayRef<const Value *> Ptrs, const Value *Base,
    const TTI::PointersChainInfo &Info, Type *AccessTy,
    TTI::TargetCostKind CostKind) const {
  assert((Base || !Info.isSameBase()) &&
         "If pointers have same base address it has to be provided.");
  return TTIImpl->getPointersChainCost(Ptrs, Base, Info, AccessTy, CostKind);
}

unsigned TargetTransformInfo::getEstimatedNumberOfCaseClusters(
    const SwitchInst &SI, unsigned &JTSize, ProfileSummaryInfo *PSI,
    BlockFrequencyInfo *BFI) const {
  return TTIImpl->getEstimatedNumberOfCaseClusters(SI, JTSize, PSI, BFI);
}

InstructionCost
TargetTransformInfo::getInstructionCost(const User *U,
                                        ArrayRef<const Value *> Operands,
                                        enum TargetCostKind CostKind) const {
  InstructionCost Cost = TTIImpl->getInstructionCost(U, Operands, CostKind);
  assert((CostKind == TTI::TCK_RecipThroughput || Cost >= 0) &&
         "TTI should not produce negative costs!");
  return Cost;
}

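// A command-line override, when present, wins over the target hook; the
// threshold is interpreted as a branch probability in percent (default 99%).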
BranchProbability TargetTransformInfo::getPredictableBranchThreshold() const {
  return PredictableBranchThreshold.getNumOccurrences() > 0
             ? BranchProbability(PredictableBranchThreshold, 100)
             : TTIImpl->getPredictableBranchThreshold();
}

InstructionCost TargetTransformInfo::getBranchMispredictPenalty() const {
  return TTIImpl->getBranchMispredictPenalty();
}

bool TargetTransformInfo::hasBranchDivergence(const Function *F) const {
  return TTIImpl->hasBranchDivergence(F);
}

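// A call marked with the nodivergencesource attribute never introduces
// divergence by itself, so the target's verdict can be relaxed for such calls.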
TargetTransformInfo::ValueUniformity
TargetTransformInfo::getValueUniformity(const Value *V) const {
  ValueUniformity VU = TTIImpl->getValueUniformity(V);
  if (const auto *Call = dyn_cast<CallBase>(V)) {
    if (VU == ValueUniformity::NeverUniform &&
        Call->hasFnAttr(Attribute::NoDivergenceSource))
      VU = ValueUniformity::Default;
  }
  return VU;
}

bool TargetTransformInfo::isValidAddrSpaceCast(unsigned FromAS,
                                               unsigned ToAS) const {
  return TTIImpl->isValidAddrSpaceCast(FromAS, ToAS);
}

bool TargetTransformInfo::addrspacesMayAlias(unsigned FromAS,
                                             unsigned ToAS) const {
  return TTIImpl->addrspacesMayAlias(FromAS, ToAS);
}

unsigned TargetTransformInfo::getFlatAddressSpace() const {
  return TTIImpl->getFlatAddressSpace();
}

bool TargetTransformInfo::collectFlatAddressOperands(
    SmallVectorImpl<int> &OpIndexes, Intrinsic::ID IID) const {
  return TTIImpl->collectFlatAddressOperands(OpIndexes, IID);
}

bool TargetTransformInfo::isNoopAddrSpaceCast(unsigned FromAS,
                                              unsigned ToAS) const {
  return TTIImpl->isNoopAddrSpaceCast(FromAS, ToAS);
}

std::pair<KnownBits, KnownBits>
TargetTransformInfo::computeKnownBitsAddrSpaceCast(unsigned ToAS,
                                                   const Value &PtrOp) const {
  return TTIImpl->computeKnownBitsAddrSpaceCast(ToAS, PtrOp);
}

KnownBits TargetTransformInfo::computeKnownBitsAddrSpaceCast(
    unsigned FromAS, unsigned ToAS, const KnownBits &FromPtrBits) const {
  return TTIImpl->computeKnownBitsAddrSpaceCast(FromAS, ToAS, FromPtrBits);
}

APInt TargetTransformInfo::getAddrSpaceCastPreservedPtrMask(
    unsigned SrcAS, unsigned DstAS) const {
  return TTIImpl->getAddrSpaceCastPreservedPtrMask(SrcAS, DstAS);
}

bool TargetTransformInfo::canHaveNonUndefGlobalInitializerInAddressSpace(
    unsigned AS) const {
  return TTIImpl->canHaveNonUndefGlobalInitializerInAddressSpace(AS);
}

unsigned TargetTransformInfo::getAssumedAddrSpace(const Value *V) const {
  return TTIImpl->getAssumedAddrSpace(V);
}

bool TargetTransformInfo::isSingleThreaded() const {
  return TTIImpl->isSingleThreaded();
}

std::pair<const Value *, unsigned>
TargetTransformInfo::getPredicatedAddrSpace(const Value *V) const {
  return TTIImpl->getPredicatedAddrSpace(V);
}

Value *TargetTransformInfo::rewriteIntrinsicWithAddressSpace(
    IntrinsicInst *II, Value *OldV, Value *NewV) const {
  return TTIImpl->rewriteIntrinsicWithAddressSpace(II, OldV, NewV);
}

bool TargetTransformInfo::isLoweredToCall(const Function *F) const {
  return TTIImpl->isLoweredToCall(F);
}

bool TargetTransformInfo::isHardwareLoopProfitable(
    Loop *L, ScalarEvolution &SE, AssumptionCache &AC,
    TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo) const {
  return TTIImpl->isHardwareLoopProfitable(L, SE, AC, LibInfo, HWLoopInfo);
}

unsigned TargetTransformInfo::getEpilogueVectorizationMinVF() const {
  return TTIImpl->getEpilogueVectorizationMinVF();
}

bool TargetTransformInfo::preferTailFoldingOverEpilogue(
    TailFoldingInfo *TFI) const {
  return TTIImpl->preferTailFoldingOverEpilogue(TFI);
}

TailFoldingStyle TargetTransformInfo::getPreferredTailFoldingStyle() const {
  return TTIImpl->getPreferredTailFoldingStyle();
}

std::optional<Instruction *>
TargetTransformInfo::instCombineIntrinsic(InstCombiner &IC,
                                          IntrinsicInst &II) const {
  return TTIImpl->instCombineIntrinsic(IC, II);
}

std::optional<Value *> TargetTransformInfo::simplifyDemandedUseBitsIntrinsic(
    InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known,
    bool &KnownBitsComputed) const {
  return TTIImpl->simplifyDemandedUseBitsIntrinsic(IC, II, DemandedMask, Known,
                                                   KnownBitsComputed);
}

std::optional<Value *> TargetTransformInfo::simplifyDemandedVectorEltsIntrinsic(
    InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
    APInt &UndefElts2, APInt &UndefElts3,
    std::function<void(Instruction *, unsigned, APInt, APInt &)>
        SimplifyAndSetOp) const {
  return TTIImpl->simplifyDemandedVectorEltsIntrinsic(
      IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
      SimplifyAndSetOp);
}

void TargetTransformInfo::getUnrollingPreferences(
    Loop *L, ScalarEvolution &SE, UnrollingPreferences &UP,
    OptimizationRemarkEmitter *ORE) const {
  return TTIImpl->getUnrollingPreferences(L, SE, UP, ORE);
}

void TargetTransformInfo::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                                PeelingPreferences &PP) const {
  return TTIImpl->getPeelingPreferences(L, SE, PP);
}

bool TargetTransformInfo::isLegalAddImmediate(int64_t Imm) const {
  return TTIImpl->isLegalAddImmediate(Imm);
}

bool TargetTransformInfo::isLegalAddScalableImmediate(int64_t Imm) const {
  return TTIImpl->isLegalAddScalableImmediate(Imm);
}

bool TargetTransformInfo::isLegalICmpImmediate(int64_t Imm) const {
  return TTIImpl->isLegalICmpImmediate(Imm);
}

bool TargetTransformInfo::isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
                                                int64_t BaseOffset,
                                                bool HasBaseReg, int64_t Scale,
                                                unsigned AddrSpace,
                                                Instruction *I,
                                                int64_t ScalableOffset) const {
  return TTIImpl->isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg,
                                        Scale, AddrSpace, I, ScalableOffset);
}

bool TargetTransformInfo::isLSRCostLess(const LSRCost &C1,
                                        const LSRCost &C2) const {
  return TTIImpl->isLSRCostLess(C1, C2);
}

bool TargetTransformInfo::isNumRegsMajorCostOfLSR() const {
  return TTIImpl->isNumRegsMajorCostOfLSR();
}

bool TargetTransformInfo::shouldDropLSRSolutionIfLessProfitable() const {
  return TTIImpl->shouldDropLSRSolutionIfLessProfitable();
}

bool TargetTransformInfo::isProfitableLSRChainElement(Instruction *I) const {
  return TTIImpl->isProfitableLSRChainElement(I);
}

bool TargetTransformInfo::canMacroFuseCmp() const {
  return TTIImpl->canMacroFuseCmp();
}

bool TargetTransformInfo::canSaveCmp(Loop *L, BranchInst **BI,
                                     ScalarEvolution *SE, LoopInfo *LI,
                                     DominatorTree *DT, AssumptionCache *AC,
                                     TargetLibraryInfo *LibInfo) const {
  return TTIImpl->canSaveCmp(L, BI, SE, LI, DT, AC, LibInfo);
}

TTI::AddressingModeKind
TargetTransformInfo::getPreferredAddressingMode(const Loop *L,
                                                ScalarEvolution *SE) const {
  return TTIImpl->getPreferredAddressingMode(L, SE);
}

bool TargetTransformInfo::isLegalMaskedStore(Type *DataType, Align Alignment,
                                             unsigned AddressSpace,
                                             TTI::MaskKind MaskKind) const {
  return TTIImpl->isLegalMaskedStore(DataType, Alignment, AddressSpace,
                                     MaskKind);
}

bool TargetTransformInfo::isLegalMaskedLoad(Type *DataType, Align Alignment,
                                            unsigned AddressSpace,
                                            TTI::MaskKind MaskKind) const {
  return TTIImpl->isLegalMaskedLoad(DataType, Alignment, AddressSpace,
                                    MaskKind);
}

bool TargetTransformInfo::isLegalNTStore(Type *DataType,
                                         Align Alignment) const {
  return TTIImpl->isLegalNTStore(DataType, Alignment);
}

bool TargetTransformInfo::isLegalNTLoad(Type *DataType, Align Alignment) const {
  return TTIImpl->isLegalNTLoad(DataType, Alignment);
}

bool TargetTransformInfo::isLegalBroadcastLoad(Type *ElementTy,
                                               ElementCount NumElements) const {
  return TTIImpl->isLegalBroadcastLoad(ElementTy, NumElements);
}

bool TargetTransformInfo::isLegalMaskedGather(Type *DataType,
                                              Align Alignment) const {
  return TTIImpl->isLegalMaskedGather(DataType, Alignment);
}

bool TargetTransformInfo::isLegalAltInstr(
    VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,
    const SmallBitVector &OpcodeMask) const {
  return TTIImpl->isLegalAltInstr(VecTy, Opcode0, Opcode1, OpcodeMask);
}

bool TargetTransformInfo::isLegalMaskedScatter(Type *DataType,
                                               Align Alignment) const {
  return TTIImpl->isLegalMaskedScatter(DataType, Alignment);
}

bool TargetTransformInfo::forceScalarizeMaskedGather(VectorType *DataType,
                                                     Align Alignment) const {
  return TTIImpl->forceScalarizeMaskedGather(DataType, Alignment);
}

bool TargetTransformInfo::forceScalarizeMaskedScatter(VectorType *DataType,
                                                      Align Alignment) const {
  return TTIImpl->forceScalarizeMaskedScatter(DataType, Alignment);
}

bool TargetTransformInfo::isLegalMaskedCompressStore(Type *DataType,
                                                     Align Alignment) const {
  return TTIImpl->isLegalMaskedCompressStore(DataType, Alignment);
}

bool TargetTransformInfo::isLegalMaskedExpandLoad(Type *DataType,
                                                  Align Alignment) const {
  return TTIImpl->isLegalMaskedExpandLoad(DataType, Alignment);
}

bool TargetTransformInfo::isLegalStridedLoadStore(Type *DataType,
                                                  Align Alignment) const {
  return TTIImpl->isLegalStridedLoadStore(DataType, Alignment);
}

bool TargetTransformInfo::isLegalInterleavedAccessType(
    VectorType *VTy, unsigned Factor, Align Alignment,
    unsigned AddrSpace) const {
  return TTIImpl->isLegalInterleavedAccessType(VTy, Factor, Alignment,
                                               AddrSpace);
}

bool TargetTransformInfo::isLegalMaskedVectorHistogram(Type *AddrType,
                                                       Type *DataType) const {
  return TTIImpl->isLegalMaskedVectorHistogram(AddrType, DataType);
}

bool TargetTransformInfo::enableOrderedReductions() const {
  return TTIImpl->enableOrderedReductions();
}

bool TargetTransformInfo::hasDivRemOp(Type *DataType, bool IsSigned) const {
  return TTIImpl->hasDivRemOp(DataType, IsSigned);
}

bool TargetTransformInfo::hasVolatileVariant(Instruction *I,
                                             unsigned AddrSpace) const {
  return TTIImpl->hasVolatileVariant(I, AddrSpace);
}

bool TargetTransformInfo::prefersVectorizedAddressing() const {
  return TTIImpl->prefersVectorizedAddressing();
}

InstructionCost TargetTransformInfo::getScalingFactorCost(
    Type *Ty, GlobalValue *BaseGV, StackOffset BaseOffset, bool HasBaseReg,
    int64_t Scale, unsigned AddrSpace) const {
  InstructionCost Cost = TTIImpl->getScalingFactorCost(
      Ty, BaseGV, BaseOffset, HasBaseReg, Scale, AddrSpace);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

bool TargetTransformInfo::LSRWithInstrQueries() const {
  return TTIImpl->LSRWithInstrQueries();
}

bool TargetTransformInfo::isTruncateFree(Type *Ty1, Type *Ty2) const {
  return TTIImpl->isTruncateFree(Ty1, Ty2);
}

bool TargetTransformInfo::isProfitableToHoist(Instruction *I) const {
  return TTIImpl->isProfitableToHoist(I);
}

bool TargetTransformInfo::useAA() const { return TTIImpl->useAA(); }

bool TargetTransformInfo::isTypeLegal(Type *Ty) const {
  return TTIImpl->isTypeLegal(Ty);
}

unsigned TargetTransformInfo::getRegUsageForType(Type *Ty) const {
  return TTIImpl->getRegUsageForType(Ty);
}

bool TargetTransformInfo::shouldBuildLookupTables() const {
  return TTIImpl->shouldBuildLookupTables();
}

bool TargetTransformInfo::shouldBuildLookupTablesForConstant(
    Constant *C) const {
  return TTIImpl->shouldBuildLookupTablesForConstant(C);
}

bool TargetTransformInfo::shouldBuildRelLookupTables() const {
  return TTIImpl->shouldBuildRelLookupTables();
}

bool TargetTransformInfo::useColdCCForColdCall(Function &F) const {
  return TTIImpl->useColdCCForColdCall(F);
}

bool TargetTransformInfo::useFastCCForInternalCall(Function &F) const {
  return TTIImpl->useFastCCForInternalCall(F);
}

bool TargetTransformInfo::isTargetIntrinsicWithScalarOpAtArg(
    Intrinsic::ID ID, unsigned ScalarOpdIdx) const {
  return TTIImpl->isTargetIntrinsicWithScalarOpAtArg(ID, ScalarOpdIdx);
}

bool TargetTransformInfo::isTargetIntrinsicWithOverloadTypeAtArg(
    Intrinsic::ID ID, int OpdIdx) const {
  return TTIImpl->isTargetIntrinsicWithOverloadTypeAtArg(ID, OpdIdx);
}

bool TargetTransformInfo::isTargetIntrinsicWithStructReturnOverloadAtField(
    Intrinsic::ID ID, int RetIdx) const {
  return TTIImpl->isTargetIntrinsicWithStructReturnOverloadAtField(ID, RetIdx);
}

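// Derive the context in which an insert/extract element is used: an insert fed
// by a single-use load maps to Load, an extract whose only user is a store
// maps to Store, and everything else maps to None.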
TargetTransformInfo::VectorInstrContext
TargetTransformInfo::getVectorInstrContext(const Instruction *I) {
  if (!I)
    return VectorInstrContext::None;

  // For inserts, check if the value being inserted comes from a single-use
  // load.
  if (isa<InsertElementInst>(I) && isa<LoadInst>(I->getOperand(1)) &&
      I->getOperand(1)->hasOneUse())
    return VectorInstrContext::Load;

  // For extracts, check if it has a single use that is a store.
  if (isa<ExtractElementInst>(I) && I->hasOneUse() &&
      isa<StoreInst>(*I->user_begin()))
    return VectorInstrContext::Store;

  return VectorInstrContext::None;
}

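// Scalarization overhead is the cost of the insertelement/extractelement
// instructions needed to materialize the demanded lanes when a vector
// operation has to be emulated with scalar operations.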
InstructionCost TargetTransformInfo::getScalarizationOverhead(
    VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract,
    TTI::TargetCostKind CostKind, bool ForPoisonSrc, ArrayRef<Value *> VL,
    TTI::VectorInstrContext VIC) const {
  return TTIImpl->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract,
                                           CostKind, ForPoisonSrc, VL, VIC);
}

InstructionCost TargetTransformInfo::getOperandsScalarizationOverhead(
    ArrayRef<Type *> Tys, TTI::TargetCostKind CostKind,
    TTI::VectorInstrContext VIC) const {
  return TTIImpl->getOperandsScalarizationOverhead(Tys, CostKind, VIC);
}

bool TargetTransformInfo::supportsEfficientVectorElementLoadStore() const {
  return TTIImpl->supportsEfficientVectorElementLoadStore();
}

bool TargetTransformInfo::supportsTailCalls() const {
  return TTIImpl->supportsTailCalls();
}

bool TargetTransformInfo::supportsTailCallFor(const CallBase *CB) const {
  return TTIImpl->supportsTailCallFor(CB);
}

bool TargetTransformInfo::enableAggressiveInterleaving(
    bool LoopHasReductions) const {
  return TTIImpl->enableAggressiveInterleaving(LoopHasReductions);
}

TargetTransformInfo::MemCmpExpansionOptions
TargetTransformInfo::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
  return TTIImpl->enableMemCmpExpansion(OptSize, IsZeroCmp);
}

bool TargetTransformInfo::enableSelectOptimize() const {
  return TTIImpl->enableSelectOptimize();
}

bool TargetTransformInfo::shouldTreatInstructionLikeSelect(
    const Instruction *I) const {
  return TTIImpl->shouldTreatInstructionLikeSelect(I);
}

bool TargetTransformInfo::enableInterleavedAccessVectorization() const {
  return TTIImpl->enableInterleavedAccessVectorization();
}

bool TargetTransformInfo::enableMaskedInterleavedAccessVectorization() const {
  return TTIImpl->enableMaskedInterleavedAccessVectorization();
}

bool TargetTransformInfo::isFPVectorizationPotentiallyUnsafe() const {
  return TTIImpl->isFPVectorizationPotentiallyUnsafe();
}

bool
TargetTransformInfo::allowsMisalignedMemoryAccesses(LLVMContext &Context,
                                                    unsigned BitWidth,
                                                    unsigned AddressSpace,
                                                    Align Alignment,
                                                    unsigned *Fast) const {
  return TTIImpl->allowsMisalignedMemoryAccesses(Context, BitWidth,
                                                 AddressSpace, Alignment, Fast);
}

TargetTransformInfo::PopcntSupportKind
TargetTransformInfo::getPopcntSupport(unsigned IntTyWidthInBit) const {
  return TTIImpl->getPopcntSupport(IntTyWidthInBit);
}

bool TargetTransformInfo::haveFastSqrt(Type *Ty) const {
  return TTIImpl->haveFastSqrt(Ty);
}

bool TargetTransformInfo::isExpensiveToSpeculativelyExecute(
    const Instruction *I) const {
  return TTIImpl->isExpensiveToSpeculativelyExecute(I);
}

bool TargetTransformInfo::isFCmpOrdCheaperThanFCmpZero(Type *Ty) const {
  return TTIImpl->isFCmpOrdCheaperThanFCmpZero(Ty);
}

InstructionCost TargetTransformInfo::getFPOpCost(Type *Ty) const {
  InstructionCost Cost = TTIImpl->getFPOpCost(Ty);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getIntImmCodeSizeCost(unsigned Opcode,
                                                           unsigned Idx,
                                                           const APInt &Imm,
                                                           Type *Ty) const {
  InstructionCost Cost = TTIImpl->getIntImmCodeSizeCost(Opcode, Idx, Imm, Ty);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost
TargetTransformInfo::getIntImmCost(const APInt &Imm, Type *Ty,
                                   TTI::TargetCostKind CostKind) const {
  InstructionCost Cost = TTIImpl->getIntImmCost(Imm, Ty, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getIntImmCostInst(
    unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty,
    TTI::TargetCostKind CostKind, Instruction *Inst) const {
  InstructionCost Cost =
      TTIImpl->getIntImmCostInst(Opcode, Idx, Imm, Ty, CostKind, Inst);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost
TargetTransformInfo::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                         const APInt &Imm, Type *Ty,
                                         TTI::TargetCostKind CostKind) const {
  InstructionCost Cost =
      TTIImpl->getIntImmCostIntrin(IID, Idx, Imm, Ty, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

bool TargetTransformInfo::preferToKeepConstantsAttached(
    const Instruction &Inst, const Function &Fn) const {
  return TTIImpl->preferToKeepConstantsAttached(Inst, Fn);
}

unsigned TargetTransformInfo::getNumberOfRegisters(unsigned ClassID) const {
  return TTIImpl->getNumberOfRegisters(ClassID);
}

bool TargetTransformInfo::hasConditionalLoadStoreForType(Type *Ty,
                                                         bool IsStore) const {
  return TTIImpl->hasConditionalLoadStoreForType(Ty, IsStore);
}

unsigned TargetTransformInfo::getRegisterClassForType(bool Vector,
                                                      Type *Ty) const {
  return TTIImpl->getRegisterClassForType(Vector, Ty);
}

const char *TargetTransformInfo::getRegisterClassName(unsigned ClassID) const {
  return TTIImpl->getRegisterClassName(ClassID);
}

InstructionCost TargetTransformInfo::getRegisterClassSpillCost(
    unsigned ClassID, TTI::TargetCostKind CostKind) const {
  return TTIImpl->getRegisterClassSpillCost(ClassID, CostKind);
}

InstructionCost TargetTransformInfo::getRegisterClassReloadCost(
    unsigned ClassID, TTI::TargetCostKind CostKind) const {
  return TTIImpl->getRegisterClassReloadCost(ClassID, CostKind);
}

TypeSize TargetTransformInfo::getRegisterBitWidth(
    TargetTransformInfo::RegisterKind K) const {
  return TTIImpl->getRegisterBitWidth(K);
}

unsigned TargetTransformInfo::getMinVectorRegisterBitWidth() const {
  return TTIImpl->getMinVectorRegisterBitWidth();
}

std::optional<unsigned> TargetTransformInfo::getMaxVScale() const {
  return TTIImpl->getMaxVScale();
}

std::optional<unsigned> TargetTransformInfo::getVScaleForTuning() const {
  return TTIImpl->getVScaleForTuning();
}

bool TargetTransformInfo::shouldMaximizeVectorBandwidth(
    TargetTransformInfo::RegisterKind K) const {
  return TTIImpl->shouldMaximizeVectorBandwidth(K);
}

ElementCount TargetTransformInfo::getMinimumVF(unsigned ElemWidth,
                                               bool IsScalable) const {
  return TTIImpl->getMinimumVF(ElemWidth, IsScalable);
}

unsigned TargetTransformInfo::getMaximumVF(unsigned ElemWidth,
                                           unsigned Opcode) const {
  return TTIImpl->getMaximumVF(ElemWidth, Opcode);
}

unsigned TargetTransformInfo::getStoreMinimumVF(unsigned VF, Type *ScalarMemTy,
                                                Type *ScalarValTy,
                                                Align Alignment,
                                                unsigned AddrSpace) const {
  return TTIImpl->getStoreMinimumVF(VF, ScalarMemTy, ScalarValTy, Alignment,
                                    AddrSpace);
}

bool TargetTransformInfo::shouldConsiderAddressTypePromotion(
    const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const {
  return TTIImpl->shouldConsiderAddressTypePromotion(
      I, AllowPromotionWithoutCommonHeader);
}

unsigned TargetTransformInfo::getCacheLineSize() const {
  return CacheLineSize.getNumOccurrences() > 0 ? CacheLineSize
                                               : TTIImpl->getCacheLineSize();
}

std::optional<unsigned>
TargetTransformInfo::getCacheSize(CacheLevel Level) const {
  return TTIImpl->getCacheSize(Level);
}

std::optional<unsigned>
TargetTransformInfo::getCacheAssociativity(CacheLevel Level) const {
  return TTIImpl->getCacheAssociativity(Level);
}

std::optional<unsigned> TargetTransformInfo::getMinPageSize() const {
  return MinPageSize.getNumOccurrences() > 0 ? MinPageSize
                                             : TTIImpl->getMinPageSize();
}

unsigned TargetTransformInfo::getPrefetchDistance() const {
  return TTIImpl->getPrefetchDistance();
}

unsigned TargetTransformInfo::getMinPrefetchStride(
    unsigned NumMemAccesses, unsigned NumStridedMemAccesses,
    unsigned NumPrefetches, bool HasCall) const {
  return TTIImpl->getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses,
                                       NumPrefetches, HasCall);
}

unsigned TargetTransformInfo::getMaxPrefetchIterationsAhead() const {
  return TTIImpl->getMaxPrefetchIterationsAhead();
}

bool TargetTransformInfo::enableWritePrefetching() const {
  return TTIImpl->enableWritePrefetching();
}

bool TargetTransformInfo::shouldPrefetchAddressSpace(unsigned AS) const {
  return TTIImpl->shouldPrefetchAddressSpace(AS);
}

InstructionCost TargetTransformInfo::getPartialReductionCost(
    unsigned Opcode, Type *InputTypeA, Type *InputTypeB, Type *AccumType,
    ElementCount VF, PartialReductionExtendKind OpAExtend,
    PartialReductionExtendKind OpBExtend, std::optional<unsigned> BinOp,
    TTI::TargetCostKind CostKind, std::optional<FastMathFlags> FMF) const {
  return TTIImpl->getPartialReductionCost(Opcode, InputTypeA, InputTypeB,
                                          AccumType, VF, OpAExtend, OpBExtend,
                                          BinOp, CostKind, FMF);
}

unsigned TargetTransformInfo::getMaxInterleaveFactor(ElementCount VF) const {
  return TTIImpl->getMaxInterleaveFactor(VF);
}

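// Classify an operand value for costing purposes. Illustrative IR: in
//   %r = mul <4 x i32> %x, splat (i32 8)
// the constant operand is {OK_UniformConstantValue, OP_PowerOf2}, while a
// plain variable operand like %x stays {OK_AnyValue, OP_None}.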
TargetTransformInfo::OperandValueInfo
TargetTransformInfo::getOperandInfo(const Value *V) {
  OperandValueKind OpInfo = OK_AnyValue;
  OperandValueProperties OpProps = OP_None;

  // undef/poison don't materialize constants.
  if (isa<UndefValue>(V))
    return {OK_AnyValue, OP_None};

  if (isa<ConstantInt>(V) || isa<ConstantFP>(V)) {
    if (const auto *CI = dyn_cast<ConstantInt>(V)) {
      if (CI->getValue().isPowerOf2())
        OpProps = OP_PowerOf2;
      else if (CI->getValue().isNegatedPowerOf2())
        OpProps = OP_NegatedPowerOf2;
    }
    return {OK_UniformConstantValue, OpProps};
  }

  // A broadcast shuffle creates a uniform value.
  // TODO: Add support for non-zero index broadcasts.
  // TODO: Add support for different source vector width.
  if (const auto *ShuffleInst = dyn_cast<ShuffleVectorInst>(V))
    if (ShuffleInst->isZeroEltSplat())
      OpInfo = OK_UniformValue;

  const Value *Splat = getSplatValue(V);

  // Check for a splat of a constant or for a non uniform vector of constants
  // and check if the constant(s) are all powers of two.
  if (Splat) {
    // Check for a splat of a uniform value. This is not loop aware, so return
    // true only for the obviously uniform cases (argument, globalvalue).
    if (isa<Argument>(Splat) || isa<GlobalValue>(Splat)) {
      OpInfo = OK_UniformValue;
    } else if (isa<Constant>(Splat)) {
      OpInfo = OK_UniformConstantValue;
      if (auto *CI = dyn_cast<ConstantInt>(Splat)) {
        if (CI->getValue().isPowerOf2())
          OpProps = OP_PowerOf2;
        else if (CI->getValue().isNegatedPowerOf2())
          OpProps = OP_NegatedPowerOf2;
      }
    }
  } else if (const auto *CDS = dyn_cast<ConstantDataSequential>(V)) {
    OpInfo = OK_NonUniformConstantValue;
    bool AllPow2 = true, AllNegPow2 = true;
    for (uint64_t I = 0, E = CDS->getNumElements(); I != E; ++I) {
      if (auto *CI = dyn_cast<ConstantInt>(CDS->getElementAsConstant(I))) {
        AllPow2 &= CI->getValue().isPowerOf2();
        AllNegPow2 &= CI->getValue().isNegatedPowerOf2();
        if (AllPow2 || AllNegPow2)
          continue;
      }
      AllPow2 = AllNegPow2 = false;
      break;
    }
    OpProps = AllPow2 ? OP_PowerOf2 : OpProps;
    OpProps = AllNegPow2 ? OP_NegatedPowerOf2 : OpProps;
  } else if (isa<ConstantVector>(V) || isa<ConstantDataVector>(V)) {
    OpInfo = OK_NonUniformConstantValue;
  }

  return {OpInfo, OpProps};
}

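// Collect the operand info shared by two values; when both operands are the
// same value, the second lookup and the merge are skipped.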
TargetTransformInfo::OperandValueInfo
TargetTransformInfo::commonOperandInfo(const Value *X, const Value *Y) {
  OperandValueInfo OpInfoX = getOperandInfo(X);
  if (X == Y)
    return OpInfoX;
  return OpInfoX.mergeWith(getOperandInfo(Y));
}

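// Illustrative example for the frem special case below: a <4 x float> frem is
// costed as a library call when the available vector library provides a
// vectorized fmodf, since codegen will later emit exactly that call.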
InstructionCost TargetTransformInfo::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    OperandValueInfo Op1Info, OperandValueInfo Op2Info,
    ArrayRef<const Value *> Args, const Instruction *CxtI,
    const TargetLibraryInfo *TLibInfo) const {

  // Use call cost for frem instructions that have platform specific vector
  // math functions, as those will be replaced with calls later by
  // SelectionDAG or the ReplaceWithVecLib pass.
  if (TLibInfo && Opcode == Instruction::FRem) {
    VectorType *VecTy = dyn_cast<VectorType>(Ty);
    LibFunc Func;
    if (VecTy &&
        TLibInfo->getLibFunc(Instruction::FRem, Ty->getScalarType(), Func) &&
        TLibInfo->isFunctionVectorizable(TLibInfo->getName(Func),
                                         VecTy->getElementCount()))
      return getCallInstrCost(nullptr, VecTy, {VecTy, VecTy}, CostKind);
  }

  InstructionCost Cost = TTIImpl->getArithmeticInstrCost(
      Opcode, Ty, CostKind, Op1Info, Op2Info, Args, CxtI);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getAltInstrCost(
    VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,
    const SmallBitVector &OpcodeMask, TTI::TargetCostKind CostKind) const {
  InstructionCost Cost =
      TTIImpl->getAltInstrCost(VecTy, Opcode0, Opcode1, OpcodeMask, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getShuffleCost(
    ShuffleKind Kind, VectorType *DstTy, VectorType *SrcTy, ArrayRef<int> Mask,
    TTI::TargetCostKind CostKind, int Index, VectorType *SubTp,
    ArrayRef<const Value *> Args, const Instruction *CxtI) const {
  assert((Mask.empty() || DstTy->isScalableTy() ||
          Mask.size() == DstTy->getElementCount().getKnownMinValue()) &&
         "Expected the Mask to match the return size if given");
  assert(SrcTy->getScalarType() == DstTy->getScalarType() &&
         "Expected the same scalar types");
  InstructionCost Cost = TTIImpl->getShuffleCost(
      Kind, DstTy, SrcTy, Mask, CostKind, Index, SubTp, Args, CxtI);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

TargetTransformInfo::PartialReductionExtendKind
TargetTransformInfo::getPartialReductionExtendKind(Instruction *I) {
  if (auto *Cast = dyn_cast<CastInst>(I))
    return getPartialReductionExtendKind(Cast->getOpcode());
  return PR_None;
}

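// Map a PartialReductionExtendKind back to the IR cast opcode it describes,
// and vice versa below.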
Instruction::CastOps
TargetTransformInfo::getCastOpcodeForPartialReductionExtendKind(
    PartialReductionExtendKind Kind) {
  switch (Kind) {
  case PR_ZeroExtend:
    return Instruction::CastOps::ZExt;
  case PR_SignExtend:
    return Instruction::CastOps::SExt;
  case PR_FPExtend:
    return Instruction::CastOps::FPExt;
  default:
    break;
  }
  llvm_unreachable("Unhandled partial reduction extend kind");
}

TargetTransformInfo::PartialReductionExtendKind
TargetTransformInfo::getPartialReductionExtendKind(
    Instruction::CastOps CastOpc) {
  switch (CastOpc) {
  case Instruction::CastOps::ZExt:
    return PR_ZeroExtend;
  case Instruction::CastOps::SExt:
    return PR_SignExtend;
  case Instruction::CastOps::FPExt:
    return PR_FPExtend;
  default:
    return PR_None;
  }
  llvm_unreachable("Unhandled cast opcode");
}

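// Calculates a CastContextHint from I. Illustrative IR: for
//   %w = load i32, ptr %p
//   %x = zext i32 %w to i64
// the zext is costed with CastContextHint::Normal; if %w came from
// llvm.masked.load the hint would be Masked, and from llvm.masked.gather,
// GatherScatter.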
TTI::CastContextHint
TargetTransformInfo::getCastContextHint(const Instruction *I) {
  if (!I)
    return CastContextHint::None;

  auto getLoadStoreKind = [](const Value *V, unsigned LdStOp, unsigned MaskedOp,
                             unsigned GatScatOp) {
    const Instruction *I = dyn_cast<Instruction>(V);
    if (!I)
      return CastContextHint::None;

    if (I->getOpcode() == LdStOp)
      return CastContextHint::Normal;

    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      if (II->getIntrinsicID() == MaskedOp)
        return TTI::CastContextHint::Masked;
      if (II->getIntrinsicID() == GatScatOp)
        return TTI::CastContextHint::GatherScatter;
    }

    return TTI::CastContextHint::None;
  };

  switch (I->getOpcode()) {
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPExt:
    return getLoadStoreKind(I->getOperand(0), Instruction::Load,
                            Intrinsic::masked_load, Intrinsic::masked_gather);
  case Instruction::Trunc:
  case Instruction::FPTrunc:
    if (I->hasOneUse())
      return getLoadStoreKind(*I->user_begin(), Instruction::Store,
                              Intrinsic::masked_store,
                              Intrinsic::masked_scatter);
    break;
  default:
    return CastContextHint::None;
  }

  return TTI::CastContextHint::None;
}

InstructionCost TargetTransformInfo::getCastInstrCost(
    unsigned Opcode, Type *Dst, Type *Src, CastContextHint CCH,
    TTI::TargetCostKind CostKind, const Instruction *I) const {
  assert((I == nullptr || I->getOpcode() == Opcode) &&
         "Opcode should reflect passed instruction.");
  InstructionCost Cost =
      TTIImpl->getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getExtractWithExtendCost(
    unsigned Opcode, Type *Dst, VectorType *VecTy, unsigned Index,
    TTI::TargetCostKind CostKind) const {
  InstructionCost Cost =
      TTIImpl->getExtractWithExtendCost(Opcode, Dst, VecTy, Index, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getCFInstrCost(
    unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I) const {
  assert((I == nullptr || I->getOpcode() == Opcode) &&
         "Opcode should reflect passed instruction.");
  InstructionCost Cost = TTIImpl->getCFInstrCost(Opcode, CostKind, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getCmpSelInstrCost(
    unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
    TTI::TargetCostKind CostKind, OperandValueInfo Op1Info,
    OperandValueInfo Op2Info, const Instruction *I) const {
  assert((I == nullptr || I->getOpcode() == Opcode) &&
         "Opcode should reflect passed instruction.");
  InstructionCost Cost = TTIImpl->getCmpSelInstrCost(
      Opcode, ValTy, CondTy, VecPred, CostKind, Op1Info, Op2Info, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getVectorInstrCost(
    unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index,
    const Value *Op0, const Value *Op1, TTI::VectorInstrContext VIC) const {
  assert((Opcode == Instruction::InsertElement ||
          Opcode == Instruction::ExtractElement) &&
         "Expecting Opcode to be insertelement/extractelement.");
  InstructionCost Cost =
      TTIImpl->getVectorInstrCost(Opcode, Val, CostKind, Index, Op0, Op1, VIC);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getVectorInstrCost(
    unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index,
    Value *Scalar, ArrayRef<std::tuple<Value *, User *, int>> ScalarUserAndIdx,
    TTI::VectorInstrContext VIC) const {
  assert((Opcode == Instruction::InsertElement ||
          Opcode == Instruction::ExtractElement) &&
         "Expecting Opcode to be insertelement/extractelement.");
  InstructionCost Cost = TTIImpl->getVectorInstrCost(
      Opcode, Val, CostKind, Index, Scalar, ScalarUserAndIdx, VIC);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getVectorInstrCost(
    const Instruction &I, Type *Val, TTI::TargetCostKind CostKind,
    unsigned Index, TTI::VectorInstrContext VIC) const {
  // FIXME: Assert that Opcode is either InsertElement or ExtractElement.
  // This is mentioned in the interface description and respected by all
  // callers, but never asserted upon.
  InstructionCost Cost =
      TTIImpl->getVectorInstrCost(I, Val, CostKind, Index, VIC);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getIndexedVectorInstrCostFromEnd(
    unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind,
    unsigned Index) const {
  InstructionCost Cost =
      TTIImpl->getIndexedVectorInstrCostFromEnd(Opcode, Val, CostKind, Index);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getInsertExtractValueCost(
    unsigned Opcode, TTI::TargetCostKind CostKind) const {
  assert((Opcode == Instruction::InsertValue ||
          Opcode == Instruction::ExtractValue) &&
         "Expecting Opcode to be insertvalue/extractvalue.");
  InstructionCost Cost = TTIImpl->getInsertExtractValueCost(Opcode, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getReplicationShuffleCost(
    Type *EltTy, int ReplicationFactor, int VF, const APInt &DemandedDstElts,
    TTI::TargetCostKind CostKind) const {
  InstructionCost Cost = TTIImpl->getReplicationShuffleCost(
      EltTy, ReplicationFactor, VF, DemandedDstElts, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getMemoryOpCost(
    unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
    TTI::TargetCostKind CostKind, TTI::OperandValueInfo OpInfo,
    const Instruction *I) const {
  assert((I == nullptr || I->getOpcode() == Opcode) &&
         "Opcode should reflect passed instruction.");
  InstructionCost Cost = TTIImpl->getMemoryOpCost(
      Opcode, Src, Alignment, AddressSpace, CostKind, OpInfo, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getInterleavedMemoryOpCost(
    unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
    Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
    bool UseMaskForCond, bool UseMaskForGaps) const {
  InstructionCost Cost = TTIImpl->getInterleavedMemoryOpCost(
      Opcode, VecTy, Factor, Indices, Alignment, AddressSpace, CostKind,
      UseMaskForCond, UseMaskForGaps);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost
TargetTransformInfo::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                           TTI::TargetCostKind CostKind) const {
  InstructionCost Cost = TTIImpl->getIntrinsicInstrCost(ICA, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getMemIntrinsicInstrCost(
    const MemIntrinsicCostAttributes &MICA,
    TTI::TargetCostKind CostKind) const {
  InstructionCost Cost = TTIImpl->getMemIntrinsicInstrCost(MICA, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost
TargetTransformInfo::getCallInstrCost(Function *F, Type *RetTy,
                                      ArrayRef<Type *> Tys,
                                      TTI::TargetCostKind CostKind) const {
  InstructionCost Cost = TTIImpl->getCallInstrCost(F, RetTy, Tys, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

unsigned TargetTransformInfo::getNumberOfParts(Type *Tp) const {
  return TTIImpl->getNumberOfParts(Tp);
}

InstructionCost TargetTransformInfo::getAddressComputationCost(
    Type *PtrTy, ScalarEvolution *SE, const SCEV *Ptr,
    TTI::TargetCostKind CostKind) const {
  InstructionCost Cost =
      TTIImpl->getAddressComputationCost(PtrTy, SE, Ptr, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getMemcpyCost(const Instruction *I) const {
  InstructionCost Cost = TTIImpl->getMemcpyCost(I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

uint64_t TargetTransformInfo::getMaxMemIntrinsicInlineSizeThreshold() const {
  return TTIImpl->getMaxMemIntrinsicInlineSizeThreshold();
}

InstructionCost TargetTransformInfo::getArithmeticReductionCost(
    unsigned Opcode, VectorType *Ty, std::optional<FastMathFlags> FMF,
    TTI::TargetCostKind CostKind) const {
  InstructionCost Cost =
      TTIImpl->getArithmeticReductionCost(Opcode, Ty, FMF, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getMinMaxReductionCost(
    Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF,
    TTI::TargetCostKind CostKind) const {
  InstructionCost Cost =
      TTIImpl->getMinMaxReductionCost(IID, Ty, FMF, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getExtendedReductionCost(
    unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *Ty,
    std::optional<FastMathFlags> FMF, TTI::TargetCostKind CostKind) const {
  return TTIImpl->getExtendedReductionCost(Opcode, IsUnsigned, ResTy, Ty, FMF,
                                           CostKind);
}

InstructionCost TargetTransformInfo::getMulAccReductionCost(
    bool IsUnsigned, unsigned RedOpcode, Type *ResTy, VectorType *Ty,
    TTI::TargetCostKind CostKind) const {
  return TTIImpl->getMulAccReductionCost(IsUnsigned, RedOpcode, ResTy, Ty,
                                         CostKind);
}

InstructionCost
TargetTransformInfo::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) const {
  return TTIImpl->getCostOfKeepingLiveOverCall(Tys);
}

bool TargetTransformInfo::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                             MemIntrinsicInfo &Info) const {
  return TTIImpl->getTgtMemIntrinsic(Inst, Info);
}

unsigned TargetTransformInfo::getAtomicMemIntrinsicMaxElementSize() const {
  return TTIImpl->getAtomicMemIntrinsicMaxElementSize();
}

Value *TargetTransformInfo::getOrCreateResultFromMemIntrinsic(
    IntrinsicInst *Inst, Type *ExpectedType, bool CanCreate) const {
  return TTIImpl->getOrCreateResultFromMemIntrinsic(Inst, ExpectedType,
                                                    CanCreate);
}

Type *TargetTransformInfo::getMemcpyLoopLoweringType(
    LLVMContext &Context, Value *Length, unsigned SrcAddrSpace,
    unsigned DestAddrSpace, Align SrcAlign, Align DestAlign,
    std::optional<uint32_t> AtomicElementSize) const {
  return TTIImpl->getMemcpyLoopLoweringType(Context, Length, SrcAddrSpace,
                                            DestAddrSpace, SrcAlign, DestAlign,
                                            AtomicElementSize);
}

void TargetTransformInfo::getMemcpyLoopResidualLoweringType(
    SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
    unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
    Align SrcAlign, Align DestAlign,
    std::optional<uint32_t> AtomicCpySize) const {
  TTIImpl->getMemcpyLoopResidualLoweringType(
      OpsOut, Context, RemainingBytes, SrcAddrSpace, DestAddrSpace, SrcAlign,
      DestAlign, AtomicCpySize);
}

bool TargetTransformInfo::areInlineCompatible(const Function *Caller,
                                              const Function *Callee) const {
  return TTIImpl->areInlineCompatible(Caller, Callee);
}

unsigned
TargetTransformInfo::getInlineCallPenalty(const Function *F,
                                          const CallBase &Call,
                                          unsigned DefaultCallPenalty) const {
  return TTIImpl->getInlineCallPenalty(F, Call, DefaultCallPenalty);
}

bool TargetTransformInfo::shouldCopyAttributeWhenOutliningFrom(
    const Function *Caller, const Attribute &Attr) const {
  return TTIImpl->shouldCopyAttributeWhenOutliningFrom(Caller, Attr);
}

bool TargetTransformInfo::areTypesABICompatible(const Function *Caller,
                                                const Function *Callee,
                                                ArrayRef<Type *> Types) const {
  return TTIImpl->areTypesABICompatible(Caller, Callee, Types);
}

bool TargetTransformInfo::isIndexedLoadLegal(MemIndexedMode Mode,
                                             Type *Ty) const {
  return TTIImpl->isIndexedLoadLegal(Mode, Ty);
}

bool TargetTransformInfo::isIndexedStoreLegal(MemIndexedMode Mode,
                                              Type *Ty) const {
  return TTIImpl->isIndexedStoreLegal(Mode, Ty);
}

unsigned TargetTransformInfo::getLoadStoreVecRegBitWidth(unsigned AS) const {
  return TTIImpl->getLoadStoreVecRegBitWidth(AS);
}

bool TargetTransformInfo::isLegalToVectorizeLoad(LoadInst *LI) const {
  return TTIImpl->isLegalToVectorizeLoad(LI);
}

bool TargetTransformInfo::isLegalToVectorizeStore(StoreInst *SI) const {
  return TTIImpl->isLegalToVectorizeStore(SI);
}

bool TargetTransformInfo::isLegalToVectorizeLoadChain(
    unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const {
  return TTIImpl->isLegalToVectorizeLoadChain(ChainSizeInBytes, Alignment,
                                              AddrSpace);
}

bool TargetTransformInfo::isLegalToVectorizeStoreChain(
    unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const {
  return TTIImpl->isLegalToVectorizeStoreChain(ChainSizeInBytes, Alignment,
                                               AddrSpace);
}

bool TargetTransformInfo::isLegalToVectorizeReduction(
    const RecurrenceDescriptor &RdxDesc, ElementCount VF) const {
  return TTIImpl->isLegalToVectorizeReduction(RdxDesc, VF);
}

bool TargetTransformInfo::isElementTypeLegalForScalableVector(Type *Ty) const {
  return TTIImpl->isElementTypeLegalForScalableVector(Ty);
}

unsigned TargetTransformInfo::getLoadVectorFactor(unsigned VF,
                                                  unsigned LoadSize,
                                                  unsigned ChainSizeInBytes,
                                                  VectorType *VecTy) const {
  return TTIImpl->getLoadVectorFactor(VF, LoadSize, ChainSizeInBytes, VecTy);
}

unsigned TargetTransformInfo::getStoreVectorFactor(unsigned VF,
                                                   unsigned StoreSize,
                                                   unsigned ChainSizeInBytes,
                                                   VectorType *VecTy) const {
  return TTIImpl->getStoreVectorFactor(VF, StoreSize, ChainSizeInBytes, VecTy);
}

bool TargetTransformInfo::preferFixedOverScalableIfEqualCost(
    bool IsEpilogue) const {
  return TTIImpl->preferFixedOverScalableIfEqualCost(IsEpilogue);
}

bool TargetTransformInfo::preferInLoopReduction(RecurKind Kind,
                                                Type *Ty) const {
  return TTIImpl->preferInLoopReduction(Kind, Ty);
}

bool TargetTransformInfo::preferAlternateOpcodeVectorization() const {
  return TTIImpl->preferAlternateOpcodeVectorization();
}

bool TargetTransformInfo::preferPredicatedReductionSelect() const {
  return TTIImpl->preferPredicatedReductionSelect();
}

bool TargetTransformInfo::preferEpilogueVectorization(
    ElementCount Iters) const {
  return TTIImpl->preferEpilogueVectorization(Iters);
}

bool TargetTransformInfo::shouldConsiderVectorizationRegPressure() const {
  return TTIImpl->shouldConsiderVectorizationRegPressure();
}

TargetTransformInfo::VPLegalization
TargetTransformInfo::getVPLegalizationStrategy(const VPIntrinsic &VPI) const {
  return TTIImpl->getVPLegalizationStrategy(VPI);
}

bool TargetTransformInfo::hasArmWideBranch(bool Thumb) const {
  return TTIImpl->hasArmWideBranch(Thumb);
}

APInt TargetTransformInfo::getFeatureMask(const Function &F) const {
  return TTIImpl->getFeatureMask(F);
}

APInt TargetTransformInfo::getPriorityMask(const Function &F) const {
  return TTIImpl->getPriorityMask(F);
}

bool TargetTransformInfo::isMultiversionedFunction(const Function &F) const {
  return TTIImpl->isMultiversionedFunction(F);
}

unsigned TargetTransformInfo::getMaxNumArgs() const {
  return TTIImpl->getMaxNumArgs();
}

bool TargetTransformInfo::shouldExpandReduction(const IntrinsicInst *II) const {
  return TTIImpl->shouldExpandReduction(II);
}

TargetTransformInfo::ReductionShuffle
TargetTransformInfo::getPreferredExpandedReductionShuffle(
    const IntrinsicInst *II) const {
  return TTIImpl->getPreferredExpandedReductionShuffle(II);
}

unsigned TargetTransformInfo::getGISelRematGlobalCost() const {
  return TTIImpl->getGISelRematGlobalCost();
}

unsigned TargetTransformInfo::getMinTripCountTailFoldingThreshold() const {
  return TTIImpl->getMinTripCountTailFoldingThreshold();
}

bool TargetTransformInfo::supportsScalableVectors() const {
  return TTIImpl->supportsScalableVectors();
}

bool TargetTransformInfo::enableScalableVectorization() const {
  return TTIImpl->enableScalableVectorization();
}

bool TargetTransformInfo::hasActiveVectorLength() const {
  return TTIImpl->hasActiveVectorLength();
}

bool TargetTransformInfo::isProfitableToSinkOperands(
    Instruction *I, SmallVectorImpl<Use *> &OpsToSink) const {
  return TTIImpl->isProfitableToSinkOperands(I, OpsToSink);
}

bool TargetTransformInfo::isVectorShiftByScalarCheap(Type *Ty) const {
  return TTIImpl->isVectorShiftByScalarCheap(Ty);
}

unsigned
TargetTransformInfo::getNumBytesToPadGlobalArray(unsigned Size,
                                                 Type *ArrayType) const {
  return TTIImpl->getNumBytesToPadGlobalArray(Size, ArrayType);
}

void TargetTransformInfo::collectKernelLaunchBounds(
    const Function &F,
    SmallVectorImpl<std::pair<StringRef, int64_t>> &LB) const {
  return TTIImpl->collectKernelLaunchBounds(F, LB);
}

bool TargetTransformInfo::allowVectorElementIndexingUsingGEP() const {
  return TTIImpl->allowVectorElementIndexingUsingGEP();
}

bool TargetTransformInfo::isUniform(const Instruction &I,
                                    const SmallBitVector &UniformArgs) const {
  return TTIImpl->isUniform(I, UniformArgs);
}

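// Sketch of typical use, assuming a TargetMachine TM: a backend registers a
// callback such as
//   TargetIRAnalysis TIRA(
//       [&](const Function &F) { return TM.getTargetTransformInfo(F); });
// so that run() produces a target-specific TTI; the default callback below
// falls back to the DataLayout-only NoTTIImpl.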
TargetIRAnalysis::TargetIRAnalysis() : TTICallback(&getDefaultTTI) {}

TargetIRAnalysis::TargetIRAnalysis(
    std::function<Result(const Function &)> TTICallback)
    : TTICallback(std::move(TTICallback)) {}

TargetIRAnalysis::Result TargetIRAnalysis::run(const Function &F,
                                               FunctionAnalysisManager &) {
  assert(!F.isIntrinsic() && "Should not request TTI for intrinsics");
  return TTICallback(F);
}

AnalysisKey TargetIRAnalysis::Key;

TargetIRAnalysis::Result TargetIRAnalysis::getDefaultTTI(const Function &F) {
  return Result(F.getDataLayout());
}

// Register the basic pass.
INITIALIZE_PASS(TargetTransformInfoWrapperPass, "tti",
                "Target Transform Information", false, true)
char TargetTransformInfoWrapperPass::ID = 0;

void TargetTransformInfoWrapperPass::anchor() {}

TargetTransformInfoWrapperPass::TargetTransformInfoWrapperPass()
    : ImmutablePass(ID) {}

TargetTransformInfoWrapperPass::TargetTransformInfoWrapperPass(
    TargetIRAnalysis TIRA)
    : ImmutablePass(ID), TIRA(std::move(TIRA)) {}

TargetTransformInfo &
TargetTransformInfoWrapperPass::getTTI(const Function &F) {
  FunctionAnalysisManager DummyFAM;
  TTI = TIRA.run(F, DummyFAM);
  return *TTI;
}
for(const MachineOperand &MO :llvm::drop_begin(OldMI.operands(), Desc.getNumOperands()))
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
#define X(NUM, ENUM, NAME)
Definition ELF.h:853
static cl::opt< OutputCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(OutputCostKind::RecipThroughput), cl::values(clEnumValN(OutputCostKind::RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(OutputCostKind::Latency, "latency", "Instruction latency"), clEnumValN(OutputCostKind::CodeSize, "code-size", "Code size"), clEnumValN(OutputCostKind::SizeAndLatency, "size-latency", "Code size and latency"), clEnumValN(OutputCostKind::All, "all", "Print all cost kinds")))
static cl::opt< bool > ForceNestedLoop("force-nested-hardware-loop", cl::Hidden, cl::init(false), cl::desc("Force allowance of nested hardware loops"))
static cl::opt< bool > ForceHardwareLoopPHI("force-hardware-loop-phi", cl::Hidden, cl::init(false), cl::desc("Force hardware loop counter to be updated through a phi"))
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
uint64_t IntrinsicInst * II
if(PassOpts->AAPipeline)
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
This file defines the SmallVector class.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
This file provides helpers for the implementation of a TargetTransformInfo-conforming class.
static cl::opt< unsigned > PredictableBranchThreshold("predictable-branch-threshold", cl::init(99), cl::Hidden, cl::desc("Use this to override the target's predictable branch threshold (%)."))
static cl::opt< bool > EnableReduxCost("costmodel-reduxcost", cl::init(false), cl::Hidden, cl::desc("Recognize reduction patterns."))
static cl::opt< unsigned > MinPageSize("min-page-size", cl::init(0), cl::Hidden, cl::desc("Use this to override the target's minimum page size."))
static cl::opt< unsigned > CacheLineSize("cache-line-size", cl::init(0), cl::Hidden, cl::desc("Use this to override the target cache line size when " "specified by the user."))
This pass exposes codegen information to IR-level passes.
Class for arbitrary precision integers.
Definition APInt.h:78
an instruction to allocate memory on the stack
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
iterator end() const
Definition ArrayRef.h:130
iterator begin() const
Definition ArrayRef.h:129
Class to represent array types.
A cache of @llvm.assume calls within a function.
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:105
LLVM Basic Block Representation.
Definition BasicBlock.h:62
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
Conditional Branch instruction.
This is an important base class in LLVM.
Definition Constant.h:43
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:159
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Convenience struct for specifying and reasoning about fast-math flags.
Definition FMF.h:23
Class to represent function types.
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition Function.h:211
ImmutablePass class - This class is used to provide information that does not need to be run.
Definition Pass.h:285
ImmutablePass(char &pid)
Definition Pass.h:287
The core instruction combiner logic.
LLVM_ABI IntrinsicCostAttributes(Intrinsic::ID Id, const CallBase &CI, InstructionCost ScalarCost=InstructionCost::getInvalid(), bool TypeBasedOnly=false)
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
An instruction for reading from memory.
Wrapper class to LoopBlocksDFS that provides a standard begin()/end() interface for the DFS reverse p...
void perform(const LoopInfo *LI)
Traverse the loop blocks and store the DFS result.
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
Represents a single loop in the control flow graph.
Definition LoopInfo.h:40
Information for memory intrinsic cost model.
The optimization diagnostic interface.
Analysis providing profile information.
The RecurrenceDescriptor is used to identify recurrences variables in a loop.
This class represents a constant integer value.
This class represents an analyzed expression in the program.
The main scalar evolution driver.
LLVM_ABI uint64_t getTypeSizeInBits(Type *Ty) const
Return the size in bits of the specified type, for which isSCEVable must return true.
LLVM_ABI bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
LLVM_ABI const SCEV * getExitCount(const Loop *L, const BasicBlock *ExitingBlock, ExitCountKind Kind=Exact)
Return the number of times the backedge executes before the given exit would be taken; if not exactly...
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is ...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StackOffset holds a fixed and a scalable offset in bytes.
Definition TypeSize.h:30
An instruction for storing to memory.
Multiway switch.
Analysis pass providing the TargetTransformInfo.
LLVM_ABI Result run(const Function &F, FunctionAnalysisManager &)
LLVM_ABI TargetIRAnalysis()
Default construct a target IR analysis.
Provides information about what library functions are available for the current target.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
StringRef getName(LibFunc F) const
bool isFunctionVectorizable(StringRef F, const ElementCount &VF) const
CRTP base class for use as a mix-in that aids implementing a TargetTransformInfo-compatible class.
Wrapper pass for TargetTransformInfo.
TargetTransformInfoWrapperPass()
We must provide a default constructor for the pass but it should never be used.
TargetTransformInfo & getTTI(const Function &F)
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
LLVM_ABI bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) const
LLVM_ABI Value * getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst, Type *ExpectedType, bool CanCreate=true) const
LLVM_ABI bool isLegalToVectorizeLoad(LoadInst *LI) const
LLVM_ABI std::optional< unsigned > getVScaleForTuning() const
static LLVM_ABI CastContextHint getCastContextHint(const Instruction *I)
Calculates a CastContextHint from I.
LLVM_ABI unsigned getMaxNumArgs() const
LLVM_ABI bool addrspacesMayAlias(unsigned AS0, unsigned AS1) const
Return false if a AS0 address cannot possibly alias a AS1 address.
LLVM_ABI bool isLegalMaskedScatter(Type *DataType, Align Alignment) const
Return true if the target supports masked scatter.
LLVM_ABI bool shouldBuildLookupTables() const
Return true if switches should be turned into lookup tables for the target.
LLVM_ABI bool isLegalToVectorizeStore(StoreInst *SI) const
LLVM_ABI InstructionCost getMulAccReductionCost(bool IsUnsigned, unsigned RedOpcode, Type *ResTy, VectorType *Ty, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput) const
Calculate the cost of an extended reduction pattern, similar to getArithmeticReductionCost of an Add/...
LLVM_ABI bool areTypesABICompatible(const Function *Caller, const Function *Callee, ArrayRef< Type * > Types) const
LLVM_ABI bool enableAggressiveInterleaving(bool LoopHasReductions) const
Don't restrict interleaved unrolling to small loops.
LLVM_ABI bool isMultiversionedFunction(const Function &F) const
Returns true if this is an instance of a function with multiple versions.
LLVM_ABI bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) const
Return true if it is faster to check if a floating-point value is NaN (or not-NaN) versus a compariso...
LLVM_ABI bool isLegalMaskedStore(Type *DataType, Align Alignment, unsigned AddressSpace, MaskKind MaskKind=VariableOrConstantMask) const
Return true if the target supports masked store.
LLVM_ABI bool supportsEfficientVectorElementLoadStore() const
If target has efficient vector element load/store instructions, it can return true here so that inser...
LLVM_ABI unsigned getAssumedAddrSpace(const Value *V) const
LLVM_ABI bool preferAlternateOpcodeVectorization() const
LLVM_ABI bool shouldDropLSRSolutionIfLessProfitable() const
Return true if LSR should drop a found solution if it's calculated to be less profitable than the bas...
LLVM_ABI bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1, const TargetTransformInfo::LSRCost &C2) const
Return true if LSR cost of C1 is lower than C2.
VectorInstrContext
Represents a hint about the context in which an insert/extract is used.
@ None
The insert/extract is not used with a load/store.
@ Load
The value being inserted comes from a load (InsertElement only).
@ Store
The extracted value is stored (ExtractElement only).
LLVM_ABI unsigned getPrefetchDistance() const
LLVM_ABI Type * getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length, unsigned SrcAddrSpace, unsigned DestAddrSpace, Align SrcAlign, Align DestAlign, std::optional< uint32_t > AtomicElementSize=std::nullopt) const
LLVM_ABI bool isLegalMaskedExpandLoad(Type *DataType, Align Alignment) const
Return true if the target supports masked expand load.
LLVM_ABI bool prefersVectorizedAddressing() const
Return true if target doesn't mind addresses in vectors.
LLVM_ABI InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, OperandValueInfo Op1Info={OK_AnyValue, OP_None}, OperandValueInfo Op2Info={OK_AnyValue, OP_None}, const Instruction *I=nullptr) const
LLVM_ABI bool hasBranchDivergence(const Function *F=nullptr) const
Return true if branch divergence exists.
LLVM_ABI bool preferEpilogueVectorization(ElementCount Iters) const
Return true if the loop vectorizer should consider vectorizing an otherwise scalar epilogue loop if t...
LLVM_ABI MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const
LLVM_ABI void getUnrollingPreferences(Loop *L, ScalarEvolution &, UnrollingPreferences &UP, OptimizationRemarkEmitter *ORE) const
Get target-customized preferences for the generic loop unrolling transformation.
LLVM_ABI bool shouldBuildLookupTablesForConstant(Constant *C) const
Return true if switches should be turned into lookup tables containing this constant value for the ta...
LLVM_ABI TailFoldingStyle getPreferredTailFoldingStyle() const
Query the target what the preferred style of tail folding is.
LLVM_ABI bool supportsTailCallFor(const CallBase *CB) const
If target supports tail call on CB.
LLVM_ABI std::optional< Instruction * > instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const
Targets can implement their own combinations for target-specific intrinsics.
LLVM_ABI bool isProfitableLSRChainElement(Instruction *I) const
LLVM_ABI TypeSize getRegisterBitWidth(RegisterKind K) const
MaskKind
Some targets only support masked load/store with a constant mask.
LLVM_ABI unsigned getInlineCallPenalty(const Function *F, const CallBase &Call, unsigned DefaultCallPenalty) const
Returns a penalty for invoking call Call in F.
LLVM_ABI InstructionCost getOperandsScalarizationOverhead(ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind, TTI::VectorInstrContext VIC=TTI::VectorInstrContext::None) const
Estimate the overhead of scalarizing operands with the given types.
LLVM_ABI bool hasActiveVectorLength() const
LLVM_ABI bool isExpensiveToSpeculativelyExecute(const Instruction *I) const
Return true if the cost of the instruction is too high to speculatively execute and should be kept be...
LLVM_ABI bool preferFixedOverScalableIfEqualCost(bool IsEpilogue) const
LLVM_ABI bool isLegalMaskedGather(Type *DataType, Align Alignment) const
Return true if the target supports masked gather.
LLVM_ABI ValueUniformity getValueUniformity(const Value *V) const
Get target-specific uniformity information for a value.
static LLVM_ABI OperandValueInfo commonOperandInfo(const Value *X, const Value *Y)
Collect common data between two OperandValueInfo inputs.
LLVM_ABI InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, OperandValueInfo OpdInfo={OK_AnyValue, OP_None}, const Instruction *I=nullptr) const
LLVM_ABI std::optional< unsigned > getMaxVScale() const
LLVM_ABI InstructionCost getReplicationShuffleCost(Type *EltTy, int ReplicationFactor, int VF, const APInt &DemandedDstElts, TTI::TargetCostKind CostKind) const
LLVM_ABI bool allowVectorElementIndexingUsingGEP() const
Returns true if GEP should not be used to index into vectors for this target.
LLVM_ABI InstructionCost getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef< unsigned > Indices, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, bool UseMaskForCond=false, bool UseMaskForGaps=false) const
LLVM_ABI bool preferTailFoldingOverEpilogue(TailFoldingInfo *TFI) const
Query the target whether it would be preferred to create a tail-folded vector loop,...
LLVM_ABI bool isSingleThreaded() const
LLVM_ABI std::optional< Value * > simplifyDemandedVectorEltsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp) const
Can be used to implement target-specific instruction combining.
LLVM_ABI bool enableOrderedReductions() const
Return true if we should be enabling ordered reductions for the target.
LLVM_ABI unsigned getInliningCostBenefitAnalysisProfitableMultiplier() const
LLVM_ABI InstructionCost getShuffleCost(ShuffleKind Kind, VectorType *DstTy, VectorType *SrcTy, ArrayRef< int > Mask={}, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, int Index=0, VectorType *SubTp=nullptr, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr) const
LLVM_ABI InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) const
LLVM_ABI InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput) const
Calculate the cost of vector reduction intrinsics.
LLVM_ABI unsigned getAtomicMemIntrinsicMaxElementSize() const
LLVM_ABI InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind=TTI::TCK_SizeAndLatency, const Instruction *I=nullptr) const
LLVM_ABI InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index=-1, const Value *Op0=nullptr, const Value *Op1=nullptr, TTI::VectorInstrContext VIC=TTI::VectorInstrContext::None) const
LLVM_ABI std::pair< KnownBits, KnownBits > computeKnownBitsAddrSpaceCast(unsigned ToAS, const Value &PtrOp) const
LLVM_ABI bool LSRWithInstrQueries() const
Return true if the loop strength reduce pass should make Instruction* based TTI queries to isLegalAdd...
LLVM_ABI unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize, unsigned ChainSizeInBytes, VectorType *VecTy) const
LLVM_ABI VPLegalization getVPLegalizationStrategy(const VPIntrinsic &PI) const
static LLVM_ABI PartialReductionExtendKind getPartialReductionExtendKind(Instruction *I)
Get the kind of extension that an instruction represents.
LLVM_ABI bool shouldConsiderVectorizationRegPressure() const
LLVM_ABI bool enableWritePrefetching() const
LLVM_ABI bool shouldTreatInstructionLikeSelect(const Instruction *I) const
Should the Select Optimization pass treat the given instruction like a select, potentially converting...
LLVM_ABI bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
LLVM_ABI bool shouldMaximizeVectorBandwidth(TargetTransformInfo::RegisterKind K) const
LLVM_ABI InstructionCost getGEPCost(Type *PointeeType, const Value *Ptr, ArrayRef< const Value * > Operands, Type *AccessType=nullptr, TargetCostKind CostKind=TCK_SizeAndLatency) const
Estimate the cost of a GEP operation when lowered.
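For instance, a GEP that the target can fold into its user's addressing mode should come back as free; a sketch assuming a GetElementPtrInst *GEP is in scope (hypothetical):
SmallVector<const Value *, 4> Indices(GEP->indices());
InstructionCost GEPCost =
    TTI.getGEPCost(GEP->getSourceElementType(), GEP->getPointerOperand(),
                   Indices); // AccessType/CostKind left at their defaults
bool FoldsAway = GEPCost == TargetTransformInfo::TCC_Free;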
LLVM_ABI bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const
LLVM_ABI bool isLegalInterleavedAccessType(VectorType *VTy, unsigned Factor, Align Alignment, unsigned AddrSpace) const
Return true if the target supports interleaved access for the given vector type VTy,...
LLVM_ABI unsigned getRegUsageForType(Type *Ty) const
Returns the estimated number of registers required to represent Ty.
LLVM_ABI bool isLegalBroadcastLoad(Type *ElementTy, ElementCount NumElements) const
Returns true if the target supports broadcasting a load to a vector of type <NumElements x ElementTy...
LLVM_ABI bool isIndexedStoreLegal(enum MemIndexedMode Mode, Type *Ty) const
LLVM_ABI std::pair< const Value *, unsigned > getPredicatedAddrSpace(const Value *V) const
LLVM_ABI InstructionCost getExtendedReductionCost(unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *Ty, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput) const
Calculate the cost of an extended reduction pattern, similar to getArithmeticReductionCost of a reduc...
LLVM_ABI unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const
LLVM_ABI InstructionCost getRegisterClassReloadCost(unsigned ClassID, TargetCostKind CostKind) const
LLVM_ABI ReductionShuffle getPreferredExpandedReductionShuffle(const IntrinsicInst *II) const
static LLVM_ABI OperandValueInfo getOperandInfo(const Value *V)
Collect properties of V used in cost analysis, e.g. OP_PowerOf2.
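Cost clients typically feed the result into getArithmeticInstrCost so that, say, a divide by a power-of-two constant can be priced like a shift; a sketch assuming a BinaryOperator *Div is in scope (hypothetical):
TargetTransformInfo::OperandValueInfo LHSInfo =
    TargetTransformInfo::getOperandInfo(Div->getOperand(0));
TargetTransformInfo::OperandValueInfo RHSInfo =
    TargetTransformInfo::getOperandInfo(Div->getOperand(1));
InstructionCost DivCost = TTI.getArithmeticInstrCost(
    Div->getOpcode(), Div->getType(),
    TargetTransformInfo::TCK_RecipThroughput, LHSInfo, RHSInfo);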
LLVM_ABI unsigned getRegisterClassForType(bool Vector, Type *Ty=nullptr) const
LLVM_ABI bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace=0, Instruction *I=nullptr, int64_t ScalableOffset=0) const
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
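LSR and similar passes only keep formulas the target can actually encode; a sketch probing reg+imm and reg+reg modes for an i32 access (values hypothetical), under the same assumed setup:
Type *I32 = Type::getInt32Ty(Ctx);
// Base register plus constant offset 4?
bool RegImm = TTI.isLegalAddressingMode(I32, /*BaseGV=*/nullptr,
                                        /*BaseOffset=*/4,
                                        /*HasBaseReg=*/true, /*Scale=*/0);
// Base register plus an unscaled index register?
bool RegReg = TTI.isLegalAddressingMode(I32, /*BaseGV=*/nullptr,
                                        /*BaseOffset=*/0,
                                        /*HasBaseReg=*/true, /*Scale=*/1);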
LLVM_ABI PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) const
Return hardware support for population count.
LLVM_ABI unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI, unsigned &JTSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) const
LLVM_ABI bool isElementTypeLegalForScalableVector(Type *Ty) const
LLVM_ABI bool forceScalarizeMaskedGather(VectorType *Type, Align Alignment) const
Return true if the target forces scalarizing of llvm.masked.gather intrinsics.
LLVM_ABI unsigned getMaxPrefetchIterationsAhead() const
LLVM_ABI bool canHaveNonUndefGlobalInitializerInAddressSpace(unsigned AS) const
Return true if globals in this address space can have initializers other than undef.
LLVM_ABI ElementCount getMinimumVF(unsigned ElemWidth, bool IsScalable) const
LLVM_ABI InstructionCost getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx, const APInt &Imm, Type *Ty, TargetCostKind CostKind) const
LLVM_ABI bool enableMaskedInterleavedAccessVectorization() const
Enable matching of interleaved access groups that contain predicated accesses or gaps and therefore v...
LLVM_ABI InstructionCost getIntImmCostInst(unsigned Opc, unsigned Idx, const APInt &Imm, Type *Ty, TargetCostKind CostKind, Instruction *Inst=nullptr) const
Return the expected cost of materialization for the given integer immediate of the specified type for...
LLVM_ABI bool isLegalStridedLoadStore(Type *DataType, Align Alignment) const
Return true if the target supports strided load.
LLVM_ABI TargetTransformInfo & operator=(TargetTransformInfo &&RHS)
LLVM_ABI InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF=FastMathFlags(), TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput) const
TargetCostKind
The kind of cost model.
@ TCK_RecipThroughput
Reciprocal throughput.
LLVM_ABI InstructionCost getArithmeticInstrCost(unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, TTI::OperandValueInfo Opd1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Opd2Info={TTI::OK_AnyValue, TTI::OP_None}, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr, const TargetLibraryInfo *TLibInfo=nullptr) const
This is an approximation of reciprocal throughput of a math/logic op.
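A sketch of the classic scalar-versus-vector comparison a vectorizer makes with this hook, under the assumed TTI/Ctx setup:
Type *I32 = Type::getInt32Ty(Ctx);
auto *V4I32 = FixedVectorType::get(I32, 4);
InstructionCost ScalarMul = TTI.getArithmeticInstrCost(Instruction::Mul, I32);
InstructionCost VectorMul =
    TTI.getArithmeticInstrCost(Instruction::Mul, V4I32);
// Vectorizing four multiplies pays off when VectorMul < ScalarMul * 4.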
LLVM_ABI bool enableSelectOptimize() const
Should the Select Optimization pass be enabled and run.
LLVM_ABI bool collectFlatAddressOperands(SmallVectorImpl< int > &OpIndexes, Intrinsic::ID IID) const
Return any intrinsic address operand indexes which may be rewritten if they use a flat address space ...
OperandValueProperties
Additional properties of an operand's values.
LLVM_ABI int getInliningLastCallToStaticBonus() const
LLVM_ABI InstructionCost getPointersChainCost(ArrayRef< const Value * > Ptrs, const Value *Base, const PointersChainInfo &Info, Type *AccessTy, TargetCostKind CostKind=TTI::TCK_RecipThroughput) const
Estimate the cost of a chain of pointers (typically pointer operands of a chain of loads or stores wi...
LLVM_ABI bool isIndexedLoadLegal(enum MemIndexedMode Mode, Type *Ty) const
LLVM_ABI unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const
LLVM_ABI unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy, Type *ScalarValTy, Align Alignment, unsigned AddrSpace) const
LLVM_ABI bool isLegalICmpImmediate(int64_t Imm) const
Return true if the specified immediate is a legal icmp immediate, that is, the target has icmp instructi...
LLVM_ABI bool isTypeLegal(Type *Ty) const
Return true if this type is legal.
LLVM_ABI bool isLegalToVectorizeReduction(const RecurrenceDescriptor &RdxDesc, ElementCount VF) const
LLVM_ABI std::optional< unsigned > getCacheAssociativity(CacheLevel Level) const
LLVM_ABI bool isLegalNTLoad(Type *DataType, Align Alignment) const
Return true if the target supports nontemporal load.
LLVM_ABI bool isUniform(const Instruction *I, const SmallBitVector &UniformArgs) const
Determine if an instruction with Custom uniformity can be proven uniform based on which operands are ...
LLVM_ABI InstructionCost getMemcpyCost(const Instruction *I) const
LLVM_ABI unsigned adjustInliningThreshold(const CallBase *CB) const
LLVM_ABI bool isLegalAddImmediate(int64_t Imm) const
Return true if the specified immediate is a legal add immediate, that is, the target has add instruction...
LLVM_ABI bool isTargetIntrinsicWithStructReturnOverloadAtField(Intrinsic::ID ID, int RetIdx) const
Identifies if the vector form of the intrinsic that returns a struct is overloaded at the struct elem...
LLVM_ABI unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize, unsigned ChainSizeInBytes, VectorType *VecTy) const
LLVM_ABI InstructionCost getMemIntrinsicInstrCost(const MemIntrinsicCostAttributes &MICA, TTI::TargetCostKind CostKind) const
LLVM_ABI Value * rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV, Value *NewV) const
Rewrite intrinsic call II such that OldV will be replaced with NewV, which has a different address sp...
LLVM_ABI InstructionCost getCostOfKeepingLiveOverCall(ArrayRef< Type * > Tys) const
LLVM_ABI bool canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE, LoopInfo *LI, DominatorTree *DT, AssumptionCache *AC, TargetLibraryInfo *LibInfo) const
Return true if the target can save a compare for loop count, for example hardware loop saves a compar...
LLVM_ABI unsigned getMinPrefetchStride(unsigned NumMemAccesses, unsigned NumStridedMemAccesses, unsigned NumPrefetches, bool HasCall) const
Some HW prefetchers can handle accesses up to a certain constant stride.
LLVM_ABI bool shouldPrefetchAddressSpace(unsigned AS) const
LLVM_ABI InstructionCost getIntImmCost(const APInt &Imm, Type *Ty, TargetCostKind CostKind) const
Return the expected cost of materializing the given integer immediate of the specified type.
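Constant hoisting, for example, uses this to decide whether a large immediate is worth materializing once and reusing; a sketch with a hypothetical 64-bit constant, under the assumed setup:
APInt Imm(64, 0x123456789ABCULL);
Type *I64 = Type::getInt64Ty(Ctx);
InstructionCost ImmCost =
    TTI.getIntImmCost(Imm, I64, TargetTransformInfo::TCK_SizeAndLatency);
// More than TCC_Basic suggests the constant is expensive to rematerialize.
bool WorthHoisting = ImmCost > TargetTransformInfo::TCC_Basic;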
LLVM_ABI unsigned getMinVectorRegisterBitWidth() const
LLVM_ABI InstructionCost getAddressComputationCost(Type *PtrTy, ScalarEvolution *SE, const SCEV *Ptr, TTI::TargetCostKind CostKind) const
LLVM_ABI bool isLegalNTStore(Type *DataType, Align Alignment) const
Return true if the target supports nontemporal store.
LLVM_ABI unsigned getFlatAddressSpace() const
Returns the address space ID for a target's 'flat' address space.
LLVM_ABI bool preferToKeepConstantsAttached(const Instruction &Inst, const Function &Fn) const
It can be advantageous to detach complex constants from their uses to make their generation cheaper.
LLVM_ABI bool hasArmWideBranch(bool Thumb) const
LLVM_ABI const char * getRegisterClassName(unsigned ClassID) const
LLVM_ABI bool shouldConsiderAddressTypePromotion(const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const
LLVM_ABI APInt getPriorityMask(const Function &F) const
Returns a bitmask constructed from the target-features or fmv-features metadata of a function corresp...
LLVM_ABI BranchProbability getPredictableBranchThreshold() const
If a branch or a select condition is skewed in one direction by more than this factor,...
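A sketch of how a transform might consult the threshold before speculating across a branch, assuming a profile-derived BranchProbability TakenProb (hypothetical):
BranchProbability Threshold = TTI.getPredictableBranchThreshold();
// A branch skewed heavily either way counts as predictable, so speculation
// (e.g. turning it into a select) is likely a loss.
bool Predictable =
    TakenProb > Threshold || TakenProb < Threshold.getCompl();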
LLVM_ABI TargetTransformInfo(std::unique_ptr< const TargetTransformInfoImplBase > Impl)
Construct a TTI object using a type implementing the Concept API below.
LLVM_ABI bool preferInLoopReduction(RecurKind Kind, Type *Ty) const
LLVM_ABI unsigned getCallerAllocaCost(const CallBase *CB, const AllocaInst *AI) const
LLVM_ABI bool hasConditionalLoadStoreForType(Type *Ty, bool IsStore) const
LLVM_ABI unsigned getCacheLineSize() const
LLVM_ABI bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth, unsigned AddressSpace=0, Align Alignment=Align(1), unsigned *Fast=nullptr) const
Determine if the target supports unaligned memory accesses.
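A sketch of probing for a cheap unaligned 64-bit access in address space 0, under the assumed TTI/Ctx setup:
unsigned Fast = 0;
bool Allowed = TTI.allowsMisalignedMemoryAccesses(Ctx, /*BitWidth=*/64,
                                                  /*AddressSpace=*/0,
                                                  Align(1), &Fast);
// Allowed means the access is merely legal; Fast != 0 means it is also
// not expected to be slow.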
LLVM_ABI bool shouldCopyAttributeWhenOutliningFrom(const Function *Caller, const Attribute &Attr) const
LLVM_ABI APInt getAddrSpaceCastPreservedPtrMask(unsigned SrcAS, unsigned DstAS) const
Returns a mask indicating which bits of a pointer remain unchanged when casting between address space...
LLVM_ABI int getInlinerVectorBonusPercent() const
LLVM_ABI unsigned getEpilogueVectorizationMinVF() const
LLVM_ABI void collectKernelLaunchBounds(const Function &F, SmallVectorImpl< std::pair< StringRef, int64_t > > &LB) const
Collect kernel launch bounds for F into LB.
PopcntSupportKind
Flags indicating the kind of support for population count.
LLVM_ABI bool preferPredicatedReductionSelect() const
LLVM_ABI InstructionCost getIntImmCodeSizeCost(unsigned Opc, unsigned Idx, const APInt &Imm, Type *Ty) const
Return the expected cost for the given integer when optimising for size.
LLVM_ABI AddressingModeKind getPreferredAddressingMode(const Loop *L, ScalarEvolution *SE) const
Return the preferred addressing mode LSR should make efforts to generate.
LLVM_ABI bool isLoweredToCall(const Function *F) const
Test whether calls to a function lower to actual program function calls.
LLVM_ABI bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const
LLVM_ABI bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE, AssumptionCache &AC, TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo) const
Query the target whether it would be profitable to convert the given loop into a hardware loop.
LLVM_ABI unsigned getInliningThresholdMultiplier() const
LLVM_ABI InstructionCost getBranchMispredictPenalty() const
Returns estimated penalty of a branch misprediction in latency.
LLVM_ABI unsigned getNumberOfRegisters(unsigned ClassID) const
LLVM_ABI bool isLegalAltInstr(VectorType *VecTy, unsigned Opcode0, unsigned Opcode1, const SmallBitVector &OpcodeMask) const
Return true if this is an alternating opcode pattern that can be lowered to a single instruction on t...
LLVM_ABI bool isProfitableToHoist(Instruction *I) const
Return true if it is profitable to hoist an instruction from the then/else blocks to before the if.
LLVM_ABI bool supportsScalableVectors() const
LLVM_ABI bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) const
Return true if the given instruction (assumed to be a memory access instruction) has a volatile varia...
LLVM_ABI bool isLegalMaskedCompressStore(Type *DataType, Align Alignment) const
Return true if the target supports masked compress store.
LLVM_ABI std::optional< unsigned > getMinPageSize() const
LLVM_ABI bool isFPVectorizationPotentiallyUnsafe() const
Indicate that it is potentially unsafe to automatically vectorize floating-point operations because t...
LLVM_ABI InstructionCost getInsertExtractValueCost(unsigned Opcode, TTI::TargetCostKind CostKind) const
LLVM_ABI bool shouldBuildRelLookupTables() const
Return true if lookup tables should be turned into relative lookup tables.
LLVM_ABI std::optional< unsigned > getCacheSize(CacheLevel Level) const
LLVM_ABI std::optional< Value * > simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed) const
Can be used to implement target-specific instruction combining.
LLVM_ABI bool isLegalAddScalableImmediate(int64_t Imm) const
Return true if adding the specified scalable immediate is legal, that is, the target has add instructi...
LLVM_ABI bool isTargetIntrinsicWithScalarOpAtArg(Intrinsic::ID ID, unsigned ScalarOpdIdx) const
Identifies if the vector form of the intrinsic has a scalar operand.
LLVM_ABI bool hasDivRemOp(Type *DataType, bool IsSigned) const
Return true if the target has a unified operation to calculate division and remainder.
LLVM_ABI InstructionCost getAltInstrCost(VectorType *VecTy, unsigned Opcode0, unsigned Opcode1, const SmallBitVector &OpcodeMask, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput) const
Returns the cost estimation for alternating opcode pattern that can be lowered to a single instructio...
LLVM_ABI bool enableInterleavedAccessVectorization() const
Enable matching of interleaved access groups.
LLVM_ABI unsigned getMinTripCountTailFoldingThreshold() const
LLVM_ABI InstructionCost getPartialReductionCost(unsigned Opcode, Type *InputTypeA, Type *InputTypeB, Type *AccumType, ElementCount VF, PartialReductionExtendKind OpAExtend, PartialReductionExtendKind OpBExtend, std::optional< unsigned > BinOp, TTI::TargetCostKind CostKind, std::optional< FastMathFlags > FMF) const
LLVM_ABI InstructionCost getInstructionCost(const User *U, ArrayRef< const Value * > Operands, TargetCostKind CostKind) const
Estimate the cost of a given IR user when lowered.
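A sketch of costing an existing instruction with its actual operands, assuming an Instruction *I is in scope (hypothetical), under the same setup:
SmallVector<const Value *, 4> Ops(I->operand_values());
InstructionCost UserCost = TTI.getInstructionCost(
    I, Ops, TargetTransformInfo::TCK_RecipThroughput);
bool Lowerable = UserCost.isValid(); // invalid => target cannot cost it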
LLVM_ABI unsigned getMaxInterleaveFactor(ElementCount VF) const
LLVM_ABI bool enableScalableVectorization() const
LLVM_ABI bool useFastCCForInternalCall(Function &F) const
Return true if the input function is internal and should use the fastcc calling convention.
LLVM_ABI bool isVectorShiftByScalarCheap(Type *Ty) const
Return true if it's significantly cheaper to shift a vector by a uniform scalar than by an amount whi...
LLVM_ABI bool isNumRegsMajorCostOfLSR() const
Return true if LSR major cost is number of registers.
LLVM_ABI unsigned getInliningCostBenefitAnalysisSavingsMultiplier() const
LLVM_ABI bool isLegalMaskedVectorHistogram(Type *AddrType, Type *DataType) const
LLVM_ABI unsigned getGISelRematGlobalCost() const
LLVM_ABI unsigned getNumBytesToPadGlobalArray(unsigned Size, Type *ArrayType) const
static LLVM_ABI Instruction::CastOps getOpcodeForPartialReductionExtendKind(PartialReductionExtendKind Kind)
Get the cast opcode for an extension kind.
MemIndexedMode
The type of load/store indexing.
LLVM_ABI bool isLegalMaskedLoad(Type *DataType, Align Alignment, unsigned AddressSpace, MaskKind MaskKind=VariableOrConstantMask) const
Return true if the target supports masked load.
LLVM_ABI InstructionCost getIndexedVectorInstrCostFromEnd(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index) const
LLVM_ABI bool areInlineCompatible(const Function *Caller, const Function *Callee) const
LLVM_ABI bool useColdCCForColdCall(Function &F) const
Return true if the input function, which is cold at all call sites, should use the coldcc calling conventi...
LLVM_ABI InstructionCost getFPOpCost(Type *Ty) const
Return the expected cost of supporting the floating point operation of the specified type.
LLVM_ABI bool supportsTailCalls() const
If the target supports tail calls.
LLVM_ABI bool canMacroFuseCmp() const
Return true if the target can fuse a compare and branch.
LLVM_ABI bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
Query the target whether the specified address space cast from FromAS to ToAS is valid.
LLVM_ABI unsigned getNumberOfParts(Type *Tp) const
AddressingModeKind
Which addressing mode Loop Strength Reduction will try to generate.
LLVM_ABI InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, StackOffset BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace=0) const
Return the cost of the scaling factor used in the addressing mode represented by AM for this target,...
LLVM_ABI bool isTruncateFree(Type *Ty1, Type *Ty2) const
Return true if it's free to truncate a value of type Ty1 to type Ty2.
LLVM_ABI bool isProfitableToSinkOperands(Instruction *I, SmallVectorImpl< Use * > &Ops) const
Return true if sinking I's operands to the same basic block as I is profitable, e....
LLVM_ABI void getMemcpyLoopResidualLoweringType(SmallVectorImpl< Type * > &OpsOut, LLVMContext &Context, unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace, Align SrcAlign, Align DestAlign, std::optional< uint32_t > AtomicCpySize=std::nullopt) const
LLVM_ABI bool forceScalarizeMaskedScatter(VectorType *Type, Align Alignment) const
Return true if the target forces scalarizing of llvm.masked.scatter intrinsics.
LLVM_ABI bool isTargetIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID, int OpdIdx) const
Identifies if the vector form of the intrinsic is overloaded on the type of the operand at index OpdI...
static VectorInstrContext getVectorInstrContextHint(const Instruction *I)
Calculates a VectorInstrContext from I.
LLVM_ABI bool haveFastSqrt(Type *Ty) const
Return true if the hardware has a fast square-root instruction.
LLVM_ABI bool shouldExpandReduction(const IntrinsicInst *II) const
LLVM_ABI InstructionCost getScalarizationOverhead(VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract, TTI::TargetCostKind CostKind, bool ForPoisonSrc=true, ArrayRef< Value * > VL={}, TTI::VectorInstrContext VIC=TTI::VectorInstrContext::None) const
Estimate the overhead of scalarizing an instruction.
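For instance, the overhead of building an 8-lane vector from scalars (inserts only, no extracts); a sketch under the assumed TTI/Ctx setup:
auto *V8I16 = FixedVectorType::get(Type::getInt16Ty(Ctx), 8);
APInt Demanded = APInt::getAllOnes(8); // all eight lanes are written
InstructionCost BuildCost = TTI.getScalarizationOverhead(
    V8I16, Demanded, /*Insert=*/true, /*Extract=*/false,
    TargetTransformInfo::TCK_RecipThroughput);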
LLVM_ABI uint64_t getMaxMemIntrinsicInlineSizeThreshold() const
Returns the maximum memset / memcpy size in bytes that still makes it profitable to inline the call.
ShuffleKind
The various kinds of shuffle patterns for vector queries.
LLVM_ABI APInt getFeatureMask(const Function &F) const
Returns a bitmask constructed from the target-features or fmv-features metadata of a function corresp...
LLVM_ABI void getPeelingPreferences(Loop *L, ScalarEvolution &SE, PeelingPreferences &PP) const
Get target-customized preferences for the generic loop peeling transformation.
LLVM_ABI InstructionCost getCallInstrCost(Function *F, Type *RetTy, ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind=TTI::TCK_SizeAndLatency) const
LLVM_ABI InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind=TTI::TCK_SizeAndLatency, const Instruction *I=nullptr) const
CastContextHint
Represents a hint about the context in which a cast is used.
@ Masked
The cast is used with a masked load/store.
@ None
The cast is not used with a load/store of any kind.
@ Normal
The cast is used with a normal load/store.
@ GatherScatter
The cast is used with a gather/scatter.
LLVM_ABI InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy, unsigned Index, TTI::TargetCostKind CostKind) const
LLVM_ABI InstructionCost getRegisterClassSpillCost(unsigned ClassID, TargetCostKind CostKind) const
OperandValueKind
Additional information about an operand's possible values.
CacheLevel
The possible cache levels.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
Definition Type.cpp:65
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:313
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:370
This is the common base class for vector predication intrinsics.
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:255
Base class of all SIMD vector types.
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
CallInst * Call
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ Fast
Attempts to make calls as fast as possible (e.g.
Definition CallingConv.h:41
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
@ Length
Definition DWP.cpp:557
InstructionCost Cost
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI Value * getSplatValue(const Value *V)
Get splat value if the input is a splat vector or return nullptr.
bool containsIrreducibleCFG(RPOTraversalT &RPOTraversal, const LoopInfoT &LI)
Return true if the control flow in RPOTraversal is irreducible.
Definition CFG.h:154
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI ImmutablePass * createTargetTransformInfoWrapperPass(TargetIRAnalysis TIRA)
Create an analysis pass wrapper around a TTI object.
RecurKind
These are the kinds of recurrences that we support.
constexpr unsigned BitWidth
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1916
auto predecessors(const MachineBasicBlock *BB)
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
ValueUniformity
Enum describing how values behave with respect to uniformity and divergence, to answer the question: ...
Definition Uniformity.h:18
@ NeverUniform
The result value can never be assumed to be uniform.
Definition Uniformity.h:26
@ Default
The result value is uniform if and only if all operands are uniform.
Definition Uniformity.h:20
Implement std::hash so that hash_code can be used in STL containers.
Definition BitVector.h:874
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition Analysis.h:29
Attributes of a target dependent hardware loop.
LLVM_ABI bool canAnalyze(LoopInfo &LI)
LLVM_ABI bool isHardwareLoopCandidate(ScalarEvolution &SE, LoopInfo &LI, DominatorTree &DT, bool ForceNestedLoop=false, bool ForceHardwareLoopPHI=false)
Information about a load/store intrinsic defined by the target.
Returns options for expansion of memcmp. IsZeroCmp is...
OperandValueInfo mergeWith(const OperandValueInfo OpInfoY)
Describe known properties for a set of pointers.
Parameters that control the generic loop unrolling transformation.