// NOTE(review): The three lines below were Doxygen page chrome ("LLVM
// 23.0.0git / TargetTransformInfo.cpp / Go to the documentation of this
// file.") captured by the HTML-to-text rendering; preserved here as a
// comment so the file remains valid C++ and the provenance is recorded.
1//===- llvm/Analysis/TargetTransformInfo.cpp ------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
11#include "llvm/Analysis/CFG.h"
15#include "llvm/IR/CFG.h"
16#include "llvm/IR/Dominators.h"
17#include "llvm/IR/Instruction.h"
20#include "llvm/IR/Module.h"
21#include "llvm/IR/Operator.h"
24#include <optional>
25#include <utility>
26
27using namespace llvm;
28using namespace PatternMatch;
29
30#define DEBUG_TYPE "tti"
31
32static cl::opt<bool> EnableReduxCost("costmodel-reduxcost", cl::init(false),
34 cl::desc("Recognize reduction patterns."));
35
37 "cache-line-size", cl::init(0), cl::Hidden,
38 cl::desc("Use this to override the target cache line size when "
39 "specified by the user."));
40
42 "min-page-size", cl::init(0), cl::Hidden,
43 cl::desc("Use this to override the target's minimum page size."));
44
46 "predictable-branch-threshold", cl::init(99), cl::Hidden,
48 "Use this to override the target's predictable branch threshold (%)."));
49
50namespace {
51/// No-op implementation of the TTI interface using the utility base
52/// classes.
53///
54/// This is used when no target specific information is available.
55struct NoTTIImpl : TargetTransformInfoImplCRTPBase<NoTTIImpl> {
56 explicit NoTTIImpl(const DataLayout &DL)
57 : TargetTransformInfoImplCRTPBase<NoTTIImpl>(DL) {}
58};
59} // namespace
60
62 std::unique_ptr<const TargetTransformInfoImplBase> Impl)
63 : TTIImpl(std::move(Impl)) {}
64
66 // If the loop has irreducible control flow, it can not be converted to
67 // Hardware loop.
68 LoopBlocksRPO RPOT(L);
69 RPOT.perform(&LI);
71 return false;
72 return true;
73}
74
76 Intrinsic::ID Id, const CallBase &CI, InstructionCost ScalarizationCost,
77 bool TypeBasedOnly)
78 : II(dyn_cast<IntrinsicInst>(&CI)), RetTy(CI.getType()), IID(Id),
79 ScalarizationCost(ScalarizationCost) {
80
81 if (const auto *FPMO = dyn_cast<FPMathOperator>(&CI))
82 FMF = FPMO->getFastMathFlags();
83
84 if (!TypeBasedOnly)
85 Arguments.insert(Arguments.begin(), CI.arg_begin(), CI.arg_end());
87 ParamTys.insert(ParamTys.begin(), FTy->param_begin(), FTy->param_end());
88}
89
92 FastMathFlags Flags,
93 const IntrinsicInst *I,
94 InstructionCost ScalarCost)
95 : II(I), RetTy(RTy), IID(Id), FMF(Flags), ScalarizationCost(ScalarCost) {
96 ParamTys.insert(ParamTys.begin(), Tys.begin(), Tys.end());
97}
98
101 : RetTy(Ty), IID(Id) {
102
103 Arguments.insert(Arguments.begin(), Args.begin(), Args.end());
104 ParamTys.reserve(Arguments.size());
105 for (const Value *Argument : Arguments)
106 ParamTys.push_back(Argument->getType());
107}
108
112 FastMathFlags Flags,
113 const IntrinsicInst *I,
114 InstructionCost ScalarCost)
115 : II(I), RetTy(RTy), IID(Id), FMF(Flags), ScalarizationCost(ScalarCost) {
116 ParamTys.insert(ParamTys.begin(), Tys.begin(), Tys.end());
117 Arguments.insert(Arguments.begin(), Args.begin(), Args.end());
118}
119
121 // Match default options:
122 // - hardware-loop-counter-bitwidth = 32
123 // - hardware-loop-decrement = 1
124 CountType = Type::getInt32Ty(L->getHeader()->getContext());
125 LoopDecrement = ConstantInt::get(CountType, 1);
126}
127
129 LoopInfo &LI, DominatorTree &DT,
130 bool ForceNestedLoop,
132 SmallVector<BasicBlock *, 4> ExitingBlocks;
133 L->getExitingBlocks(ExitingBlocks);
134
135 for (BasicBlock *BB : ExitingBlocks) {
136 // If we pass the updated counter back through a phi, we need to know
137 // which latch the updated value will be coming from.
138 if (!L->isLoopLatch(BB)) {
140 continue;
141 }
142
143 const SCEV *EC = SE.getExitCount(L, BB);
145 continue;
146 if (const SCEVConstant *ConstEC = dyn_cast<SCEVConstant>(EC)) {
147 if (ConstEC->getValue()->isZero())
148 continue;
149 } else if (!SE.isLoopInvariant(EC, L))
150 continue;
151
152 if (SE.getTypeSizeInBits(EC->getType()) > CountType->getBitWidth())
153 continue;
154
155 // If this exiting block is contained in a nested loop, it is not eligible
156 // for insertion of the branch-and-decrement since the inner loop would
157 // end up messing up the value in the CTR.
158 if (!IsNestingLegal && LI.getLoopFor(BB) != L && !ForceNestedLoop)
159 continue;
160
161 // We now have a loop-invariant count of loop iterations (which is not the
162 // constant zero) for which we know that this loop will not exit via this
163 // existing block.
164
165 // We need to make sure that this block will run on every loop iteration.
166 // For this to be true, we must dominate all blocks with backedges. Such
167 // blocks are in-loop predecessors to the header block.
168 bool NotAlways = false;
169 for (BasicBlock *Pred : predecessors(L->getHeader())) {
170 if (!L->contains(Pred))
171 continue;
172
173 if (!DT.dominates(BB, Pred)) {
174 NotAlways = true;
175 break;
176 }
177 }
178
179 if (NotAlways)
180 continue;
181
182 // Make sure this blocks ends with a conditional branch.
183 Instruction *TI = BB->getTerminator();
184 if (!TI)
185 continue;
186
187 if (CondBrInst *BI = dyn_cast<CondBrInst>(TI))
188 ExitBranch = BI;
189 else
190 continue;
191
192 // Note that this block may not be the loop latch block, even if the loop
193 // has a latch block.
194 ExitBlock = BB;
195 ExitCount = EC;
196 break;
197 }
198
199 if (!ExitBlock)
200 return false;
201 return true;
202}
203
205 : TTIImpl(std::make_unique<NoTTIImpl>(DL)) {}
206
208
211
213 TTIImpl = std::move(RHS.TTIImpl);
214 return *this;
215}
216
218 return TTIImpl->getInliningThresholdMultiplier();
219}
220
221unsigned
223 return TTIImpl->getInliningCostBenefitAnalysisSavingsMultiplier();
224}
225
226unsigned
228 const {
229 return TTIImpl->getInliningCostBenefitAnalysisProfitableMultiplier();
230}
231
233 return TTIImpl->getInliningLastCallToStaticBonus();
234}
235
236unsigned
238 return TTIImpl->adjustInliningThreshold(CB);
239}
240
242 const AllocaInst *AI) const {
243 return TTIImpl->getCallerAllocaCost(CB, AI);
244}
245
247 return TTIImpl->getInlinerVectorBonusPercent();
248}
249
251 Type *PointeeType, const Value *Ptr, ArrayRef<const Value *> Operands,
252 Type *AccessType, TTI::TargetCostKind CostKind) const {
253 return TTIImpl->getGEPCost(PointeeType, Ptr, Operands, AccessType, CostKind);
254}
255
258 const TTI::PointersChainInfo &Info, Type *AccessTy,
260 assert((Base || !Info.isSameBase()) &&
261 "If pointers have same base address it has to be provided.");
262 return TTIImpl->getPointersChainCost(Ptrs, Base, Info, AccessTy, CostKind);
263}
264
266 const SwitchInst &SI, unsigned &JTSize, ProfileSummaryInfo *PSI,
267 BlockFrequencyInfo *BFI) const {
268 return TTIImpl->getEstimatedNumberOfCaseClusters(SI, JTSize, PSI, BFI);
269}
270
274 enum TargetCostKind CostKind) const {
275 InstructionCost Cost = TTIImpl->getInstructionCost(U, Operands, CostKind);
277 "TTI should not produce negative costs!");
278 return Cost;
279}
280
282 return PredictableBranchThreshold.getNumOccurrences() > 0
284 : TTIImpl->getPredictableBranchThreshold();
285}
286
288 return TTIImpl->getBranchMispredictPenalty();
289}
290
292 return TTIImpl->hasBranchDivergence(F);
293}
294
297 // Calls with the NoDivergenceSource attribute are always uniform.
298 if (const auto *Call = dyn_cast<CallBase>(V)) {
299 if (Call->hasFnAttr(Attribute::NoDivergenceSource))
301 }
302 return TTIImpl->getInstructionUniformity(V);
303}
304
306 unsigned ToAS) const {
307 return TTIImpl->isValidAddrSpaceCast(FromAS, ToAS);
308}
309
311 unsigned ToAS) const {
312 return TTIImpl->addrspacesMayAlias(FromAS, ToAS);
313}
314
316 return TTIImpl->getFlatAddressSpace();
317}
318
320 SmallVectorImpl<int> &OpIndexes, Intrinsic::ID IID) const {
321 return TTIImpl->collectFlatAddressOperands(OpIndexes, IID);
322}
323
325 unsigned ToAS) const {
326 return TTIImpl->isNoopAddrSpaceCast(FromAS, ToAS);
327}
328
329std::pair<KnownBits, KnownBits>
331 const Value &PtrOp) const {
332 return TTIImpl->computeKnownBitsAddrSpaceCast(ToAS, PtrOp);
333}
334
336 unsigned FromAS, unsigned ToAS, const KnownBits &FromPtrBits) const {
337 return TTIImpl->computeKnownBitsAddrSpaceCast(FromAS, ToAS, FromPtrBits);
338}
339
341 unsigned SrcAS, unsigned DstAS) const {
342 return TTIImpl->getAddrSpaceCastPreservedPtrMask(SrcAS, DstAS);
343}
344
346 unsigned AS) const {
347 return TTIImpl->canHaveNonUndefGlobalInitializerInAddressSpace(AS);
348}
349
351 return TTIImpl->getAssumedAddrSpace(V);
352}
353
355 return TTIImpl->isSingleThreaded();
356}
357
358std::pair<const Value *, unsigned>
360 return TTIImpl->getPredicatedAddrSpace(V);
361}
362
364 IntrinsicInst *II, Value *OldV, Value *NewV) const {
365 return TTIImpl->rewriteIntrinsicWithAddressSpace(II, OldV, NewV);
366}
367
369 return TTIImpl->isLoweredToCall(F);
370}
371
374 TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo) const {
375 return TTIImpl->isHardwareLoopProfitable(L, SE, AC, LibInfo, HWLoopInfo);
376}
377
379 return TTIImpl->getEpilogueVectorizationMinVF();
380}
381
383 TailFoldingInfo *TFI) const {
384 return TTIImpl->preferPredicateOverEpilogue(TFI);
385}
386
388 return TTIImpl->getPreferredTailFoldingStyle();
389}
390
391std::optional<Instruction *>
393 IntrinsicInst &II) const {
394 return TTIImpl->instCombineIntrinsic(IC, II);
395}
396
398 InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known,
399 bool &KnownBitsComputed) const {
400 return TTIImpl->simplifyDemandedUseBitsIntrinsic(IC, II, DemandedMask, Known,
401 KnownBitsComputed);
402}
403
405 InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
406 APInt &UndefElts2, APInt &UndefElts3,
407 std::function<void(Instruction *, unsigned, APInt, APInt &)>
408 SimplifyAndSetOp) const {
409 return TTIImpl->simplifyDemandedVectorEltsIntrinsic(
410 IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
411 SimplifyAndSetOp);
412}
413
416 OptimizationRemarkEmitter *ORE) const {
417 return TTIImpl->getUnrollingPreferences(L, SE, UP, ORE);
418}
419
421 PeelingPreferences &PP) const {
422 return TTIImpl->getPeelingPreferences(L, SE, PP);
423}
424
426 return TTIImpl->isLegalAddImmediate(Imm);
427}
428
430 return TTIImpl->isLegalAddScalableImmediate(Imm);
431}
432
434 return TTIImpl->isLegalICmpImmediate(Imm);
435}
436
438 int64_t BaseOffset,
439 bool HasBaseReg, int64_t Scale,
440 unsigned AddrSpace,
441 Instruction *I,
442 int64_t ScalableOffset) const {
443 return TTIImpl->isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg,
444 Scale, AddrSpace, I, ScalableOffset);
445}
446
448 const LSRCost &C2) const {
449 return TTIImpl->isLSRCostLess(C1, C2);
450}
451
453 return TTIImpl->isNumRegsMajorCostOfLSR();
454}
455
457 return TTIImpl->shouldDropLSRSolutionIfLessProfitable();
458}
459
461 return TTIImpl->isProfitableLSRChainElement(I);
462}
463
465 return TTIImpl->canMacroFuseCmp();
466}
467
469 ScalarEvolution *SE, LoopInfo *LI,
471 TargetLibraryInfo *LibInfo) const {
472 return TTIImpl->canSaveCmp(L, BI, SE, LI, DT, AC, LibInfo);
473}
474
477 ScalarEvolution *SE) const {
478 return TTIImpl->getPreferredAddressingMode(L, SE);
479}
480
482 unsigned AddressSpace,
483 TTI::MaskKind MaskKind) const {
484 return TTIImpl->isLegalMaskedStore(DataType, Alignment, AddressSpace,
485 MaskKind);
486}
487
489 unsigned AddressSpace,
490 TTI::MaskKind MaskKind) const {
491 return TTIImpl->isLegalMaskedLoad(DataType, Alignment, AddressSpace,
492 MaskKind);
493}
494
496 Align Alignment) const {
497 return TTIImpl->isLegalNTStore(DataType, Alignment);
498}
499
500bool TargetTransformInfo::isLegalNTLoad(Type *DataType, Align Alignment) const {
501 return TTIImpl->isLegalNTLoad(DataType, Alignment);
502}
503
505 ElementCount NumElements) const {
506 return TTIImpl->isLegalBroadcastLoad(ElementTy, NumElements);
507}
508
510 Align Alignment) const {
511 return TTIImpl->isLegalMaskedGather(DataType, Alignment);
512}
513
515 VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,
516 const SmallBitVector &OpcodeMask) const {
517 return TTIImpl->isLegalAltInstr(VecTy, Opcode0, Opcode1, OpcodeMask);
518}
519
521 Align Alignment) const {
522 return TTIImpl->isLegalMaskedScatter(DataType, Alignment);
523}
524
526 Align Alignment) const {
527 return TTIImpl->forceScalarizeMaskedGather(DataType, Alignment);
528}
529
531 Align Alignment) const {
532 return TTIImpl->forceScalarizeMaskedScatter(DataType, Alignment);
533}
534
536 Align Alignment) const {
537 return TTIImpl->isLegalMaskedCompressStore(DataType, Alignment);
538}
539
541 Align Alignment) const {
542 return TTIImpl->isLegalMaskedExpandLoad(DataType, Alignment);
543}
544
546 Align Alignment) const {
547 return TTIImpl->isLegalStridedLoadStore(DataType, Alignment);
548}
549
551 VectorType *VTy, unsigned Factor, Align Alignment,
552 unsigned AddrSpace) const {
553 return TTIImpl->isLegalInterleavedAccessType(VTy, Factor, Alignment,
554 AddrSpace);
555}
556
558 Type *DataType) const {
559 return TTIImpl->isLegalMaskedVectorHistogram(AddrType, DataType);
560}
561
563 return TTIImpl->enableOrderedReductions();
564}
565
566bool TargetTransformInfo::hasDivRemOp(Type *DataType, bool IsSigned) const {
567 return TTIImpl->hasDivRemOp(DataType, IsSigned);
568}
569
571 unsigned AddrSpace) const {
572 return TTIImpl->hasVolatileVariant(I, AddrSpace);
573}
574
576 return TTIImpl->prefersVectorizedAddressing();
577}
578
580 Type *Ty, GlobalValue *BaseGV, StackOffset BaseOffset, bool HasBaseReg,
581 int64_t Scale, unsigned AddrSpace) const {
582 InstructionCost Cost = TTIImpl->getScalingFactorCost(
583 Ty, BaseGV, BaseOffset, HasBaseReg, Scale, AddrSpace);
584 assert(Cost >= 0 && "TTI should not produce negative costs!");
585 return Cost;
586}
587
589 return TTIImpl->LSRWithInstrQueries();
590}
591
593 return TTIImpl->isTruncateFree(Ty1, Ty2);
594}
595
597 return TTIImpl->isProfitableToHoist(I);
598}
599
600bool TargetTransformInfo::useAA() const { return TTIImpl->useAA(); }
601
603 return TTIImpl->isTypeLegal(Ty);
604}
605
607 return TTIImpl->getRegUsageForType(Ty);
608}
609
611 return TTIImpl->shouldBuildLookupTables();
612}
613
615 Constant *C) const {
616 return TTIImpl->shouldBuildLookupTablesForConstant(C);
617}
618
620 return TTIImpl->shouldBuildRelLookupTables();
621}
622
624 return TTIImpl->useColdCCForColdCall(F);
625}
626
628 return TTIImpl->useFastCCForInternalCall(F);
629}
630
632 Intrinsic::ID ID) const {
633 return TTIImpl->isTargetIntrinsicTriviallyScalarizable(ID);
634}
635
637 Intrinsic::ID ID, unsigned ScalarOpdIdx) const {
638 return TTIImpl->isTargetIntrinsicWithScalarOpAtArg(ID, ScalarOpdIdx);
639}
640
642 Intrinsic::ID ID, int OpdIdx) const {
643 return TTIImpl->isTargetIntrinsicWithOverloadTypeAtArg(ID, OpdIdx);
644}
645
647 Intrinsic::ID ID, int RetIdx) const {
648 return TTIImpl->isTargetIntrinsicWithStructReturnOverloadAtField(ID, RetIdx);
649}
650
653 if (!I)
655
656 // For inserts, check if the value being inserted comes from a single-use
657 // load.
658 if (isa<InsertElementInst>(I) && isa<LoadInst>(I->getOperand(1)) &&
659 I->getOperand(1)->hasOneUse())
661
662 // For extracts, check if it has a single use that is a store.
663 if (isa<ExtractElementInst>(I) && I->hasOneUse() &&
664 isa<StoreInst>(*I->user_begin()))
666
668}
669
671 VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract,
672 TTI::TargetCostKind CostKind, bool ForPoisonSrc, ArrayRef<Value *> VL,
673 TTI::VectorInstrContext VIC) const {
674 return TTIImpl->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract,
675 CostKind, ForPoisonSrc, VL, VIC);
676}
677
680 TTI::VectorInstrContext VIC) const {
681 return TTIImpl->getOperandsScalarizationOverhead(Tys, CostKind, VIC);
682}
683
685 return TTIImpl->supportsEfficientVectorElementLoadStore();
686}
687
689 return TTIImpl->supportsTailCalls();
690}
691
693 return TTIImpl->supportsTailCallFor(CB);
694}
695
697 bool LoopHasReductions) const {
698 return TTIImpl->enableAggressiveInterleaving(LoopHasReductions);
699}
700
702TargetTransformInfo::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
703 return TTIImpl->enableMemCmpExpansion(OptSize, IsZeroCmp);
704}
705
707 return TTIImpl->enableSelectOptimize();
708}
709
711 const Instruction *I) const {
712 return TTIImpl->shouldTreatInstructionLikeSelect(I);
713}
714
716 return TTIImpl->enableInterleavedAccessVectorization();
717}
718
720 return TTIImpl->enableMaskedInterleavedAccessVectorization();
721}
722
724 return TTIImpl->isFPVectorizationPotentiallyUnsafe();
725}
726
727bool
729 unsigned BitWidth,
730 unsigned AddressSpace,
731 Align Alignment,
732 unsigned *Fast) const {
733 return TTIImpl->allowsMisalignedMemoryAccesses(Context, BitWidth,
734 AddressSpace, Alignment, Fast);
735}
736
738TargetTransformInfo::getPopcntSupport(unsigned IntTyWidthInBit) const {
739 return TTIImpl->getPopcntSupport(IntTyWidthInBit);
740}
741
743 return TTIImpl->haveFastSqrt(Ty);
744}
745
747 const Instruction *I) const {
748 return TTIImpl->isExpensiveToSpeculativelyExecute(I);
749}
750
752 return TTIImpl->isFCmpOrdCheaperThanFCmpZero(Ty);
753}
754
756 InstructionCost Cost = TTIImpl->getFPOpCost(Ty);
757 assert(Cost >= 0 && "TTI should not produce negative costs!");
758 return Cost;
759}
760
762 unsigned Idx,
763 const APInt &Imm,
764 Type *Ty) const {
765 InstructionCost Cost = TTIImpl->getIntImmCodeSizeCost(Opcode, Idx, Imm, Ty);
766 assert(Cost >= 0 && "TTI should not produce negative costs!");
767 return Cost;
768}
769
773 InstructionCost Cost = TTIImpl->getIntImmCost(Imm, Ty, CostKind);
774 assert(Cost >= 0 && "TTI should not produce negative costs!");
775 return Cost;
776}
777
779 unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty,
782 TTIImpl->getIntImmCostInst(Opcode, Idx, Imm, Ty, CostKind, Inst);
783 assert(Cost >= 0 && "TTI should not produce negative costs!");
784 return Cost;
785}
786
789 const APInt &Imm, Type *Ty,
792 TTIImpl->getIntImmCostIntrin(IID, Idx, Imm, Ty, CostKind);
793 assert(Cost >= 0 && "TTI should not produce negative costs!");
794 return Cost;
795}
796
798 const Instruction &Inst, const Function &Fn) const {
799 return TTIImpl->preferToKeepConstantsAttached(Inst, Fn);
800}
801
802unsigned TargetTransformInfo::getNumberOfRegisters(unsigned ClassID) const {
803 return TTIImpl->getNumberOfRegisters(ClassID);
804}
805
807 bool IsStore) const {
808 return TTIImpl->hasConditionalLoadStoreForType(Ty, IsStore);
809}
810
812 Type *Ty) const {
813 return TTIImpl->getRegisterClassForType(Vector, Ty);
814}
815
816const char *TargetTransformInfo::getRegisterClassName(unsigned ClassID) const {
817 return TTIImpl->getRegisterClassName(ClassID);
818}
819
821 unsigned ClassID, TTI::TargetCostKind CostKind) const {
822 return TTIImpl->getRegisterClassSpillCost(ClassID, CostKind);
823}
824
826 unsigned ClassID, TTI::TargetCostKind CostKind) const {
827 return TTIImpl->getRegisterClassReloadCost(ClassID, CostKind);
828}
829
832 return TTIImpl->getRegisterBitWidth(K);
833}
834
836 return TTIImpl->getMinVectorRegisterBitWidth();
837}
838
839std::optional<unsigned> TargetTransformInfo::getMaxVScale() const {
840 return TTIImpl->getMaxVScale();
841}
842
843std::optional<unsigned> TargetTransformInfo::getVScaleForTuning() const {
844 return TTIImpl->getVScaleForTuning();
845}
846
849 return TTIImpl->shouldMaximizeVectorBandwidth(K);
850}
851
853 bool IsScalable) const {
854 return TTIImpl->getMinimumVF(ElemWidth, IsScalable);
855}
856
857unsigned TargetTransformInfo::getMaximumVF(unsigned ElemWidth,
858 unsigned Opcode) const {
859 return TTIImpl->getMaximumVF(ElemWidth, Opcode);
860}
861
862unsigned TargetTransformInfo::getStoreMinimumVF(unsigned VF, Type *ScalarMemTy,
863 Type *ScalarValTy,
864 Align Alignment,
865 unsigned AddrSpace) const {
866 return TTIImpl->getStoreMinimumVF(VF, ScalarMemTy, ScalarValTy, Alignment,
867 AddrSpace);
868}
869
871 const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const {
872 return TTIImpl->shouldConsiderAddressTypePromotion(
873 I, AllowPromotionWithoutCommonHeader);
874}
875
877 return CacheLineSize.getNumOccurrences() > 0 ? CacheLineSize
878 : TTIImpl->getCacheLineSize();
879}
880
881std::optional<unsigned>
883 return TTIImpl->getCacheSize(Level);
884}
885
886std::optional<unsigned>
888 return TTIImpl->getCacheAssociativity(Level);
889}
890
891std::optional<unsigned> TargetTransformInfo::getMinPageSize() const {
892 return MinPageSize.getNumOccurrences() > 0 ? MinPageSize
893 : TTIImpl->getMinPageSize();
894}
895
897 return TTIImpl->getPrefetchDistance();
898}
899
901 unsigned NumMemAccesses, unsigned NumStridedMemAccesses,
902 unsigned NumPrefetches, bool HasCall) const {
903 return TTIImpl->getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses,
904 NumPrefetches, HasCall);
905}
906
908 return TTIImpl->getMaxPrefetchIterationsAhead();
909}
910
912 return TTIImpl->enableWritePrefetching();
913}
914
916 return TTIImpl->shouldPrefetchAddressSpace(AS);
917}
918
920 unsigned Opcode, Type *InputTypeA, Type *InputTypeB, Type *AccumType,
922 PartialReductionExtendKind OpBExtend, std::optional<unsigned> BinOp,
923 TTI::TargetCostKind CostKind, std::optional<FastMathFlags> FMF) const {
924 return TTIImpl->getPartialReductionCost(Opcode, InputTypeA, InputTypeB,
925 AccumType, VF, OpAExtend, OpBExtend,
926 BinOp, CostKind, FMF);
927}
928
930 return TTIImpl->getMaxInterleaveFactor(VF);
931}
932
937
938 // undef/poison don't materialize constants.
939 if (isa<UndefValue>(V))
940 return {OK_AnyValue, OP_None};
941
942 if (isa<ConstantInt>(V) || isa<ConstantFP>(V)) {
943 if (const auto *CI = dyn_cast<ConstantInt>(V)) {
944 if (CI->getValue().isPowerOf2())
945 OpProps = OP_PowerOf2;
946 else if (CI->getValue().isNegatedPowerOf2())
947 OpProps = OP_NegatedPowerOf2;
948 }
949 return {OK_UniformConstantValue, OpProps};
950 }
951
952 // A broadcast shuffle creates a uniform value.
953 // TODO: Add support for non-zero index broadcasts.
954 // TODO: Add support for different source vector width.
955 if (const auto *ShuffleInst = dyn_cast<ShuffleVectorInst>(V))
956 if (ShuffleInst->isZeroEltSplat())
957 OpInfo = OK_UniformValue;
958
959 const Value *Splat = getSplatValue(V);
960
961 // Check for a splat of a constant or for a non uniform vector of constants
962 // and check if the constant(s) are all powers of two.
963 if (Splat) {
964 // Check for a splat of a uniform value. This is not loop aware, so return
965 // true only for the obviously uniform cases (argument, globalvalue)
967 OpInfo = OK_UniformValue;
968 } else if (isa<Constant>(Splat)) {
970 if (auto *CI = dyn_cast<ConstantInt>(Splat)) {
971 if (CI->getValue().isPowerOf2())
972 OpProps = OP_PowerOf2;
973 else if (CI->getValue().isNegatedPowerOf2())
974 OpProps = OP_NegatedPowerOf2;
975 }
976 }
977 } else if (const auto *CDS = dyn_cast<ConstantDataSequential>(V)) {
979 bool AllPow2 = true, AllNegPow2 = true;
980 for (uint64_t I = 0, E = CDS->getNumElements(); I != E; ++I) {
981 if (auto *CI = dyn_cast<ConstantInt>(CDS->getElementAsConstant(I))) {
982 AllPow2 &= CI->getValue().isPowerOf2();
983 AllNegPow2 &= CI->getValue().isNegatedPowerOf2();
984 if (AllPow2 || AllNegPow2)
985 continue;
986 }
987 AllPow2 = AllNegPow2 = false;
988 break;
989 }
990 OpProps = AllPow2 ? OP_PowerOf2 : OpProps;
991 OpProps = AllNegPow2 ? OP_NegatedPowerOf2 : OpProps;
992 } else if (isa<ConstantVector>(V) || isa<ConstantDataVector>(V)) {
994 }
995
996 return {OpInfo, OpProps};
997}
998
1002 if (X == Y)
1003 return OpInfoX;
1004 return OpInfoX.mergeWith(getOperandInfo(Y));
1005}
1006
1008 unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
1009 OperandValueInfo Op1Info, OperandValueInfo Op2Info,
1010 ArrayRef<const Value *> Args, const Instruction *CxtI,
1011 const TargetLibraryInfo *TLibInfo) const {
1012
1013 // Use call cost for frem intructions that have platform specific vector math
1014 // functions, as those will be replaced with calls later by SelectionDAG or
1015 // ReplaceWithVecLib pass.
1016 if (TLibInfo && Opcode == Instruction::FRem) {
1017 VectorType *VecTy = dyn_cast<VectorType>(Ty);
1018 LibFunc Func;
1019 if (VecTy &&
1020 TLibInfo->getLibFunc(Instruction::FRem, Ty->getScalarType(), Func) &&
1021 TLibInfo->isFunctionVectorizable(TLibInfo->getName(Func),
1022 VecTy->getElementCount()))
1023 return getCallInstrCost(nullptr, VecTy, {VecTy, VecTy}, CostKind);
1024 }
1025
1026 InstructionCost Cost = TTIImpl->getArithmeticInstrCost(
1027 Opcode, Ty, CostKind, Op1Info, Op2Info, Args, CxtI);
1028 assert(Cost >= 0 && "TTI should not produce negative costs!");
1029 return Cost;
1030}
1031
1033 VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,
1034 const SmallBitVector &OpcodeMask, TTI::TargetCostKind CostKind) const {
1036 TTIImpl->getAltInstrCost(VecTy, Opcode0, Opcode1, OpcodeMask, CostKind);
1037 assert(Cost >= 0 && "TTI should not produce negative costs!");
1038 return Cost;
1039}
1040
1042 ShuffleKind Kind, VectorType *DstTy, VectorType *SrcTy, ArrayRef<int> Mask,
1043 TTI::TargetCostKind CostKind, int Index, VectorType *SubTp,
1044 ArrayRef<const Value *> Args, const Instruction *CxtI) const {
1045 assert((Mask.empty() || DstTy->isScalableTy() ||
1046 Mask.size() == DstTy->getElementCount().getKnownMinValue()) &&
1047 "Expected the Mask to match the return size if given");
1048 assert(SrcTy->getScalarType() == DstTy->getScalarType() &&
1049 "Expected the same scalar types");
1050 InstructionCost Cost = TTIImpl->getShuffleCost(
1051 Kind, DstTy, SrcTy, Mask, CostKind, Index, SubTp, Args, CxtI);
1052 assert(Cost >= 0 && "TTI should not produce negative costs!");
1053 return Cost;
1054}
1055
1058 if (auto *Cast = dyn_cast<CastInst>(I))
1059 return getPartialReductionExtendKind(Cast->getOpcode());
1060 return PR_None;
1061}
1062
1066 switch (Kind) {
1068 return Instruction::CastOps::ZExt;
1070 return Instruction::CastOps::SExt;
1072 return Instruction::CastOps::FPExt;
1073 default:
1074 break;
1075 }
1076 llvm_unreachable("Unhandled partial reduction extend kind");
1077}
1078
1081 Instruction::CastOps CastOpc) {
1082 switch (CastOpc) {
1083 case Instruction::CastOps::ZExt:
1084 return PR_ZeroExtend;
1085 case Instruction::CastOps::SExt:
1086 return PR_SignExtend;
1087 case Instruction::CastOps::FPExt:
1088 return PR_FPExtend;
1089 default:
1090 return PR_None;
1091 }
1092 llvm_unreachable("Unhandled cast opcode");
1093}
1094
1097 if (!I)
1098 return CastContextHint::None;
1099
1100 auto getLoadStoreKind = [](const Value *V, unsigned LdStOp, unsigned MaskedOp,
1101 unsigned GatScatOp) {
1103 if (!I)
1104 return CastContextHint::None;
1105
1106 if (I->getOpcode() == LdStOp)
1108
1109 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1110 if (II->getIntrinsicID() == MaskedOp)
1112 if (II->getIntrinsicID() == GatScatOp)
1114 }
1115
1117 };
1118
1119 switch (I->getOpcode()) {
1120 case Instruction::ZExt:
1121 case Instruction::SExt:
1122 case Instruction::FPExt:
1123 return getLoadStoreKind(I->getOperand(0), Instruction::Load,
1124 Intrinsic::masked_load, Intrinsic::masked_gather);
1125 case Instruction::Trunc:
1126 case Instruction::FPTrunc:
1127 if (I->hasOneUse())
1128 return getLoadStoreKind(*I->user_begin(), Instruction::Store,
1129 Intrinsic::masked_store,
1130 Intrinsic::masked_scatter);
1131 break;
1132 default:
1133 return CastContextHint::None;
1134 }
1135
1137}
1138
1140 unsigned Opcode, Type *Dst, Type *Src, CastContextHint CCH,
1141 TTI::TargetCostKind CostKind, const Instruction *I) const {
1142 assert((I == nullptr || I->getOpcode() == Opcode) &&
1143 "Opcode should reflect passed instruction.");
1145 TTIImpl->getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
1146 assert(Cost >= 0 && "TTI should not produce negative costs!");
1147 return Cost;
1148}
1149
1151 unsigned Opcode, Type *Dst, VectorType *VecTy, unsigned Index,
1154 TTIImpl->getExtractWithExtendCost(Opcode, Dst, VecTy, Index, CostKind);
1155 assert(Cost >= 0 && "TTI should not produce negative costs!");
1156 return Cost;
1157}
1158
1160 unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I) const {
1161 assert((I == nullptr || I->getOpcode() == Opcode) &&
1162 "Opcode should reflect passed instruction.");
1163 InstructionCost Cost = TTIImpl->getCFInstrCost(Opcode, CostKind, I);
1164 assert(Cost >= 0 && "TTI should not produce negative costs!");
1165 return Cost;
1166}
1167
1169 unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
1171 OperandValueInfo Op2Info, const Instruction *I) const {
1172 assert((I == nullptr || I->getOpcode() == Opcode) &&
1173 "Opcode should reflect passed instruction.");
1174 InstructionCost Cost = TTIImpl->getCmpSelInstrCost(
1175 Opcode, ValTy, CondTy, VecPred, CostKind, Op1Info, Op2Info, I);
1176 assert(Cost >= 0 && "TTI should not produce negative costs!");
1177 return Cost;
1178}
1179
1181 unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index,
1182 const Value *Op0, const Value *Op1, TTI::VectorInstrContext VIC) const {
1183 assert((Opcode == Instruction::InsertElement ||
1184 Opcode == Instruction::ExtractElement) &&
1185 "Expecting Opcode to be insertelement/extractelement.");
1187 TTIImpl->getVectorInstrCost(Opcode, Val, CostKind, Index, Op0, Op1, VIC);
1188 assert(Cost >= 0 && "TTI should not produce negative costs!");
1189 return Cost;
1190}
1191
1193 unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index,
1194 Value *Scalar, ArrayRef<std::tuple<Value *, User *, int>> ScalarUserAndIdx,
1195 TTI::VectorInstrContext VIC) const {
1196 assert((Opcode == Instruction::InsertElement ||
1197 Opcode == Instruction::ExtractElement) &&
1198 "Expecting Opcode to be insertelement/extractelement.");
1199 InstructionCost Cost = TTIImpl->getVectorInstrCost(
1200 Opcode, Val, CostKind, Index, Scalar, ScalarUserAndIdx, VIC);
1201 assert(Cost >= 0 && "TTI should not produce negative costs!");
1202 return Cost;
1203}
1204
1207 unsigned Index, TTI::VectorInstrContext VIC) const {
1208 // FIXME: Assert that Opcode is either InsertElement or ExtractElement.
1209 // This is mentioned in the interface description and respected by all
1210 // callers, but never asserted upon.
1212 TTIImpl->getVectorInstrCost(I, Val, CostKind, Index, VIC);
1213 assert(Cost >= 0 && "TTI should not produce negative costs!");
1214 return Cost;
1215}
1216
1218 unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind,
1219 unsigned Index) const {
1221 TTIImpl->getIndexedVectorInstrCostFromEnd(Opcode, Val, CostKind, Index);
1222 assert(Cost >= 0 && "TTI should not produce negative costs!");
1223 return Cost;
1224}
1225
1227 unsigned Opcode, TTI::TargetCostKind CostKind) const {
1228 assert((Opcode == Instruction::InsertValue ||
1229 Opcode == Instruction::ExtractValue) &&
1230 "Expecting Opcode to be insertvalue/extractvalue.");
1231 InstructionCost Cost = TTIImpl->getInsertExtractValueCost(Opcode, CostKind);
1232 assert(Cost >= 0 && "TTI should not produce negative costs!");
1233 return Cost;
1234}
1235
1237 Type *EltTy, int ReplicationFactor, int VF, const APInt &DemandedDstElts,
1239 InstructionCost Cost = TTIImpl->getReplicationShuffleCost(
1240 EltTy, ReplicationFactor, VF, DemandedDstElts, CostKind);
1241 assert(Cost >= 0 && "TTI should not produce negative costs!");
1242 return Cost;
1243}
1244
1246 unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
1248 const Instruction *I) const {
1249 assert((I == nullptr || I->getOpcode() == Opcode) &&
1250 "Opcode should reflect passed instruction.");
1251 InstructionCost Cost = TTIImpl->getMemoryOpCost(
1252 Opcode, Src, Alignment, AddressSpace, CostKind, OpInfo, I);
1253 assert(Cost >= 0 && "TTI should not produce negative costs!");
1254 return Cost;
1255}
1256
1258 unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
1259 Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
1260 bool UseMaskForCond, bool UseMaskForGaps) const {
1261 InstructionCost Cost = TTIImpl->getInterleavedMemoryOpCost(
1262 Opcode, VecTy, Factor, Indices, Alignment, AddressSpace, CostKind,
1263 UseMaskForCond, UseMaskForGaps);
1264 assert(Cost >= 0 && "TTI should not produce negative costs!");
1265 return Cost;
1266}
1267
1271 InstructionCost Cost = TTIImpl->getIntrinsicInstrCost(ICA, CostKind);
1272 assert(Cost >= 0 && "TTI should not produce negative costs!");
1273 return Cost;
1274}
1275
1277 const MemIntrinsicCostAttributes &MICA,
1279 InstructionCost Cost = TTIImpl->getMemIntrinsicInstrCost(MICA, CostKind);
1280 assert(Cost >= 0 && "TTI should not produce negative costs!");
1281 return Cost;
1282}
1283
1286 ArrayRef<Type *> Tys,
1288 InstructionCost Cost = TTIImpl->getCallInstrCost(F, RetTy, Tys, CostKind);
1289 assert(Cost >= 0 && "TTI should not produce negative costs!");
1290 return Cost;
1291}
1292
1294 return TTIImpl->getNumberOfParts(Tp);
1295}
1296
1298 Type *PtrTy, ScalarEvolution *SE, const SCEV *Ptr,
1301 TTIImpl->getAddressComputationCost(PtrTy, SE, Ptr, CostKind);
1302 assert(Cost >= 0 && "TTI should not produce negative costs!");
1303 return Cost;
1304}
1305
1307 InstructionCost Cost = TTIImpl->getMemcpyCost(I);
1308 assert(Cost >= 0 && "TTI should not produce negative costs!");
1309 return Cost;
1310}
1311
1313 return TTIImpl->getMaxMemIntrinsicInlineSizeThreshold();
1314}
1315
1317 unsigned Opcode, VectorType *Ty, std::optional<FastMathFlags> FMF,
1320 TTIImpl->getArithmeticReductionCost(Opcode, Ty, FMF, CostKind);
1321 assert(Cost >= 0 && "TTI should not produce negative costs!");
1322 return Cost;
1323}
1324
1329 TTIImpl->getMinMaxReductionCost(IID, Ty, FMF, CostKind);
1330 assert(Cost >= 0 && "TTI should not produce negative costs!");
1331 return Cost;
1332}
1333
1335 unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *Ty,
1336 std::optional<FastMathFlags> FMF, TTI::TargetCostKind CostKind) const {
1337 return TTIImpl->getExtendedReductionCost(Opcode, IsUnsigned, ResTy, Ty, FMF,
1338 CostKind);
1339}
1340
1342 bool IsUnsigned, unsigned RedOpcode, Type *ResTy, VectorType *Ty,
1344 return TTIImpl->getMulAccReductionCost(IsUnsigned, RedOpcode, ResTy, Ty,
1345 CostKind);
1346}
1347
1350 return TTIImpl->getCostOfKeepingLiveOverCall(Tys);
1351}
1352
1354 MemIntrinsicInfo &Info) const {
1355 return TTIImpl->getTgtMemIntrinsic(Inst, Info);
1356}
1357
1359 return TTIImpl->getAtomicMemIntrinsicMaxElementSize();
1360}
1361
1363 IntrinsicInst *Inst, Type *ExpectedType, bool CanCreate) const {
1364 return TTIImpl->getOrCreateResultFromMemIntrinsic(Inst, ExpectedType,
1365 CanCreate);
1366}
1367
1369 LLVMContext &Context, Value *Length, unsigned SrcAddrSpace,
1370 unsigned DestAddrSpace, Align SrcAlign, Align DestAlign,
1371 std::optional<uint32_t> AtomicElementSize) const {
1372 return TTIImpl->getMemcpyLoopLoweringType(Context, Length, SrcAddrSpace,
1373 DestAddrSpace, SrcAlign, DestAlign,
1374 AtomicElementSize);
1375}
1376
1378 SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
1379 unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
1380 Align SrcAlign, Align DestAlign,
1381 std::optional<uint32_t> AtomicCpySize) const {
1382 TTIImpl->getMemcpyLoopResidualLoweringType(
1383 OpsOut, Context, RemainingBytes, SrcAddrSpace, DestAddrSpace, SrcAlign,
1384 DestAlign, AtomicCpySize);
1385}
1386
1388 const Function *Callee) const {
1389 return TTIImpl->areInlineCompatible(Caller, Callee);
1390}
1391
1392unsigned
1394 const CallBase &Call,
1395 unsigned DefaultCallPenalty) const {
1396 return TTIImpl->getInlineCallPenalty(F, Call, DefaultCallPenalty);
1397}
1398
1400 const Function *Caller, const Attribute &Attr) const {
1401 return TTIImpl->shouldCopyAttributeWhenOutliningFrom(Caller, Attr);
1402}
1404 const Function *Callee,
1405 ArrayRef<Type *> Types) const {
1406 return TTIImpl->areTypesABICompatible(Caller, Callee, Types);
1407}
1408
1410 Type *Ty) const {
1411 return TTIImpl->isIndexedLoadLegal(Mode, Ty);
1412}
1413
1415 Type *Ty) const {
1416 return TTIImpl->isIndexedStoreLegal(Mode, Ty);
1417}
1418
1420 return TTIImpl->getLoadStoreVecRegBitWidth(AS);
1421}
1422
1424 return TTIImpl->isLegalToVectorizeLoad(LI);
1425}
1426
1428 return TTIImpl->isLegalToVectorizeStore(SI);
1429}
1430
1432 unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const {
1433 return TTIImpl->isLegalToVectorizeLoadChain(ChainSizeInBytes, Alignment,
1434 AddrSpace);
1435}
1436
1438 unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const {
1439 return TTIImpl->isLegalToVectorizeStoreChain(ChainSizeInBytes, Alignment,
1440 AddrSpace);
1441}
1442
1444 const RecurrenceDescriptor &RdxDesc, ElementCount VF) const {
1445 return TTIImpl->isLegalToVectorizeReduction(RdxDesc, VF);
1446}
1447
1449 return TTIImpl->isElementTypeLegalForScalableVector(Ty);
1450}
1451
1453 unsigned LoadSize,
1454 unsigned ChainSizeInBytes,
1455 VectorType *VecTy) const {
1456 return TTIImpl->getLoadVectorFactor(VF, LoadSize, ChainSizeInBytes, VecTy);
1457}
1458
1460 unsigned StoreSize,
1461 unsigned ChainSizeInBytes,
1462 VectorType *VecTy) const {
1463 return TTIImpl->getStoreVectorFactor(VF, StoreSize, ChainSizeInBytes, VecTy);
1464}
1465
1467 bool IsEpilogue) const {
1468 return TTIImpl->preferFixedOverScalableIfEqualCost(IsEpilogue);
1469}
1470
1472 Type *Ty) const {
1473 return TTIImpl->preferInLoopReduction(Kind, Ty);
1474}
1475
1477 return TTIImpl->preferAlternateOpcodeVectorization();
1478}
1479
1481 return TTIImpl->preferPredicatedReductionSelect();
1482}
1483
1485 ElementCount Iters) const {
1486 return TTIImpl->preferEpilogueVectorization(Iters);
1487}
1488
1490 return TTIImpl->shouldConsiderVectorizationRegPressure();
1491}
1492
1495 return TTIImpl->getVPLegalizationStrategy(VPI);
1496}
1497
1499 return TTIImpl->hasArmWideBranch(Thumb);
1500}
1501
1503 return TTIImpl->getFeatureMask(F);
1504}
1505
1507 return TTIImpl->getPriorityMask(F);
1508}
1509
1511 return TTIImpl->isMultiversionedFunction(F);
1512}
1513
1515 return TTIImpl->getMaxNumArgs();
1516}
1517
1519 return TTIImpl->shouldExpandReduction(II);
1520}
1521
1524 const IntrinsicInst *II) const {
1525 return TTIImpl->getPreferredExpandedReductionShuffle(II);
1526}
1527
1529 return TTIImpl->getGISelRematGlobalCost();
1530}
1531
1533 return TTIImpl->getMinTripCountTailFoldingThreshold();
1534}
1535
1537 return TTIImpl->supportsScalableVectors();
1538}
1539
1541 return TTIImpl->enableScalableVectorization();
1542}
1543
1545 return TTIImpl->hasActiveVectorLength();
1546}
1547
1549 Instruction *I, SmallVectorImpl<Use *> &OpsToSink) const {
1550 return TTIImpl->isProfitableToSinkOperands(I, OpsToSink);
1551}
1552
1554 return TTIImpl->isVectorShiftByScalarCheap(Ty);
1555}
1556
1557unsigned
1559 Type *ArrayType) const {
1560 return TTIImpl->getNumBytesToPadGlobalArray(Size, ArrayType);
1561}
1562
1564 const Function &F,
1565 SmallVectorImpl<std::pair<StringRef, int64_t>> &LB) const {
1566 return TTIImpl->collectKernelLaunchBounds(F, LB);
1567}
1568
1570 return TTIImpl->allowVectorElementIndexingUsingGEP();
1571}
1572
1574 const SmallBitVector &UniformArgs) const {
1575 return TTIImpl->isUniform(I, UniformArgs);
1576}
1577
1579
1580TargetIRAnalysis::TargetIRAnalysis() : TTICallback(&getDefaultTTI) {}
1581
1583 std::function<Result(const Function &)> TTICallback)
1584 : TTICallback(std::move(TTICallback)) {}
1585
1588 assert(!F.isIntrinsic() && "Should not request TTI for intrinsics");
1589 return TTICallback(F);
1590}
1591
1592AnalysisKey TargetIRAnalysis::Key;
1593
1594TargetIRAnalysis::Result TargetIRAnalysis::getDefaultTTI(const Function &F) {
1595 return Result(F.getDataLayout());
1596}
1597
1598// Register the basic pass.
1600 "Target Transform Information", false, true)
1602
1603void TargetTransformInfoWrapperPass::anchor() {}
1604
1607
1611
1613 FunctionAnalysisManager DummyFAM;
1614 TTI = TIRA.run(F, DummyFAM);
1615 return *TTI;
1616}
1617
for(const MachineOperand &MO :llvm::drop_begin(OldMI.operands(), Desc.getNumOperands()))
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
#define X(NUM, ENUM, NAME)
Definition ELF.h:849
static cl::opt< OutputCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(OutputCostKind::RecipThroughput), cl::values(clEnumValN(OutputCostKind::RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(OutputCostKind::Latency, "latency", "Instruction latency"), clEnumValN(OutputCostKind::CodeSize, "code-size", "Code size"), clEnumValN(OutputCostKind::SizeAndLatency, "size-latency", "Code size and latency"), clEnumValN(OutputCostKind::All, "all", "Print all cost kinds")))
static cl::opt< bool > ForceNestedLoop("force-nested-hardware-loop", cl::Hidden, cl::init(false), cl::desc("Force allowance of nested hardware loops"))
static cl::opt< bool > ForceHardwareLoopPHI("force-hardware-loop-phi", cl::Hidden, cl::init(false), cl::desc("Force hardware loop counter to be updated through a phi"))
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
uint64_t IntrinsicInst * II
if(PassOpts->AAPipeline)
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
This file defines the SmallVector class.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
This file provides helpers for the implementation of a TargetTransformInfo-conforming class.
static cl::opt< unsigned > PredictableBranchThreshold("predictable-branch-threshold", cl::init(99), cl::Hidden, cl::desc("Use this to override the target's predictable branch threshold (%)."))
static cl::opt< bool > EnableReduxCost("costmodel-reduxcost", cl::init(false), cl::Hidden, cl::desc("Recognize reduction patterns."))
static cl::opt< unsigned > MinPageSize("min-page-size", cl::init(0), cl::Hidden, cl::desc("Use this to override the target's minimum page size."))
static cl::opt< unsigned > CacheLineSize("cache-line-size", cl::init(0), cl::Hidden, cl::desc("Use this to override the target cache line size when " "specified by the user."))
This pass exposes codegen information to IR-level passes.
Class for arbitrary precision integers.
Definition APInt.h:78
an instruction to allocate memory on the stack
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
iterator end() const
Definition ArrayRef.h:131
iterator begin() const
Definition ArrayRef.h:130
Class to represent array types.
A cache of @llvm.assume calls within a function.
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:105
LLVM Basic Block Representation.
Definition BasicBlock.h:62
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
Conditional Branch instruction.
This is an important base class in LLVM.
Definition Constant.h:43
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:159
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Convenience struct for specifying and reasoning about fast-math flags.
Definition FMF.h:23
Class to represent function types.
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition Function.h:211
ImmutablePass class - This class is used to provide information that does not need to be run.
Definition Pass.h:285
ImmutablePass(char &pid)
Definition Pass.h:287
The core instruction combiner logic.
LLVM_ABI IntrinsicCostAttributes(Intrinsic::ID Id, const CallBase &CI, InstructionCost ScalarCost=InstructionCost::getInvalid(), bool TypeBasedOnly=false)
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
An instruction for reading from memory.
Wrapper class to LoopBlocksDFS that provides a standard begin()/end() interface for the DFS reverse p...
void perform(const LoopInfo *LI)
Traverse the loop blocks and store the DFS result.
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
Represents a single loop in the control flow graph.
Definition LoopInfo.h:40
Information for memory intrinsic cost model.
The optimization diagnostic interface.
Analysis providing profile information.
The RecurrenceDescriptor is used to identify recurrences variables in a loop.
This class represents a constant integer value.
This class represents an analyzed expression in the program.
The main scalar evolution driver.
LLVM_ABI uint64_t getTypeSizeInBits(Type *Ty) const
Return the size in bits of the specified type, for which isSCEVable must return true.
LLVM_ABI bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
LLVM_ABI const SCEV * getExitCount(const Loop *L, const BasicBlock *ExitingBlock, ExitCountKind Kind=Exact)
Return the number of times the backedge executes before the given exit would be taken; if not exactly...
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is ...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StackOffset holds a fixed and a scalable offset in bytes.
Definition TypeSize.h:30
An instruction for storing to memory.
Multiway switch.
Analysis pass providing the TargetTransformInfo.
LLVM_ABI Result run(const Function &F, FunctionAnalysisManager &)
LLVM_ABI TargetIRAnalysis()
Default construct a target IR analysis.
Provides information about what library functions are available for the current target.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
StringRef getName(LibFunc F) const
bool isFunctionVectorizable(StringRef F, const ElementCount &VF) const
CRTP base class for use as a mix-in that aids implementing a TargetTransformInfo-compatible class.
Wrapper pass for TargetTransformInfo.
TargetTransformInfoWrapperPass()
We must provide a default constructor for the pass but it should never be used.
TargetTransformInfo & getTTI(const Function &F)
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
LLVM_ABI bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) const
LLVM_ABI Value * getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst, Type *ExpectedType, bool CanCreate=true) const
LLVM_ABI bool isLegalToVectorizeLoad(LoadInst *LI) const
LLVM_ABI std::optional< unsigned > getVScaleForTuning() const
static LLVM_ABI CastContextHint getCastContextHint(const Instruction *I)
Calculates a CastContextHint from I.
LLVM_ABI unsigned getMaxNumArgs() const
LLVM_ABI bool addrspacesMayAlias(unsigned AS0, unsigned AS1) const
Return false if a AS0 address cannot possibly alias a AS1 address.
LLVM_ABI bool isLegalMaskedScatter(Type *DataType, Align Alignment) const
Return true if the target supports masked scatter.
LLVM_ABI bool shouldBuildLookupTables() const
Return true if switches should be turned into lookup tables for the target.
LLVM_ABI bool isLegalToVectorizeStore(StoreInst *SI) const
LLVM_ABI InstructionCost getMulAccReductionCost(bool IsUnsigned, unsigned RedOpcode, Type *ResTy, VectorType *Ty, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput) const
Calculate the cost of an extended reduction pattern, similar to getArithmeticReductionCost of an Add/...
LLVM_ABI bool areTypesABICompatible(const Function *Caller, const Function *Callee, ArrayRef< Type * > Types) const
LLVM_ABI bool enableAggressiveInterleaving(bool LoopHasReductions) const
Don't restrict interleaved unrolling to small loops.
LLVM_ABI bool isMultiversionedFunction(const Function &F) const
Returns true if this is an instance of a function with multiple versions.
LLVM_ABI InstructionUniformity getInstructionUniformity(const Value *V) const
Get target-specific uniformity information for an instruction.
LLVM_ABI bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) const
Return true if it is faster to check if a floating-point value is NaN (or not-NaN) versus a compariso...
LLVM_ABI bool isLegalMaskedStore(Type *DataType, Align Alignment, unsigned AddressSpace, MaskKind MaskKind=VariableOrConstantMask) const
Return true if the target supports masked store.
LLVM_ABI bool supportsEfficientVectorElementLoadStore() const
If target has efficient vector element load/store instructions, it can return true here so that inser...
LLVM_ABI unsigned getAssumedAddrSpace(const Value *V) const
LLVM_ABI bool preferAlternateOpcodeVectorization() const
LLVM_ABI bool shouldDropLSRSolutionIfLessProfitable() const
Return true if LSR should drop a found solution if it's calculated to be less profitable than the bas...
LLVM_ABI bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1, const TargetTransformInfo::LSRCost &C2) const
Return true if LSR cost of C1 is lower than C2.
VectorInstrContext
Represents a hint about the context in which an insert/extract is used.
@ None
The insert/extract is not used with a load/store.
@ Load
The value being inserted comes from a load (InsertElement only).
@ Store
The extracted value is stored (ExtractElement only).
LLVM_ABI unsigned getPrefetchDistance() const
LLVM_ABI Type * getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length, unsigned SrcAddrSpace, unsigned DestAddrSpace, Align SrcAlign, Align DestAlign, std::optional< uint32_t > AtomicElementSize=std::nullopt) const
LLVM_ABI bool isLegalMaskedExpandLoad(Type *DataType, Align Alignment) const
Return true if the target supports masked expand load.
LLVM_ABI bool prefersVectorizedAddressing() const
Return true if target doesn't mind addresses in vectors.
LLVM_ABI InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, OperandValueInfo Op1Info={OK_AnyValue, OP_None}, OperandValueInfo Op2Info={OK_AnyValue, OP_None}, const Instruction *I=nullptr) const
LLVM_ABI bool hasBranchDivergence(const Function *F=nullptr) const
Return true if branch divergence exists.
LLVM_ABI bool preferEpilogueVectorization(ElementCount Iters) const
Return true if the loop vectorizer should consider vectorizing an otherwise scalar epilogue loop if t...
LLVM_ABI MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const
LLVM_ABI void getUnrollingPreferences(Loop *L, ScalarEvolution &, UnrollingPreferences &UP, OptimizationRemarkEmitter *ORE) const
Get target-customized preferences for the generic loop unrolling transformation.
LLVM_ABI bool shouldBuildLookupTablesForConstant(Constant *C) const
Return true if switches should be turned into lookup tables containing this constant value for the ta...
LLVM_ABI TailFoldingStyle getPreferredTailFoldingStyle() const
Query the target what the preferred style of tail folding is.
LLVM_ABI bool supportsTailCallFor(const CallBase *CB) const
If target supports tail call on CB.
LLVM_ABI std::optional< Instruction * > instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const
Targets can implement their own combinations for target-specific intrinsics.
LLVM_ABI bool isProfitableLSRChainElement(Instruction *I) const
LLVM_ABI TypeSize getRegisterBitWidth(RegisterKind K) const
MaskKind
Some targets only support masked load/store with a constant mask.
LLVM_ABI unsigned getInlineCallPenalty(const Function *F, const CallBase &Call, unsigned DefaultCallPenalty) const
Returns a penalty for invoking call Call in F.
LLVM_ABI InstructionCost getOperandsScalarizationOverhead(ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind, TTI::VectorInstrContext VIC=TTI::VectorInstrContext::None) const
Estimate the overhead of scalarizing operands with the given types.
LLVM_ABI bool hasActiveVectorLength() const
LLVM_ABI bool isExpensiveToSpeculativelyExecute(const Instruction *I) const
Return true if the cost of the instruction is too high to speculatively execute and should be kept be...
LLVM_ABI bool preferFixedOverScalableIfEqualCost(bool IsEpilogue) const
LLVM_ABI bool isLegalMaskedGather(Type *DataType, Align Alignment) const
Return true if the target supports masked gather.
static LLVM_ABI OperandValueInfo commonOperandInfo(const Value *X, const Value *Y)
Collect common data between two OperandValueInfo inputs.
LLVM_ABI InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, OperandValueInfo OpdInfo={OK_AnyValue, OP_None}, const Instruction *I=nullptr) const
LLVM_ABI std::optional< unsigned > getMaxVScale() const
LLVM_ABI InstructionCost getReplicationShuffleCost(Type *EltTy, int ReplicationFactor, int VF, const APInt &DemandedDstElts, TTI::TargetCostKind CostKind) const
LLVM_ABI bool allowVectorElementIndexingUsingGEP() const
Returns true if GEP should not be used to index into vectors for this target.
LLVM_ABI InstructionCost getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef< unsigned > Indices, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, bool UseMaskForCond=false, bool UseMaskForGaps=false) const
LLVM_ABI bool isSingleThreaded() const
LLVM_ABI std::optional< Value * > simplifyDemandedVectorEltsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp) const
Can be used to implement target-specific instruction combining.
LLVM_ABI bool enableOrderedReductions() const
Return true if we should be enabling ordered reductions for the target.
LLVM_ABI unsigned getInliningCostBenefitAnalysisProfitableMultiplier() const
LLVM_ABI InstructionCost getShuffleCost(ShuffleKind Kind, VectorType *DstTy, VectorType *SrcTy, ArrayRef< int > Mask={}, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, int Index=0, VectorType *SubTp=nullptr, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr) const
LLVM_ABI InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) const
LLVM_ABI InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput) const
Calculate the cost of vector reduction intrinsics.
LLVM_ABI unsigned getAtomicMemIntrinsicMaxElementSize() const
LLVM_ABI InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind=TTI::TCK_SizeAndLatency, const Instruction *I=nullptr) const
LLVM_ABI InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index=-1, const Value *Op0=nullptr, const Value *Op1=nullptr, TTI::VectorInstrContext VIC=TTI::VectorInstrContext::None) const
LLVM_ABI std::pair< KnownBits, KnownBits > computeKnownBitsAddrSpaceCast(unsigned ToAS, const Value &PtrOp) const
LLVM_ABI bool LSRWithInstrQueries() const
Return true if the loop strength reduce pass should make Instruction* based TTI queries to isLegalAdd...
LLVM_ABI unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize, unsigned ChainSizeInBytes, VectorType *VecTy) const
LLVM_ABI VPLegalization getVPLegalizationStrategy(const VPIntrinsic &PI) const
static LLVM_ABI PartialReductionExtendKind getPartialReductionExtendKind(Instruction *I)
Get the kind of extension that an instruction represents.
LLVM_ABI bool shouldConsiderVectorizationRegPressure() const
LLVM_ABI bool enableWritePrefetching() const
LLVM_ABI bool shouldTreatInstructionLikeSelect(const Instruction *I) const
Should the Select Optimization pass treat the given instruction like a select, potentially converting...
LLVM_ABI bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
LLVM_ABI bool shouldMaximizeVectorBandwidth(TargetTransformInfo::RegisterKind K) const
LLVM_ABI InstructionCost getGEPCost(Type *PointeeType, const Value *Ptr, ArrayRef< const Value * > Operands, Type *AccessType=nullptr, TargetCostKind CostKind=TCK_SizeAndLatency) const
Estimate the cost of a GEP operation when lowered.
LLVM_ABI bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const
LLVM_ABI bool isLegalInterleavedAccessType(VectorType *VTy, unsigned Factor, Align Alignment, unsigned AddrSpace) const
Return true is the target supports interleaved access for the given vector type VTy,...
LLVM_ABI unsigned getRegUsageForType(Type *Ty) const
Returns the estimated number of registers required to represent Ty.
LLVM_ABI bool isLegalBroadcastLoad(Type *ElementTy, ElementCount NumElements) const
\Returns true if the target supports broadcasting a load to a vector of type <NumElements x ElementTy...
LLVM_ABI bool isIndexedStoreLegal(enum MemIndexedMode Mode, Type *Ty) const
LLVM_ABI std::pair< const Value *, unsigned > getPredicatedAddrSpace(const Value *V) const
LLVM_ABI InstructionCost getExtendedReductionCost(unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *Ty, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput) const
Calculate the cost of an extended reduction pattern, similar to getArithmeticReductionCost of a reduc...
LLVM_ABI unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const
LLVM_ABI InstructionCost getRegisterClassReloadCost(unsigned ClassID, TargetCostKind CostKind) const
LLVM_ABI ReductionShuffle getPreferredExpandedReductionShuffle(const IntrinsicInst *II) const
static LLVM_ABI OperandValueInfo getOperandInfo(const Value *V)
Collect properties of V used in cost analysis, e.g. OP_PowerOf2.
LLVM_ABI unsigned getRegisterClassForType(bool Vector, Type *Ty=nullptr) const
LLVM_ABI bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace=0, Instruction *I=nullptr, int64_t ScalableOffset=0) const
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
LLVM_ABI PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) const
Return hardware support for population count.
LLVM_ABI unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI, unsigned &JTSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) const
LLVM_ABI bool isElementTypeLegalForScalableVector(Type *Ty) const
LLVM_ABI bool forceScalarizeMaskedGather(VectorType *Type, Align Alignment) const
Return true if the target forces scalarizing of llvm.masked.gather intrinsics.
LLVM_ABI unsigned getMaxPrefetchIterationsAhead() const
LLVM_ABI bool canHaveNonUndefGlobalInitializerInAddressSpace(unsigned AS) const
Return true if globals in this address space can have initializers other than undef.
LLVM_ABI ElementCount getMinimumVF(unsigned ElemWidth, bool IsScalable) const
LLVM_ABI InstructionCost getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx, const APInt &Imm, Type *Ty, TargetCostKind CostKind) const
LLVM_ABI bool enableMaskedInterleavedAccessVectorization() const
Enable matching of interleaved access groups that contain predicated accesses or gaps and therefore v...
LLVM_ABI InstructionCost getIntImmCostInst(unsigned Opc, unsigned Idx, const APInt &Imm, Type *Ty, TargetCostKind CostKind, Instruction *Inst=nullptr) const
Return the expected cost of materialization for the given integer immediate of the specified type for...
LLVM_ABI bool isLegalStridedLoadStore(Type *DataType, Align Alignment) const
Return true if the target supports strided load.
LLVM_ABI TargetTransformInfo & operator=(TargetTransformInfo &&RHS)
LLVM_ABI InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF=FastMathFlags(), TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput) const
TargetCostKind
The kind of cost model.
@ TCK_RecipThroughput
Reciprocal throughput.
LLVM_ABI InstructionCost getArithmeticInstrCost(unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, TTI::OperandValueInfo Opd1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Opd2Info={TTI::OK_AnyValue, TTI::OP_None}, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr, const TargetLibraryInfo *TLibInfo=nullptr) const
This is an approximation of reciprocal throughput of a math/logic op.
LLVM_ABI bool enableSelectOptimize() const
Should the Select Optimization pass be enabled and run.
LLVM_ABI bool collectFlatAddressOperands(SmallVectorImpl< int > &OpIndexes, Intrinsic::ID IID) const
Return any intrinsic address operand indexes which may be rewritten if they use a flat address space ...
OperandValueProperties
Additional properties of an operand's values.
LLVM_ABI int getInliningLastCallToStaticBonus() const
LLVM_ABI InstructionCost getPointersChainCost(ArrayRef< const Value * > Ptrs, const Value *Base, const PointersChainInfo &Info, Type *AccessTy, TargetCostKind CostKind=TTI::TCK_RecipThroughput) const
Estimate the cost of a chain of pointers (typically pointer operands of a chain of loads or stores wi...
LLVM_ABI bool isIndexedLoadLegal(enum MemIndexedMode Mode, Type *Ty) const
LLVM_ABI unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const
LLVM_ABI unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy, Type *ScalarValTy, Align Alignment, unsigned AddrSpace) const
LLVM_ABI bool isLegalICmpImmediate(int64_t Imm) const
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
LLVM_ABI bool isTypeLegal(Type *Ty) const
Return true if this type is legal.
LLVM_ABI bool isLegalToVectorizeReduction(const RecurrenceDescriptor &RdxDesc, ElementCount VF) const
LLVM_ABI std::optional< unsigned > getCacheAssociativity(CacheLevel Level) const
LLVM_ABI bool isLegalNTLoad(Type *DataType, Align Alignment) const
Return true if the target supports nontemporal load.
LLVM_ABI bool isUniform(const Instruction *I, const SmallBitVector &UniformArgs) const
Determine if an instruction with Custom uniformity can be proven uniform based on which operands are ...
LLVM_ABI InstructionCost getMemcpyCost(const Instruction *I) const
LLVM_ABI unsigned adjustInliningThreshold(const CallBase *CB) const
LLVM_ABI bool isLegalAddImmediate(int64_t Imm) const
Return true if the specified immediate is legal add immediate, that is the target has add instruction...
LLVM_ABI bool isTargetIntrinsicWithStructReturnOverloadAtField(Intrinsic::ID ID, int RetIdx) const
Identifies if the vector form of the intrinsic that returns a struct is overloaded at the struct elem...
LLVM_ABI unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize, unsigned ChainSizeInBytes, VectorType *VecTy) const
LLVM_ABI InstructionCost getMemIntrinsicInstrCost(const MemIntrinsicCostAttributes &MICA, TTI::TargetCostKind CostKind) const
LLVM_ABI bool isTargetIntrinsicTriviallyScalarizable(Intrinsic::ID ID) const
LLVM_ABI Value * rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV, Value *NewV) const
Rewrite intrinsic call II such that OldV will be replaced with NewV, which has a different address sp...
LLVM_ABI InstructionCost getCostOfKeepingLiveOverCall(ArrayRef< Type * > Tys) const
LLVM_ABI bool canSaveCmp(Loop *L, CondBrInst **BI, ScalarEvolution *SE, LoopInfo *LI, DominatorTree *DT, AssumptionCache *AC, TargetLibraryInfo *LibInfo) const
Return true if the target can save a compare for loop count, for example hardware loop saves a compar...
LLVM_ABI unsigned getMinPrefetchStride(unsigned NumMemAccesses, unsigned NumStridedMemAccesses, unsigned NumPrefetches, bool HasCall) const
Some HW prefetchers can handle accesses up to a certain constant stride.
LLVM_ABI bool shouldPrefetchAddressSpace(unsigned AS) const
LLVM_ABI InstructionCost getIntImmCost(const APInt &Imm, Type *Ty, TargetCostKind CostKind) const
Return the expected cost of materializing the given integer immediate of the specified type.
LLVM_ABI unsigned getMinVectorRegisterBitWidth() const
LLVM_ABI InstructionCost getAddressComputationCost(Type *PtrTy, ScalarEvolution *SE, const SCEV *Ptr, TTI::TargetCostKind CostKind) const
LLVM_ABI bool isLegalNTStore(Type *DataType, Align Alignment) const
Return true if the target supports nontemporal store.
LLVM_ABI unsigned getFlatAddressSpace() const
Returns the address space ID for a target's 'flat' address space.
LLVM_ABI bool preferToKeepConstantsAttached(const Instruction &Inst, const Function &Fn) const
It can be advantageous to detach complex constants from their uses to make their generation cheaper.
LLVM_ABI bool hasArmWideBranch(bool Thumb) const
LLVM_ABI const char * getRegisterClassName(unsigned ClassID) const
LLVM_ABI bool shouldConsiderAddressTypePromotion(const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const
LLVM_ABI APInt getPriorityMask(const Function &F) const
Returns a bitmask constructed from the target-features or fmv-features metadata of a function corresp...
LLVM_ABI BranchProbability getPredictableBranchThreshold() const
If a branch or a select condition is skewed in one direction by more than this factor,...
LLVM_ABI TargetTransformInfo(std::unique_ptr< const TargetTransformInfoImplBase > Impl)
Construct a TTI object using a type implementing the Concept API below.
LLVM_ABI bool preferInLoopReduction(RecurKind Kind, Type *Ty) const
LLVM_ABI unsigned getCallerAllocaCost(const CallBase *CB, const AllocaInst *AI) const
LLVM_ABI bool hasConditionalLoadStoreForType(Type *Ty, bool IsStore) const
LLVM_ABI unsigned getCacheLineSize() const
LLVM_ABI bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth, unsigned AddressSpace=0, Align Alignment=Align(1), unsigned *Fast=nullptr) const
Determine if the target supports unaligned memory accesses.
LLVM_ABI bool shouldCopyAttributeWhenOutliningFrom(const Function *Caller, const Attribute &Attr) const
LLVM_ABI APInt getAddrSpaceCastPreservedPtrMask(unsigned SrcAS, unsigned DstAS) const
Returns a mask indicating which bits of a pointer remain unchanged when casting between address space...
LLVM_ABI int getInlinerVectorBonusPercent() const
LLVM_ABI unsigned getEpilogueVectorizationMinVF() const
LLVM_ABI void collectKernelLaunchBounds(const Function &F, SmallVectorImpl< std::pair< StringRef, int64_t > > &LB) const
Collect kernel launch bounds for F into LB.
PopcntSupportKind
Flags indicating the kind of support for population count.
LLVM_ABI bool preferPredicatedReductionSelect() const
LLVM_ABI InstructionCost getIntImmCodeSizeCost(unsigned Opc, unsigned Idx, const APInt &Imm, Type *Ty) const
Return the expected cost for the given integer when optimising for size.
LLVM_ABI AddressingModeKind getPreferredAddressingMode(const Loop *L, ScalarEvolution *SE) const
Return the preferred addressing mode LSR should make efforts to generate.
LLVM_ABI bool isLoweredToCall(const Function *F) const
Test whether calls to a function lower to actual program function calls.
LLVM_ABI bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const
LLVM_ABI bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE, AssumptionCache &AC, TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo) const
Query the target whether it would be profitable to convert the given loop into a hardware loop.
LLVM_ABI unsigned getInliningThresholdMultiplier() const
LLVM_ABI InstructionCost getBranchMispredictPenalty() const
Returns estimated penalty of a branch misprediction in latency.
LLVM_ABI unsigned getNumberOfRegisters(unsigned ClassID) const
LLVM_ABI bool isLegalAltInstr(VectorType *VecTy, unsigned Opcode0, unsigned Opcode1, const SmallBitVector &OpcodeMask) const
Return true if this is an alternating opcode pattern that can be lowered to a single instruction on t...
LLVM_ABI bool isProfitableToHoist(Instruction *I) const
Return true if it is profitable to hoist instruction in the then/else to before if.
LLVM_ABI bool supportsScalableVectors() const
LLVM_ABI bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) const
Return true if the given instruction (assumed to be a memory access instruction) has a volatile varia...
LLVM_ABI bool isLegalMaskedCompressStore(Type *DataType, Align Alignment) const
Return true if the target supports masked compress store.
LLVM_ABI std::optional< unsigned > getMinPageSize() const
LLVM_ABI bool isFPVectorizationPotentiallyUnsafe() const
Indicate that it is potentially unsafe to automatically vectorize floating-point operations because t...
LLVM_ABI InstructionCost getInsertExtractValueCost(unsigned Opcode, TTI::TargetCostKind CostKind) const
LLVM_ABI bool shouldBuildRelLookupTables() const
Return true if lookup tables should be turned into relative lookup tables.
LLVM_ABI std::optional< unsigned > getCacheSize(CacheLevel Level) const
LLVM_ABI std::optional< Value * > simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed) const
Can be used to implement target-specific instruction combining.
LLVM_ABI bool isLegalAddScalableImmediate(int64_t Imm) const
Return true if adding the specified scalable immediate is legal, that is the target has add instructi...
LLVM_ABI bool isTargetIntrinsicWithScalarOpAtArg(Intrinsic::ID ID, unsigned ScalarOpdIdx) const
Identifies if the vector form of the intrinsic has a scalar operand.
LLVM_ABI bool hasDivRemOp(Type *DataType, bool IsSigned) const
Return true if the target has a unified operation to calculate division and remainder.
LLVM_ABI InstructionCost getAltInstrCost(VectorType *VecTy, unsigned Opcode0, unsigned Opcode1, const SmallBitVector &OpcodeMask, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput) const
Returns the cost estimation for alternating opcode pattern that can be lowered to a single instructio...
LLVM_ABI bool enableInterleavedAccessVectorization() const
Enable matching of interleaved access groups.
LLVM_ABI unsigned getMinTripCountTailFoldingThreshold() const
LLVM_ABI InstructionCost getPartialReductionCost(unsigned Opcode, Type *InputTypeA, Type *InputTypeB, Type *AccumType, ElementCount VF, PartialReductionExtendKind OpAExtend, PartialReductionExtendKind OpBExtend, std::optional< unsigned > BinOp, TTI::TargetCostKind CostKind, std::optional< FastMathFlags > FMF) const
LLVM_ABI InstructionCost getInstructionCost(const User *U, ArrayRef< const Value * > Operands, TargetCostKind CostKind) const
Estimate the cost of a given IR user when lowered.
LLVM_ABI unsigned getMaxInterleaveFactor(ElementCount VF) const
LLVM_ABI bool enableScalableVectorization() const
LLVM_ABI bool useFastCCForInternalCall(Function &F) const
Return true if the input function is internal and should use the fastcc calling convention.
LLVM_ABI bool isVectorShiftByScalarCheap(Type *Ty) const
Return true if it's significantly cheaper to shift a vector by a uniform scalar than by an amount whi...
LLVM_ABI bool isNumRegsMajorCostOfLSR() const
Return true if LSR major cost is number of registers.
LLVM_ABI unsigned getInliningCostBenefitAnalysisSavingsMultiplier() const
LLVM_ABI bool isLegalMaskedVectorHistogram(Type *AddrType, Type *DataType) const
LLVM_ABI unsigned getGISelRematGlobalCost() const
LLVM_ABI unsigned getNumBytesToPadGlobalArray(unsigned Size, Type *ArrayType) const
static LLVM_ABI Instruction::CastOps getOpcodeForPartialReductionExtendKind(PartialReductionExtendKind Kind)
Get the cast opcode for an extension kind.
MemIndexedMode
The type of load/store indexing.
LLVM_ABI bool isLegalMaskedLoad(Type *DataType, Align Alignment, unsigned AddressSpace, MaskKind MaskKind=VariableOrConstantMask) const
Return true if the target supports masked load.
LLVM_ABI InstructionCost getIndexedVectorInstrCostFromEnd(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index) const
LLVM_ABI bool areInlineCompatible(const Function *Caller, const Function *Callee) const
LLVM_ABI bool useColdCCForColdCall(Function &F) const
Return true if the input function which is cold at all call sites, should use coldcc calling conventi...
LLVM_ABI InstructionCost getFPOpCost(Type *Ty) const
Return the expected cost of supporting the floating point operation of the specified type.
LLVM_ABI bool supportsTailCalls() const
If the target supports tail calls.
LLVM_ABI bool canMacroFuseCmp() const
Return true if the target can fuse a compare and branch.
LLVM_ABI bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
Query the target whether the specified address space cast from FromAS to ToAS is valid.
LLVM_ABI unsigned getNumberOfParts(Type *Tp) const
AddressingModeKind
Which addressing mode Loop Strength Reduction will try to generate.
LLVM_ABI InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, StackOffset BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace=0) const
Return the cost of the scaling factor used in the addressing mode represented by AM for this target,...
LLVM_ABI bool isTruncateFree(Type *Ty1, Type *Ty2) const
Return true if it's free to truncate a value of type Ty1 to type Ty2.
LLVM_ABI bool isProfitableToSinkOperands(Instruction *I, SmallVectorImpl< Use * > &Ops) const
Return true if sinking I's operands to the same basic block as I is profitable, e....
LLVM_ABI void getMemcpyLoopResidualLoweringType(SmallVectorImpl< Type * > &OpsOut, LLVMContext &Context, unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace, Align SrcAlign, Align DestAlign, std::optional< uint32_t > AtomicCpySize=std::nullopt) const
LLVM_ABI bool preferPredicateOverEpilogue(TailFoldingInfo *TFI) const
Query the target whether it would be prefered to create a predicated vector loop, which can avoid the...
LLVM_ABI bool forceScalarizeMaskedScatter(VectorType *Type, Align Alignment) const
Return true if the target forces scalarizing of llvm.masked.scatter intrinsics.
LLVM_ABI bool isTargetIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID, int OpdIdx) const
Identifies if the vector form of the intrinsic is overloaded on the type of the operand at index OpdI...
static VectorInstrContext getVectorInstrContextHint(const Instruction *I)
Calculates a VectorInstrContext from I.
LLVM_ABI bool haveFastSqrt(Type *Ty) const
Return true if the hardware has a fast square-root instruction.
LLVM_ABI bool shouldExpandReduction(const IntrinsicInst *II) const
LLVM_ABI InstructionCost getScalarizationOverhead(VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract, TTI::TargetCostKind CostKind, bool ForPoisonSrc=true, ArrayRef< Value * > VL={}, TTI::VectorInstrContext VIC=TTI::VectorInstrContext::None) const
Estimate the overhead of scalarizing an instruction.
LLVM_ABI uint64_t getMaxMemIntrinsicInlineSizeThreshold() const
Returns the maximum memset / memcpy size in bytes that still makes it profitable to inline the call.
ShuffleKind
The various kinds of shuffle patterns for vector queries.
LLVM_ABI APInt getFeatureMask(const Function &F) const
Returns a bitmask constructed from the target-features or fmv-features metadata of a function corresp...
LLVM_ABI void getPeelingPreferences(Loop *L, ScalarEvolution &SE, PeelingPreferences &PP) const
Get target-customized preferences for the generic loop peeling transformation.
LLVM_ABI InstructionCost getCallInstrCost(Function *F, Type *RetTy, ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind=TTI::TCK_SizeAndLatency) const
LLVM_ABI InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind=TTI::TCK_SizeAndLatency, const Instruction *I=nullptr) const
CastContextHint
Represents a hint about the context in which a cast is used.
@ Masked
The cast is used with a masked load/store.
@ None
The cast is not used with a load/store of any kind.
@ Normal
The cast is used with a normal load/store.
@ GatherScatter
The cast is used with a gather/scatter.
LLVM_ABI InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy, unsigned Index, TTI::TargetCostKind CostKind) const
LLVM_ABI InstructionCost getRegisterClassSpillCost(unsigned ClassID, TargetCostKind CostKind) const
OperandValueKind
Additional information about an operand's possible values.
CacheLevel
The possible cache levels.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
Definition Type.cpp:65
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:313
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:370
This is the common base class for vector predication intrinsics.
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
Base class of all SIMD vector types.
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
CallInst * Call
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ Fast
Attempts to make calls as fast as possible (e.g.
Definition CallingConv.h:41
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
@ Length
Definition DWP.cpp:532
InstructionCost Cost
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI Value * getSplatValue(const Value *V)
Get splat value if the input is a splat vector or return nullptr.
bool containsIrreducibleCFG(RPOTraversalT &RPOTraversal, const LoopInfoT &LI)
Return true if the control flow in RPOTraversal is irreducible.
Definition CFG.h:154
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI ImmutablePass * createTargetTransformInfoWrapperPass(TargetIRAnalysis TIRA)
Create an analysis pass wrapper around a TTI object.
RecurKind
These are the kinds of recurrences that we support.
constexpr unsigned BitWidth
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1917
auto predecessors(const MachineBasicBlock *BB)
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
InstructionUniformity
Enum describing how instructions behave with respect to uniformity and divergence,...
Definition Uniformity.h:18
@ AlwaysUniform
The result values are always uniform.
Definition Uniformity.h:23
Implement std::hash so that hash_code can be used in STL containers.
Definition BitVector.h:870
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition Analysis.h:29
Attributes of a target dependent hardware loop.
LLVM_ABI bool canAnalyze(LoopInfo &LI)
LLVM_ABI bool isHardwareLoopCandidate(ScalarEvolution &SE, LoopInfo &LI, DominatorTree &DT, bool ForceNestedLoop=false, bool ForceHardwareLoopPHI=false)
Information about a load/store intrinsic defined by the target.
Returns options for expansion of memcmp. IsZeroCmp is true if this is the expansion of memcmp(p1, p2, s) == 0.
OperandValueInfo mergeWith(const OperandValueInfo OpInfoY)
Describe known properties for a set of pointers.
Parameters that control the generic loop unrolling transformation.