// LLVM 23.0.0git
// TargetTransformInfoImpl.h
// Go to the documentation of this file.
1//===- TargetTransformInfoImpl.h --------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file provides helpers for the implementation of
10/// a TargetTransformInfo-conforming class.
11///
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_ANALYSIS_TARGETTRANSFORMINFOIMPL_H
15#define LLVM_ANALYSIS_TARGETTRANSFORMINFOIMPL_H
16
21#include "llvm/IR/DataLayout.h"
24#include "llvm/IR/Operator.h"
26#include <optional>
27#include <utility>
28
29namespace llvm {
30
31class Function;
32
33/// Base class for use as a mix-in that aids implementing
34/// a TargetTransformInfo-compatible class.
36
37protected:
39
40 const DataLayout &DL;
41
43
44public:
46
47 // Provide value semantics. MSVC requires that we spell all of these out.
50
// Returns the DataLayout this implementation was constructed with (the
// `DL` member); virtual so targets can substitute their own.
51 virtual const DataLayout &getDataLayout() const { return DL; }
52
53 // FIXME: It looks like this implementation is dead. All clients appear to
54 // use the (non-const) version from `TargetTransformInfoImplCRTPBase`.
// Default GEP cost model: a GEP whose operands are all constants is assumed
// to fold into its users' addressing modes (TCC_Free); any non-constant
// operand makes it cost one basic unit (TCC_Basic).
// NOTE(review): this extraction is missing the `Operands` and cost-kind
// parameter lines (original lines 56 and 58) — confirm against the upstream
// header before relying on the exact signature.
55 virtual InstructionCost getGEPCost(Type *PointeeType, const Value *Ptr,
57 Type *AccessType,
59 // In the basic model, we just assume that all-constant GEPs will be folded
60 // into their uses via addressing modes.
61 for (const Value *Operand : Operands)
62 if (!isa<Constant>(Operand))
63 return TTI::TCC_Basic;
64
65 return TTI::TCC_Free;
66 }
67
68 virtual InstructionCost
70 const TTI::PointersChainInfo &Info, Type *AccessTy,
72 llvm_unreachable("Not implemented");
73 }
74
// Estimate how many case "clusters" a switch will lower to. The base model
// reports no jump table (JTSize = 0) and one cluster per switch case; the
// profile inputs (PSI/BFI) are accepted but unused here.
// NOTE(review): original lines 76-77 (the SwitchInst/JTSize/PSI parameter
// lines) are missing from this extraction — confirm against upstream.
75 virtual unsigned
78 BlockFrequencyInfo *BFI) const {
79 (void)PSI;
80 (void)BFI;
81 JTSize = 0;
82 return SI.getNumCases();
83 }
84
85 virtual InstructionCost
88 llvm_unreachable("Not implemented");
89 }
90
91 virtual unsigned getInliningThresholdMultiplier() const { return 1; }
93 return 8;
94 }
96 return 8;
97 }
99 // This is the value of InlineConstants::LastCallToStaticBonus before it was
100 // removed along with the introduction of this function.
101 return 15000;
102 }
103 virtual unsigned adjustInliningThreshold(const CallBase *CB) const {
104 return 0;
105 }
106 virtual unsigned getCallerAllocaCost(const CallBase *CB,
107 const AllocaInst *AI) const {
108 return 0;
109 };
110
111 virtual int getInlinerVectorBonusPercent() const { return 150; }
112
114 return TTI::TCC_Expensive;
115 }
116
117 virtual uint64_t getMaxMemIntrinsicInlineSizeThreshold() const { return 64; }
118
119 // Although this default value is arbitrary, it is not random. It is assumed
120 // that a condition that evaluates the same way by a higher percentage than
121 // this is best represented as control flow. Therefore, the default value N
122 // should be set such that the win from N% correct executions is greater than
123 // the loss from (100 - N)% mispredicted executions for the majority of
124 // intended targets.
126 return BranchProbability(99, 100);
127 }
128
129 virtual InstructionCost getBranchMispredictPenalty() const { return 0; }
130
131 virtual bool hasBranchDivergence(const Function *F = nullptr) const {
132 return false;
133 }
134
138
139 virtual bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const {
140 return false;
141 }
142
143 virtual bool addrspacesMayAlias(unsigned AS0, unsigned AS1) const {
144 return true;
145 }
146
147 virtual unsigned getFlatAddressSpace() const { return -1; }
148
150 Intrinsic::ID IID) const {
151 return false;
152 }
153
154 virtual bool isNoopAddrSpaceCast(unsigned, unsigned) const { return false; }
155
// Compute the known bits of pointer value \p PtrOp before and after casting
// it into address space \p ToAS. Returns {bits of the source pointer, bits
// of the cast result}. For a non-integral source address space nothing is
// known on either side. If the operand is itself an addrspacecast, the
// source bits are obtained by recursing through that cast; otherwise plain
// computeKnownBits on the value is used. The per-AS transfer is then
// delegated to the (FromAS, ToAS, KnownBits) overload below.
156 virtual std::pair<KnownBits, KnownBits>
157 computeKnownBitsAddrSpaceCast(unsigned ToAS, const Value &PtrOp) const {
158 const Type *PtrTy = PtrOp.getType();
159 assert(PtrTy->isPtrOrPtrVectorTy() &&
160 "expected pointer or pointer vector type");
161 unsigned FromAS = PtrTy->getPointerAddressSpace();
162
// Non-integral address spaces: no bit-level reasoning is valid; return
// fully-unknown KnownBits of the correct widths for both sides.
163 if (DL.isNonIntegralAddressSpace(FromAS))
164 return std::pair(KnownBits(DL.getPointerSizeInBits(FromAS)),
165 KnownBits(DL.getPointerSizeInBits(ToAS)));
166
167 KnownBits FromPtrBits;
168 if (const AddrSpaceCastInst *CastI = dyn_cast<AddrSpaceCastInst>(&PtrOp)) {
// Chained cast: the known bits of our source are the *result* bits of the
// inner cast (KB.second).
169 std::pair<KnownBits, KnownBits> KB = computeKnownBitsAddrSpaceCast(
170 CastI->getDestAddressSpace(), *CastI->getPointerOperand());
171 FromPtrBits = KB.second;
172 } else {
173 FromPtrBits = computeKnownBits(&PtrOp, DL, nullptr);
174 }
175
176 KnownBits ToPtrBits =
177 computeKnownBitsAddrSpaceCast(FromAS, ToAS, FromPtrBits);
178
179 return {FromPtrBits, ToPtrBits};
180 }
181
182 virtual KnownBits
183 computeKnownBitsAddrSpaceCast(unsigned FromAS, unsigned ToAS,
184 const KnownBits &FromPtrBits) const {
185 unsigned ToASBitSize = DL.getPointerSizeInBits(ToAS);
186
187 if (DL.isNonIntegralAddressSpace(FromAS))
188 return KnownBits(ToASBitSize);
189
190 // By default, we assume that all valid "larger" (e.g. 64-bit) to "smaller"
191 // (e.g. 32-bit) casts work by chopping off the high bits.
192 // By default, we do not assume that null results in null again.
193 return FromPtrBits.anyextOrTrunc(ToASBitSize);
194 }
195
197 unsigned DstAS) const {
198 return {DL.getPointerSizeInBits(SrcAS), 0};
199 }
200
201 virtual bool
203 return AS == 0;
204 };
205
206 virtual unsigned getAssumedAddrSpace(const Value *V) const { return -1; }
207
208 virtual bool isSingleThreaded() const { return false; }
209
210 virtual std::pair<const Value *, unsigned>
212 return std::make_pair(nullptr, -1);
213 }
214
216 Value *OldV,
217 Value *NewV) const {
218 return nullptr;
219 }
220
// Heuristic: does calling \p F lower to an actual machine call? Intrinsics
// and a hard-coded list of well-known libm/libc routines are expected to
// lower to inline instruction sequences (return false); local-linkage or
// unnamed functions, and everything else, are assumed to remain real calls
// (return true).
221 virtual bool isLoweredToCall(const Function *F) const {
222 assert(F && "A concrete function must be provided to this routine.");
223
224 // FIXME: These should almost certainly not be handled here, and instead
225 // handled with the help of TLI or the target itself. This was largely
226 // ported from existing analysis heuristics here so that such refactorings
227 // can take place in the future.
228
229 if (F->isIntrinsic())
230 return false;
231
232 if (F->hasLocalLinkage() || !F->hasName())
233 return true;
234
235 StringRef Name = F->getName();
236
237 // These will all likely lower to a single selection DAG node.
238 // clang-format off
239 if (Name == "copysign" || Name == "copysignf" || Name == "copysignl" ||
240 Name == "fabs" || Name == "fabsf" || Name == "fabsl" ||
241 Name == "fmin" || Name == "fminf" || Name == "fminl" ||
242 Name == "fmax" || Name == "fmaxf" || Name == "fmaxl" ||
243 Name == "sin" || Name == "sinf" || Name == "sinl" ||
244 Name == "cos" || Name == "cosf" || Name == "cosl" ||
245 Name == "tan" || Name == "tanf" || Name == "tanl" ||
246 Name == "asin" || Name == "asinf" || Name == "asinl" ||
247 Name == "acos" || Name == "acosf" || Name == "acosl" ||
248 Name == "atan" || Name == "atanf" || Name == "atanl" ||
249 Name == "atan2" || Name == "atan2f" || Name == "atan2l"||
250 Name == "sinh" || Name == "sinhf" || Name == "sinhl" ||
251 Name == "cosh" || Name == "coshf" || Name == "coshl" ||
252 Name == "tanh" || Name == "tanhf" || Name == "tanhl" ||
253 Name == "sqrt" || Name == "sqrtf" || Name == "sqrtl" ||
254 Name == "exp10" || Name == "exp10l" || Name == "exp10f")
255 return false;
256 // clang-format on
257 // These are all likely to be optimized into something smaller.
258 if (Name == "pow" || Name == "powf" || Name == "powl" || Name == "exp2" ||
259 Name == "exp2l" || Name == "exp2f" || Name == "floor" ||
260 Name == "floorf" || Name == "ceil" || Name == "round" ||
261 Name == "ffs" || Name == "ffsl" || Name == "abs" || Name == "labs" ||
262 Name == "llabs")
263 return false;
264
// Default: assume a genuine call.
265 return true;
266 }
267
269 AssumptionCache &AC,
270 TargetLibraryInfo *LibInfo,
271 HardwareLoopInfo &HWLoopInfo) const {
272 return false;
273 }
274
275 virtual unsigned getEpilogueVectorizationMinVF() const { return 16; }
276
278 return false;
279 }
280
284
285 virtual std::optional<Instruction *>
287 return std::nullopt;
288 }
289
290 virtual std::optional<Value *>
292 APInt DemandedMask, KnownBits &Known,
293 bool &KnownBitsComputed) const {
294 return std::nullopt;
295 }
296
297 virtual std::optional<Value *> simplifyDemandedVectorEltsIntrinsic(
298 InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
299 APInt &UndefElts2, APInt &UndefElts3,
300 std::function<void(Instruction *, unsigned, APInt, APInt &)>
301 SimplifyAndSetOp) const {
302 return std::nullopt;
303 }
304
308
311
312 virtual bool isLegalAddImmediate(int64_t Imm) const { return false; }
313
314 virtual bool isLegalAddScalableImmediate(int64_t Imm) const { return false; }
315
316 virtual bool isLegalICmpImmediate(int64_t Imm) const { return false; }
317
318 virtual bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
319 int64_t BaseOffset, bool HasBaseReg,
320 int64_t Scale, unsigned AddrSpace,
321 Instruction *I = nullptr,
322 int64_t ScalableOffset = 0) const {
323 // Guess that only reg and reg+reg addressing is allowed. This heuristic is
324 // taken from the implementation of LSR.
325 return !BaseGV && BaseOffset == 0 && (Scale == 0 || Scale == 1);
326 }
327
// Lexicographic comparison of two LSR solution costs: register pressure
// (NumRegs) dominates, then addrec cost, IV multiplies, base adds, scale,
// immediate, and finally setup cost. Returns true if C1 is cheaper.
328 virtual bool isLSRCostLess(const TTI::LSRCost &C1,
329 const TTI::LSRCost &C2) const {
330 return std::tie(C1.NumRegs, C1.AddRecCost, C1.NumIVMuls, C1.NumBaseAdds,
331 C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
332 std::tie(C2.NumRegs, C2.AddRecCost, C2.NumIVMuls, C2.NumBaseAdds,
333 C2.ScaleCost, C2.ImmCost, C2.SetupCost);
334 }
335
336 virtual bool isNumRegsMajorCostOfLSR() const { return true; }
337
338 virtual bool shouldDropLSRSolutionIfLessProfitable() const { return false; }
339
341 return false;
342 }
343
344 virtual bool canMacroFuseCmp() const { return false; }
345
346 virtual bool canSaveCmp(Loop *L, CondBrInst **BI, ScalarEvolution *SE,
348 TargetLibraryInfo *LibInfo) const {
349 return false;
350 }
351
354 return TTI::AMK_None;
355 }
356
357 virtual bool isLegalMaskedStore(Type *DataType, Align Alignment,
358 unsigned AddressSpace,
359 TTI::MaskKind MaskKind) const {
360 return false;
361 }
362
363 virtual bool isLegalMaskedLoad(Type *DataType, Align Alignment,
364 unsigned AddressSpace,
365 TTI::MaskKind MaskKind) const {
366 return false;
367 }
368
369 virtual bool isLegalNTStore(Type *DataType, Align Alignment) const {
370 // By default, assume nontemporal memory stores are available for stores
371 // that are aligned and have a size that is a power of 2.
372 unsigned DataSize = DL.getTypeStoreSize(DataType);
373 return Alignment >= DataSize && isPowerOf2_32(DataSize);
374 }
375
376 virtual bool isLegalNTLoad(Type *DataType, Align Alignment) const {
377 // By default, assume nontemporal memory loads are available for loads that
378 // are aligned and have a size that is a power of 2.
379 unsigned DataSize = DL.getTypeStoreSize(DataType);
380 return Alignment >= DataSize && isPowerOf2_32(DataSize);
381 }
382
383 virtual bool isLegalBroadcastLoad(Type *ElementTy,
384 ElementCount NumElements) const {
385 return false;
386 }
387
388 virtual bool isLegalMaskedScatter(Type *DataType, Align Alignment) const {
389 return false;
390 }
391
392 virtual bool isLegalMaskedGather(Type *DataType, Align Alignment) const {
393 return false;
394 }
395
397 Align Alignment) const {
398 return false;
399 }
400
402 Align Alignment) const {
403 return false;
404 }
405
406 virtual bool isLegalMaskedCompressStore(Type *DataType,
407 Align Alignment) const {
408 return false;
409 }
410
411 virtual bool isLegalAltInstr(VectorType *VecTy, unsigned Opcode0,
412 unsigned Opcode1,
413 const SmallBitVector &OpcodeMask) const {
414 return false;
415 }
416
417 virtual bool isLegalMaskedExpandLoad(Type *DataType, Align Alignment) const {
418 return false;
419 }
420
421 virtual bool isLegalStridedLoadStore(Type *DataType, Align Alignment) const {
422 return false;
423 }
424
425 virtual bool isLegalInterleavedAccessType(VectorType *VTy, unsigned Factor,
426 Align Alignment,
427 unsigned AddrSpace) const {
428 return false;
429 }
430
431 virtual bool isLegalMaskedVectorHistogram(Type *AddrType,
432 Type *DataType) const {
433 return false;
434 }
435
436 virtual bool enableOrderedReductions() const { return false; }
437
438 virtual bool hasDivRemOp(Type *DataType, bool IsSigned) const {
439 return false;
440 }
441
442 virtual bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) const {
443 return false;
444 }
445
446 virtual bool prefersVectorizedAddressing() const { return true; }
447
449 StackOffset BaseOffset,
450 bool HasBaseReg, int64_t Scale,
451 unsigned AddrSpace) const {
452 // Guess that all legal addressing mode are free.
453 if (isLegalAddressingMode(Ty, BaseGV, BaseOffset.getFixed(), HasBaseReg,
454 Scale, AddrSpace, /*I=*/nullptr,
455 BaseOffset.getScalable()))
456 return 0;
458 }
459
460 virtual bool LSRWithInstrQueries() const { return false; }
461
462 virtual bool isTruncateFree(Type *Ty1, Type *Ty2) const { return false; }
463
464 virtual bool isProfitableToHoist(Instruction *I) const { return true; }
465
466 virtual bool useAA() const { return false; }
467
468 virtual bool isTypeLegal(Type *Ty) const { return false; }
469
470 virtual unsigned getRegUsageForType(Type *Ty) const { return 1; }
471
472 virtual bool shouldBuildLookupTables() const { return true; }
473
475 return true;
476 }
477
478 virtual bool shouldBuildRelLookupTables() const { return false; }
479
480 virtual bool useColdCCForColdCall(Function &F) const { return false; }
481
482 virtual bool useFastCCForInternalCall(Function &F) const { return true; }
483
485 return false;
486 }
487
489 unsigned ScalarOpdIdx) const {
490 return false;
491 }
492
494 int OpdIdx) const {
495 return OpdIdx == -1;
496 }
497
498 virtual bool
500 int RetIdx) const {
501 return RetIdx == 0;
502 }
503
505 VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract,
506 TTI::TargetCostKind CostKind, bool ForPoisonSrc = true,
507 ArrayRef<Value *> VL = {},
509 // Default implementation returns 0.
510 // BasicTTIImpl provides the actual implementation.
511 return 0;
512 }
513
519
520 virtual bool supportsEfficientVectorElementLoadStore() const { return false; }
521
522 virtual bool supportsTailCalls() const { return true; }
523
524 virtual bool supportsTailCallFor(const CallBase *CB) const {
525 llvm_unreachable("Not implemented");
526 }
527
528 virtual bool enableAggressiveInterleaving(bool LoopHasReductions) const {
529 return false;
530 }
531
533 enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
534 return {};
535 }
536
537 virtual bool enableSelectOptimize() const { return true; }
538
539 virtual bool shouldTreatInstructionLikeSelect(const Instruction *I) const {
540 // A select with two constant operands will usually be better left as a
541 // select.
542 using namespace llvm::PatternMatch;
544 return false;
545 // If the select is a logical-and/logical-or then it is better treated as a
546 // and/or by the backend.
547 return isa<SelectInst>(I) &&
550 }
551
552 virtual bool enableInterleavedAccessVectorization() const { return false; }
553
555 return false;
556 }
557
558 virtual bool isFPVectorizationPotentiallyUnsafe() const { return false; }
559
561 unsigned BitWidth,
562 unsigned AddressSpace,
563 Align Alignment,
564 unsigned *Fast) const {
565 return false;
566 }
567
569 getPopcntSupport(unsigned IntTyWidthInBit) const {
570 return TTI::PSK_Software;
571 }
572
573 virtual bool haveFastSqrt(Type *Ty) const { return false; }
574
576 return true;
577 }
578
579 virtual bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) const { return true; }
580
581 virtual InstructionCost getFPOpCost(Type *Ty) const {
583 }
584
585 virtual InstructionCost getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx,
586 const APInt &Imm,
587 Type *Ty) const {
588 return 0;
589 }
590
591 virtual InstructionCost getIntImmCost(const APInt &Imm, Type *Ty,
593 return TTI::TCC_Basic;
594 }
595
596 virtual InstructionCost getIntImmCostInst(unsigned Opcode, unsigned Idx,
597 const APInt &Imm, Type *Ty,
599 Instruction *Inst = nullptr) const {
600 return TTI::TCC_Free;
601 }
602
603 virtual InstructionCost
604 getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
605 Type *Ty, TTI::TargetCostKind CostKind) const {
606 return TTI::TCC_Free;
607 }
608
610 const Function &Fn) const {
611 return false;
612 }
613
614 virtual unsigned getNumberOfRegisters(unsigned ClassID) const { return 8; }
615 virtual bool hasConditionalLoadStoreForType(Type *Ty, bool IsStore) const {
616 return false;
617 }
618
619 virtual unsigned getRegisterClassForType(bool Vector,
620 Type *Ty = nullptr) const {
621 return Vector ? 1 : 0;
622 }
623
624 virtual const char *getRegisterClassName(unsigned ClassID) const {
625 switch (ClassID) {
626 default:
627 return "Generic::Unknown Register Class";
628 case 0:
629 return "Generic::ScalarRC";
630 case 1:
631 return "Generic::VectorRC";
632 }
633 }
634
635 virtual InstructionCost
638 return TTI::TCC_Basic;
639 }
640
641 virtual InstructionCost
644 return TTI::TCC_Basic;
645 }
646
647 virtual TypeSize
651
652 virtual unsigned getMinVectorRegisterBitWidth() const { return 128; }
653
654 virtual std::optional<unsigned> getMaxVScale() const { return std::nullopt; }
655 virtual std::optional<unsigned> getVScaleForTuning() const {
656 return std::nullopt;
657 }
658
659 virtual bool
663
664 virtual ElementCount getMinimumVF(unsigned ElemWidth, bool IsScalable) const {
665 return ElementCount::get(0, IsScalable);
666 }
667
668 virtual unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const {
669 return 0;
670 }
671 virtual unsigned getStoreMinimumVF(unsigned VF, Type *, Type *, Align,
672 unsigned) const {
673 return VF;
674 }
675
677 const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const {
678 AllowPromotionWithoutCommonHeader = false;
679 return false;
680 }
681
682 virtual unsigned getCacheLineSize() const { return 0; }
683 virtual std::optional<unsigned>
685 switch (Level) {
687 [[fallthrough]];
689 return std::nullopt;
690 }
691 llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
692 }
693
694 virtual std::optional<unsigned>
696 switch (Level) {
698 [[fallthrough]];
700 return std::nullopt;
701 }
702
703 llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
704 }
705
706 virtual std::optional<unsigned> getMinPageSize() const { return {}; }
707
708 virtual unsigned getPrefetchDistance() const { return 0; }
709 virtual unsigned getMinPrefetchStride(unsigned NumMemAccesses,
710 unsigned NumStridedMemAccesses,
711 unsigned NumPrefetches,
712 bool HasCall) const {
713 return 1;
714 }
715 virtual unsigned getMaxPrefetchIterationsAhead() const { return UINT_MAX; }
716 virtual bool enableWritePrefetching() const { return false; }
717 virtual bool shouldPrefetchAddressSpace(unsigned AS) const { return !AS; }
718
720 unsigned Opcode, Type *InputTypeA, Type *InputTypeB, Type *AccumType,
722 TTI::PartialReductionExtendKind OpBExtend, std::optional<unsigned> BinOp,
723 TTI::TargetCostKind CostKind, std::optional<FastMathFlags> FMF) const {
725 }
726
727 virtual unsigned getMaxInterleaveFactor(ElementCount VF) const { return 1; }
728
// Default arithmetic-instruction cost: division/remainder opcodes are
// expensive; and/or fed by llvm.experimental.widenable.condition is free
// (the condition lowers to a constant); floating-point ops return 3;
// everything else costs 1.
// NOTE(review): the extraction is missing the function-signature lines
// (original 729/731) and line 762 — line 762 presumably gates the
// "return 3" on a latency cost kind; confirm against upstream.
730 unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
732 ArrayRef<const Value *> Args, const Instruction *CxtI = nullptr) const {
733 // Widenable conditions will eventually lower into constants, so some
734 // operations with them will be trivially optimized away.
735 auto IsWidenableCondition = [](const Value *V) {
736 if (auto *II = dyn_cast<IntrinsicInst>(V))
737 if (II->getIntrinsicID() == Intrinsic::experimental_widenable_condition)
738 return true;
739 return false;
740 };
741 // FIXME: A number of transformation tests seem to require these values
742 // which seems a little odd for how arbitary there are.
743 switch (Opcode) {
744 default:
745 break;
746 case Instruction::FDiv:
747 case Instruction::FRem:
748 case Instruction::SDiv:
749 case Instruction::SRem:
750 case Instruction::UDiv:
751 case Instruction::URem:
752 // FIXME: Unlikely to be true for CodeSize.
753 return TTI::TCC_Expensive;
754 case Instruction::And:
755 case Instruction::Or:
756 if (any_of(Args, IsWidenableCondition))
757 return TTI::TCC_Free;
758 break;
759 }
760
761 // Assume a 3cy latency for fp arithmetic ops.
763 if (Ty->getScalarType()->isFloatingPointTy())
764 return 3;
765
766 return 1;
767 }
768
769 virtual InstructionCost getAltInstrCost(VectorType *VecTy, unsigned Opcode0,
770 unsigned Opcode1,
771 const SmallBitVector &OpcodeMask,
774 }
775
776 virtual InstructionCost
779 VectorType *SubTp, ArrayRef<const Value *> Args = {},
780 const Instruction *CxtI = nullptr) const {
781 return 1;
782 }
783
// Default cast cost: free (0) for casts expected to disappear during
// lowering — inttoptr/ptrtoint/ptrtoaddr between legal integer widths,
// identity or pointer-to-pointer bitcasts, and truncation to a legal
// integer type; everything else costs 1.
// NOTE(review): the extraction is missing original line 786, presumably the
// TTI::TargetCostKind CostKind parameter line — confirm against upstream.
784 virtual InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst,
785 Type *Src, TTI::CastContextHint CCH,
787 const Instruction *I) const {
788 switch (Opcode) {
789 default:
790 break;
791 case Instruction::IntToPtr: {
// Free when the integer source fits in a legal integer no wider than the
// destination pointer representation.
792 unsigned SrcSize = Src->getScalarSizeInBits();
793 if (DL.isLegalInteger(SrcSize) &&
794 SrcSize <= DL.getPointerTypeSizeInBits(Dst))
795 return 0;
796 break;
797 }
798 case Instruction::PtrToAddr: {
799 unsigned DstSize = Dst->getScalarSizeInBits();
800 assert(DstSize == DL.getAddressSizeInBits(Src));
801 if (DL.isLegalInteger(DstSize))
802 return 0;
803 break;
804 }
805 case Instruction::PtrToInt: {
// Free when the destination is a legal integer wide enough to hold the
// whole pointer.
806 unsigned DstSize = Dst->getScalarSizeInBits();
807 if (DL.isLegalInteger(DstSize) &&
808 DstSize >= DL.getPointerTypeSizeInBits(Src))
809 return 0;
810 break;
811 }
812 case Instruction::BitCast:
813 if (Dst == Src || (Dst->isPointerTy() && Src->isPointerTy()))
814 // Identity and pointer-to-pointer casts are free.
815 return 0;
816 break;
817 case Instruction::Trunc: {
818 // trunc to a native type is free (assuming the target has compare and
819 // shift-right of the same width).
820 TypeSize DstSize = DL.getTypeSizeInBits(Dst);
821 if (!DstSize.isScalable() && DL.isLegalInteger(DstSize.getFixedValue()))
822 return 0;
823 break;
824 }
825 }
826 return 1;
827 }
828
829 virtual InstructionCost
830 getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy,
831 unsigned Index, TTI::TargetCostKind CostKind) const {
832 return 1;
833 }
834
835 virtual InstructionCost getCFInstrCost(unsigned Opcode,
837 const Instruction *I = nullptr) const {
838 // A phi would be free, unless we're costing the throughput because it
839 // will require a register.
840 if (Opcode == Instruction::PHI && CostKind != TTI::TCK_RecipThroughput)
841 return 0;
842 return 1;
843 }
844
846 unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
848 TTI::OperandValueInfo Op2Info, const Instruction *I) const {
849 return 1;
850 }
851
853 unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index,
854 const Value *Op0, const Value *Op1,
856 return 1;
857 }
858
859 /// \param ScalarUserAndIdx encodes the information about extracts from a
860 /// vector with 'Scalar' being the value being extracted,'User' being the user
861 /// of the extract(nullptr if user is not known before vectorization) and
862 /// 'Idx' being the extract lane.
864 unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index,
865 Value *Scalar,
866 ArrayRef<std::tuple<Value *, User *, int>> ScalarUserAndIdx,
868 return 1;
869 }
870
873 unsigned Index,
875 return 1;
876 }
877
878 virtual InstructionCost
881 unsigned Index) const {
882 return 1;
883 }
884
885 virtual InstructionCost
886 getReplicationShuffleCost(Type *EltTy, int ReplicationFactor, int VF,
887 const APInt &DemandedDstElts,
889 return 1;
890 }
891
892 virtual InstructionCost
895 // Note: The `insertvalue` cost here is chosen to match the default case of
896 // getInstructionCost() -- as prior to adding this helper `insertvalue` was
897 // not handled.
898 if (Opcode == Instruction::InsertValue &&
900 return TTI::TCC_Basic;
901 return TTI::TCC_Free;
902 }
903
904 virtual InstructionCost
905 getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
907 TTI::OperandValueInfo OpInfo, const Instruction *I) const {
908 return 1;
909 }
910
912 unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
913 Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
914 bool UseMaskForCond, bool UseMaskForGaps) const {
915 return 1;
916 }
917
918 virtual InstructionCost
921 switch (ICA.getID()) {
922 default:
923 break;
924 case Intrinsic::allow_runtime_check:
925 case Intrinsic::allow_ubsan_check:
926 case Intrinsic::annotation:
927 case Intrinsic::assume:
928 case Intrinsic::sideeffect:
929 case Intrinsic::pseudoprobe:
930 case Intrinsic::arithmetic_fence:
931 case Intrinsic::dbg_assign:
932 case Intrinsic::dbg_declare:
933 case Intrinsic::dbg_value:
934 case Intrinsic::dbg_label:
935 case Intrinsic::invariant_start:
936 case Intrinsic::invariant_end:
937 case Intrinsic::launder_invariant_group:
938 case Intrinsic::strip_invariant_group:
939 case Intrinsic::is_constant:
940 case Intrinsic::lifetime_start:
941 case Intrinsic::lifetime_end:
942 case Intrinsic::experimental_noalias_scope_decl:
943 case Intrinsic::objectsize:
944 case Intrinsic::ptr_annotation:
945 case Intrinsic::var_annotation:
946 case Intrinsic::experimental_gc_result:
947 case Intrinsic::experimental_gc_relocate:
948 case Intrinsic::coro_alloc:
949 case Intrinsic::coro_begin:
950 case Intrinsic::coro_begin_custom_abi:
951 case Intrinsic::coro_free:
952 case Intrinsic::coro_end:
953 case Intrinsic::coro_frame:
954 case Intrinsic::coro_size:
955 case Intrinsic::coro_align:
956 case Intrinsic::coro_suspend:
957 case Intrinsic::coro_subfn_addr:
958 case Intrinsic::threadlocal_address:
959 case Intrinsic::experimental_widenable_condition:
960 case Intrinsic::ssa_copy:
961 // These intrinsics don't actually represent code after lowering.
962 return 0;
963 case Intrinsic::bswap:
964 if (!ICA.getReturnType()->isVectorTy() &&
965 !isPowerOf2_64(DL.getTypeSizeInBits(ICA.getReturnType())))
967 }
968 return 1;
969 }
970
971 virtual InstructionCost
974 switch (MICA.getID()) {
975 case Intrinsic::masked_scatter:
976 case Intrinsic::masked_gather:
977 case Intrinsic::masked_load:
978 case Intrinsic::masked_store:
979 case Intrinsic::vp_scatter:
980 case Intrinsic::vp_gather:
981 case Intrinsic::masked_compressstore:
982 case Intrinsic::masked_expandload:
983 return 1;
984 }
986 }
987
991 return 1;
992 }
993
994 // Assume that we have a register of the right size for the type.
995 virtual unsigned getNumberOfParts(Type *Tp) const { return 1; }
996
999 const SCEV *,
1000 TTI::TargetCostKind) const {
1001 return 0;
1002 }
1003
1004 virtual InstructionCost
1006 std::optional<FastMathFlags> FMF,
1007 TTI::TargetCostKind) const {
1008 return 1;
1009 }
1010
1013 TTI::TargetCostKind) const {
1014 return 1;
1015 }
1016
1017 virtual InstructionCost
1018 getExtendedReductionCost(unsigned Opcode, bool IsUnsigned, Type *ResTy,
1019 VectorType *Ty, std::optional<FastMathFlags> FMF,
1021 return 1;
1022 }
1023
1024 virtual InstructionCost
1025 getMulAccReductionCost(bool IsUnsigned, unsigned RedOpcode, Type *ResTy,
1027 return 1;
1028 }
1029
1030 virtual InstructionCost
1032 return 0;
1033 }
1034
1036 MemIntrinsicInfo &Info) const {
1037 return false;
1038 }
1039
1040 virtual unsigned getAtomicMemIntrinsicMaxElementSize() const {
1041 // Note for overrides: You must ensure for all element unordered-atomic
1042 // memory intrinsics that all power-of-2 element sizes up to, and
1043 // including, the return value of this method have a corresponding
1044 // runtime lib call. These runtime lib call definitions can be found
1045 // in RuntimeLibcalls.h
1046 return 0;
1047 }
1048
1049 virtual Value *
1051 bool CanCreate = true) const {
1052 return nullptr;
1053 }
1054
1055 virtual Type *
1057 unsigned SrcAddrSpace, unsigned DestAddrSpace,
1058 Align SrcAlign, Align DestAlign,
1059 std::optional<uint32_t> AtomicElementSize) const {
1060 return AtomicElementSize ? Type::getIntNTy(Context, *AtomicElementSize * 8)
1061 : Type::getInt8Ty(Context);
1062 }
1063
1065 SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
1066 unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
1067 Align SrcAlign, Align DestAlign,
1068 std::optional<uint32_t> AtomicCpySize) const {
1069 unsigned OpSizeInBytes = AtomicCpySize.value_or(1);
1070 Type *OpType = Type::getIntNTy(Context, OpSizeInBytes * 8);
1071 for (unsigned i = 0; i != RemainingBytes; i += OpSizeInBytes)
1072 OpsOut.push_back(OpType);
1073 }
1074
1075 virtual bool areInlineCompatible(const Function *Caller,
1076 const Function *Callee) const {
1077 return (Caller->getFnAttribute("target-cpu") ==
1078 Callee->getFnAttribute("target-cpu")) &&
1079 (Caller->getFnAttribute("target-features") ==
1080 Callee->getFnAttribute("target-features"));
1081 }
1082
1083 virtual unsigned getInlineCallPenalty(const Function *F, const CallBase &Call,
1084 unsigned DefaultCallPenalty) const {
1085 return DefaultCallPenalty;
1086 }
1087
1088 virtual bool
1090 const Attribute &Attr) const {
1091 // Copy attributes by default
1092 return true;
1093 }
1094
1095 virtual bool areTypesABICompatible(const Function *Caller,
1096 const Function *Callee,
1097 ArrayRef<Type *> Types) const {
1098 return (Caller->getFnAttribute("target-cpu") ==
1099 Callee->getFnAttribute("target-cpu")) &&
1100 (Caller->getFnAttribute("target-features") ==
1101 Callee->getFnAttribute("target-features"));
1102 }
1103
1105 return false;
1106 }
1107
1109 return false;
1110 }
1111
1112 virtual unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const {
1113 return 128;
1114 }
1115
1116 virtual bool isLegalToVectorizeLoad(LoadInst *LI) const { return true; }
1117
1118 virtual bool isLegalToVectorizeStore(StoreInst *SI) const { return true; }
1119
1120 virtual bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
1121 Align Alignment,
1122 unsigned AddrSpace) const {
1123 return true;
1124 }
1125
1126 virtual bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
1127 Align Alignment,
1128 unsigned AddrSpace) const {
1129 return true;
1130 }
1131
1133 ElementCount VF) const {
1134 return true;
1135 }
1136
1138 return true;
1139 }
1140
1141 virtual unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
1142 unsigned ChainSizeInBytes,
1143 VectorType *VecTy) const {
1144 return VF;
1145 }
1146
1147 virtual unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
1148 unsigned ChainSizeInBytes,
1149 VectorType *VecTy) const {
1150 return VF;
1151 }
1152
1153 virtual bool preferFixedOverScalableIfEqualCost(bool IsEpilogue) const {
1154 return false;
1155 }
1156
1157 virtual bool preferInLoopReduction(RecurKind Kind, Type *Ty) const {
1158 return false;
1159 }
1160 virtual bool preferAlternateOpcodeVectorization() const { return true; }
1161
1162 virtual bool preferPredicatedReductionSelect() const { return false; }
1163
1164 virtual bool preferEpilogueVectorization(ElementCount Iters) const {
1165 // We consider epilogue vectorization unprofitable for targets that
1166 // don't consider interleaving beneficial (eg. MVE).
1167 return getMaxInterleaveFactor(Iters) > 1;
1168 }
1169
1170 virtual bool shouldConsiderVectorizationRegPressure() const { return false; }
1171
1172 virtual bool shouldExpandReduction(const IntrinsicInst *II) const {
1173 return true;
1174 }
1175
1176 virtual TTI::ReductionShuffle
1180
1181 virtual unsigned getGISelRematGlobalCost() const { return 1; }
1182
1183 virtual unsigned getMinTripCountTailFoldingThreshold() const { return 0; }
1184
1185 virtual bool supportsScalableVectors() const { return false; }
1186
1187 virtual bool enableScalableVectorization() const { return false; }
1188
1189 virtual bool hasActiveVectorLength() const { return false; }
1190
1192 SmallVectorImpl<Use *> &Ops) const {
1193 return false;
1194 }
1195
1196 virtual bool isVectorShiftByScalarCheap(Type *Ty) const { return false; }
1197
1204
1205 virtual bool hasArmWideBranch(bool) const { return false; }
1206
  /// Feature bitmask for \p F. Base default: empty 32-bit mask.
  virtual APInt getFeatureMask(const Function &F) const {
    return APInt::getZero(32);
  }
1210
  /// Priority bitmask for \p F. Base default: empty 32-bit mask.
  virtual APInt getPriorityMask(const Function &F) const {
    return APInt::getZero(32);
  }
1214
  /// Whether \p F is a multiversioned function. Base default: false.
  virtual bool isMultiversionedFunction(const Function &F) const {
    return false;
  }
1218
1219 virtual unsigned getMaxNumArgs() const { return UINT_MAX; }
1220
  /// Number of padding bytes to add to a global array. Base default: none.
  virtual unsigned getNumBytesToPadGlobalArray(unsigned Size,
                                               Type *ArrayType) const {
    return 0;
  }
1225
1227 const Function &F,
1228 SmallVectorImpl<std::pair<StringRef, int64_t>> &LB) const {}
1229
1230 virtual bool allowVectorElementIndexingUsingGEP() const { return true; }
1231
  /// Custom uniformity query. Targets selecting the Custom uniformity mode
  /// must override this; the base implementation must never be reached.
  virtual bool isUniform(const Instruction *I,
                         const SmallBitVector &UniformArgs) const {
    llvm_unreachable("target must implement isUniform for Custom uniformity");
  }
1236
1237protected:
1238 // Obtain the minimum required size to hold the value (without the sign)
1239 // In case of a vector it returns the min required size for one element.
1240 unsigned minRequiredElementSize(const Value *Val, bool &isSigned) const {
1242 const auto *VectorValue = cast<Constant>(Val);
1243
1244 // In case of a vector need to pick the max between the min
1245 // required size for each element
1246 auto *VT = cast<FixedVectorType>(Val->getType());
1247
1248 // Assume unsigned elements
1249 isSigned = false;
1250
1251 // The max required size is the size of the vector element type
1252 unsigned MaxRequiredSize =
1253 VT->getElementType()->getPrimitiveSizeInBits().getFixedValue();
1254
1255 unsigned MinRequiredSize = 0;
1256 for (unsigned i = 0, e = VT->getNumElements(); i < e; ++i) {
1257 if (auto *IntElement =
1258 dyn_cast<ConstantInt>(VectorValue->getAggregateElement(i))) {
1259 bool signedElement = IntElement->getValue().isNegative();
1260 // Get the element min required size.
1261 unsigned ElementMinRequiredSize =
1262 IntElement->getValue().getSignificantBits() - 1;
1263 // In case one element is signed then all the vector is signed.
1264 isSigned |= signedElement;
1265 // Save the max required bit size between all the elements.
1266 MinRequiredSize = std::max(MinRequiredSize, ElementMinRequiredSize);
1267 } else {
1268 // not an int constant element
1269 return MaxRequiredSize;
1270 }
1271 }
1272 return MinRequiredSize;
1273 }
1274
1275 if (const auto *CI = dyn_cast<ConstantInt>(Val)) {
1276 isSigned = CI->getValue().isNegative();
1277 return CI->getValue().getSignificantBits() - 1;
1278 }
1279
1280 if (const auto *Cast = dyn_cast<SExtInst>(Val)) {
1281 isSigned = true;
1282 return Cast->getSrcTy()->getScalarSizeInBits() - 1;
1283 }
1284
1285 if (const auto *Cast = dyn_cast<ZExtInst>(Val)) {
1286 isSigned = false;
1287 return Cast->getSrcTy()->getScalarSizeInBits();
1288 }
1289
1290 isSigned = false;
1291 return Val->getType()->getScalarSizeInBits();
1292 }
1293
1294 bool isStridedAccess(const SCEV *Ptr) const {
1295 return Ptr && isa<SCEVAddRecExpr>(Ptr);
1296 }
1297
1299 const SCEV *Ptr) const {
1300 if (!isStridedAccess(Ptr))
1301 return nullptr;
1302 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ptr);
1303 return dyn_cast<SCEVConstant>(AddRec->getStepRecurrence(*SE));
1304 }
1305
1307 int64_t MergeDistance) const {
1308 const SCEVConstant *Step = getConstantStrideStep(SE, Ptr);
1309 if (!Step)
1310 return false;
1311 APInt StrideVal = Step->getAPInt();
1312 if (StrideVal.getBitWidth() > 64)
1313 return false;
1314 // FIXME: Need to take absolute value for negative stride case.
1315 return StrideVal.getSExtValue() < MergeDistance;
1316 }
1317};
1318
1319/// CRTP base class for use as a mix-in that aids implementing
1320/// a TargetTransformInfo-compatible class.
1321template <typename T>
1323private:
1324 typedef TargetTransformInfoImplBase BaseT;
1325
1326protected:
1328
1329public:
1330 InstructionCost getGEPCost(Type *PointeeType, const Value *Ptr,
1331 ArrayRef<const Value *> Operands, Type *AccessType,
1332 TTI::TargetCostKind CostKind) const override {
1333 assert(PointeeType && Ptr && "can't get GEPCost of nullptr");
1334 auto *BaseGV = dyn_cast<GlobalValue>(Ptr->stripPointerCasts());
1335 bool HasBaseReg = (BaseGV == nullptr);
1336
1337 auto PtrSizeBits = DL.getPointerTypeSizeInBits(Ptr->getType());
1338 APInt BaseOffset(PtrSizeBits, 0);
1339 int64_t Scale = 0;
1340
1341 auto GTI = gep_type_begin(PointeeType, Operands);
1342 Type *TargetType = nullptr;
1343
1344 // Handle the case where the GEP instruction has a single operand,
1345 // the basis, therefore TargetType is a nullptr.
1346 if (Operands.empty())
1347 return !BaseGV ? TTI::TCC_Free : TTI::TCC_Basic;
1348
1349 for (auto I = Operands.begin(); I != Operands.end(); ++I, ++GTI) {
1350 TargetType = GTI.getIndexedType();
1351 // We assume that the cost of Scalar GEP with constant index and the
1352 // cost of Vector GEP with splat constant index are the same.
1353 const ConstantInt *ConstIdx = dyn_cast<ConstantInt>(*I);
1354 if (!ConstIdx)
1355 if (auto Splat = getSplatValue(*I))
1356 ConstIdx = dyn_cast<ConstantInt>(Splat);
1357 if (StructType *STy = GTI.getStructTypeOrNull()) {
1358 // For structures the index is always splat or scalar constant
1359 assert(ConstIdx && "Unexpected GEP index");
1360 uint64_t Field = ConstIdx->getZExtValue();
1361 BaseOffset += DL.getStructLayout(STy)->getElementOffset(Field);
1362 } else {
1363 // If this operand is a scalable type, bail out early.
1364 // TODO: Make isLegalAddressingMode TypeSize aware.
1365 if (TargetType->isScalableTy())
1366 return TTI::TCC_Basic;
1367 int64_t ElementSize =
1368 GTI.getSequentialElementStride(DL).getFixedValue();
1369 if (ConstIdx) {
1370 BaseOffset +=
1371 ConstIdx->getValue().sextOrTrunc(PtrSizeBits) * ElementSize;
1372 } else {
1373 // Needs scale register.
1374 if (Scale != 0)
1375 // No addressing mode takes two scale registers.
1376 return TTI::TCC_Basic;
1377 Scale = ElementSize;
1378 }
1379 }
1380 }
1381
1382 // If we haven't been provided a hint, use the target type for now.
1383 //
1384 // TODO: Take a look at potentially removing this: This is *slightly* wrong
1385 // as it's possible to have a GEP with a foldable target type but a memory
1386 // access that isn't foldable. For example, this load isn't foldable on
1387 // RISC-V:
1388 //
1389 // %p = getelementptr i32, ptr %base, i32 42
1390 // %x = load <2 x i32>, ptr %p
1391 if (!AccessType)
1392 AccessType = TargetType;
1393
1394 // If the final address of the GEP is a legal addressing mode for the given
1395 // access type, then we can fold it into its users.
1396 if (static_cast<const T *>(this)->isLegalAddressingMode(
1397 AccessType, const_cast<GlobalValue *>(BaseGV),
1398 BaseOffset.sextOrTrunc(64).getSExtValue(), HasBaseReg, Scale,
1400 return TTI::TCC_Free;
1401
1402 // TODO: Instead of returning TCC_Basic here, we should use
1403 // getArithmeticInstrCost. Or better yet, provide a hook to let the target
1404 // model it.
1405 return TTI::TCC_Basic;
1406 }
1407
1410 const TTI::PointersChainInfo &Info, Type *AccessTy,
1411 TTI::TargetCostKind CostKind) const override {
1413 // In the basic model we take into account GEP instructions only
1414 // (although here can come alloca instruction, a value, constants and/or
1415 // constant expressions, PHIs, bitcasts ... whatever allowed to be used as a
1416 // pointer). Typically, if Base is a not a GEP-instruction and all the
1417 // pointers are relative to the same base address, all the rest are
1418 // either GEP instructions, PHIs, bitcasts or constants. When we have same
1419 // base, we just calculate cost of each non-Base GEP as an ADD operation if
1420 // any their index is a non-const.
1421 // If no known dependecies between the pointers cost is calculated as a sum
1422 // of costs of GEP instructions.
1423 for (const Value *V : Ptrs) {
1424 const auto *GEP = dyn_cast<GetElementPtrInst>(V);
1425 if (!GEP)
1426 continue;
1427 if (Info.isSameBase() && V != Base) {
1428 if (GEP->hasAllConstantIndices())
1429 continue;
1430 Cost += static_cast<const T *>(this)->getArithmeticInstrCost(
1431 Instruction::Add, GEP->getType(), CostKind,
1432 {TTI::OK_AnyValue, TTI::OP_None}, {TTI::OK_AnyValue, TTI::OP_None},
1433 {});
1434 } else {
1435 SmallVector<const Value *> Indices(GEP->indices());
1436 Cost += static_cast<const T *>(this)->getGEPCost(
1437 GEP->getSourceElementType(), GEP->getPointerOperand(), Indices,
1438 AccessTy, CostKind);
1439 }
1440 }
1441 return Cost;
1442 }
1443
1446 TTI::TargetCostKind CostKind) const override {
1447 using namespace llvm::PatternMatch;
1448
1449 auto *TargetTTI = static_cast<const T *>(this);
1450 // Handle non-intrinsic calls, invokes, and callbr.
1451 // FIXME: Unlikely to be true for anything but CodeSize.
1452 auto *CB = dyn_cast<CallBase>(U);
1453 if (CB && !isa<IntrinsicInst>(U)) {
1454 if (const Function *F = CB->getCalledFunction()) {
1455 if (!TargetTTI->isLoweredToCall(F))
1456 return TTI::TCC_Basic; // Give a basic cost if it will be lowered
1457
1458 return TTI::TCC_Basic * (F->getFunctionType()->getNumParams() + 1);
1459 }
1460 // For indirect or other calls, scale cost by number of arguments.
1461 return TTI::TCC_Basic * (CB->arg_size() + 1);
1462 }
1463
1464 Type *Ty = U->getType();
1465 unsigned Opcode = Operator::getOpcode(U);
1466 auto *I = dyn_cast<Instruction>(U);
1467 switch (Opcode) {
1468 default:
1469 break;
1470 case Instruction::Call: {
1471 assert(isa<IntrinsicInst>(U) && "Unexpected non-intrinsic call");
1472 auto *Intrinsic = cast<IntrinsicInst>(U);
1473 IntrinsicCostAttributes CostAttrs(Intrinsic->getIntrinsicID(), *CB);
1474 return TargetTTI->getIntrinsicInstrCost(CostAttrs, CostKind);
1475 }
1476 case Instruction::UncondBr:
1477 case Instruction::CondBr:
1478 case Instruction::Ret:
1479 case Instruction::PHI:
1480 case Instruction::Switch:
1481 return TargetTTI->getCFInstrCost(Opcode, CostKind, I);
1482 case Instruction::Freeze:
1483 return TTI::TCC_Free;
1484 case Instruction::ExtractValue:
1485 case Instruction::InsertValue:
1486 return TargetTTI->getInsertExtractValueCost(Opcode, CostKind);
1487 case Instruction::Alloca:
1488 if (cast<AllocaInst>(U)->isStaticAlloca())
1489 return TTI::TCC_Free;
1490 break;
1491 case Instruction::GetElementPtr: {
1492 const auto *GEP = cast<GEPOperator>(U);
1493 Type *AccessType = nullptr;
1494 // For now, only provide the AccessType in the simple case where the GEP
1495 // only has one user.
1496 if (GEP->hasOneUser() && I)
1497 AccessType = I->user_back()->getAccessType();
1498
1499 return TargetTTI->getGEPCost(GEP->getSourceElementType(),
1500 Operands.front(), Operands.drop_front(),
1501 AccessType, CostKind);
1502 }
1503 case Instruction::Add:
1504 case Instruction::FAdd:
1505 case Instruction::Sub:
1506 case Instruction::FSub:
1507 case Instruction::Mul:
1508 case Instruction::FMul:
1509 case Instruction::UDiv:
1510 case Instruction::SDiv:
1511 case Instruction::FDiv:
1512 case Instruction::URem:
1513 case Instruction::SRem:
1514 case Instruction::FRem:
1515 case Instruction::Shl:
1516 case Instruction::LShr:
1517 case Instruction::AShr:
1518 case Instruction::And:
1519 case Instruction::Or:
1520 case Instruction::Xor:
1521 case Instruction::FNeg: {
1522 const TTI::OperandValueInfo Op1Info = TTI::getOperandInfo(Operands[0]);
1523 TTI::OperandValueInfo Op2Info;
1524 if (Opcode != Instruction::FNeg)
1525 Op2Info = TTI::getOperandInfo(Operands[1]);
1526 return TargetTTI->getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
1527 Op2Info, Operands, I);
1528 }
1529 case Instruction::IntToPtr:
1530 case Instruction::PtrToAddr:
1531 case Instruction::PtrToInt:
1532 case Instruction::SIToFP:
1533 case Instruction::UIToFP:
1534 case Instruction::FPToUI:
1535 case Instruction::FPToSI:
1536 case Instruction::Trunc:
1537 case Instruction::FPTrunc:
1538 case Instruction::BitCast:
1539 case Instruction::FPExt:
1540 case Instruction::SExt:
1541 case Instruction::ZExt:
1542 case Instruction::AddrSpaceCast: {
1543 Type *OpTy = Operands[0]->getType();
1544 return TargetTTI->getCastInstrCost(
1545 Opcode, Ty, OpTy, TTI::getCastContextHint(I), CostKind, I);
1546 }
1547 case Instruction::Store: {
1548 auto *SI = cast<StoreInst>(U);
1549 Type *ValTy = Operands[0]->getType();
1550 TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(Operands[0]);
1551 return TargetTTI->getMemoryOpCost(Opcode, ValTy, SI->getAlign(),
1552 SI->getPointerAddressSpace(), CostKind,
1553 OpInfo, I);
1554 }
1555 case Instruction::Load: {
1556 // FIXME: Arbitary cost which could come from the backend.
1558 return 4;
1559 auto *LI = cast<LoadInst>(U);
1560 Type *LoadType = U->getType();
1561 // If there is a non-register sized type, the cost estimation may expand
1562 // it to be several instructions to load into multiple registers on the
1563 // target. But, if the only use of the load is a trunc instruction to a
1564 // register sized type, the instruction selector can combine these
1565 // instructions to be a single load. So, in this case, we use the
1566 // destination type of the trunc instruction rather than the load to
1567 // accurately estimate the cost of this load instruction.
1568 if (CostKind == TTI::TCK_CodeSize && LI->hasOneUse() &&
1569 !LoadType->isVectorTy()) {
1570 if (const TruncInst *TI = dyn_cast<TruncInst>(*LI->user_begin()))
1571 LoadType = TI->getDestTy();
1572 }
1573 return TargetTTI->getMemoryOpCost(Opcode, LoadType, LI->getAlign(),
1575 {TTI::OK_AnyValue, TTI::OP_None}, I);
1576 }
1577 case Instruction::Select: {
1578 const Value *Op0, *Op1;
1579 if (match(U, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) ||
1580 match(U, m_LogicalOr(m_Value(Op0), m_Value(Op1)))) {
1581 // select x, y, false --> x & y
1582 // select x, true, y --> x | y
1583 const auto Op1Info = TTI::getOperandInfo(Op0);
1584 const auto Op2Info = TTI::getOperandInfo(Op1);
1585 assert(Op0->getType()->getScalarSizeInBits() == 1 &&
1586 Op1->getType()->getScalarSizeInBits() == 1);
1587
1588 SmallVector<const Value *, 2> Operands{Op0, Op1};
1589 return TargetTTI->getArithmeticInstrCost(
1590 match(U, m_LogicalOr()) ? Instruction::Or : Instruction::And, Ty,
1591 CostKind, Op1Info, Op2Info, Operands, I);
1592 }
1593 const auto Op1Info = TTI::getOperandInfo(Operands[1]);
1594 const auto Op2Info = TTI::getOperandInfo(Operands[2]);
1595 Type *CondTy = Operands[0]->getType();
1596 return TargetTTI->getCmpSelInstrCost(Opcode, U->getType(), CondTy,
1598 CostKind, Op1Info, Op2Info, I);
1599 }
1600 case Instruction::ICmp:
1601 case Instruction::FCmp: {
1602 const auto Op1Info = TTI::getOperandInfo(Operands[0]);
1603 const auto Op2Info = TTI::getOperandInfo(Operands[1]);
1604 Type *ValTy = Operands[0]->getType();
1605 // TODO: Also handle ICmp/FCmp constant expressions.
1606 return TargetTTI->getCmpSelInstrCost(Opcode, ValTy, U->getType(),
1607 I ? cast<CmpInst>(I)->getPredicate()
1609 CostKind, Op1Info, Op2Info, I);
1610 }
1611 case Instruction::InsertElement: {
1612 auto *IE = dyn_cast<InsertElementInst>(U);
1613 if (!IE)
1614 return TTI::TCC_Basic; // FIXME
1615 unsigned Idx = -1;
1616 if (auto *CI = dyn_cast<ConstantInt>(Operands[2]))
1617 if (CI->getValue().getActiveBits() <= 32)
1618 Idx = CI->getZExtValue();
1619 return TargetTTI->getVectorInstrCost(*IE, Ty, CostKind, Idx,
1621 }
1622 case Instruction::ShuffleVector: {
1623 auto *Shuffle = dyn_cast<ShuffleVectorInst>(U);
1624 if (!Shuffle)
1625 return TTI::TCC_Basic; // FIXME
1626
1627 auto *VecTy = cast<VectorType>(U->getType());
1628 auto *VecSrcTy = cast<VectorType>(Operands[0]->getType());
1629 ArrayRef<int> Mask = Shuffle->getShuffleMask();
1630 int NumSubElts, SubIndex;
1631
1632 // Treat undef/poison mask as free (no matter the length).
1633 if (all_of(Mask, [](int M) { return M < 0; }))
1634 return TTI::TCC_Free;
1635
1636 // TODO: move more of this inside improveShuffleKindFromMask.
1637 if (Shuffle->changesLength()) {
1638 // Treat a 'subvector widening' as a free shuffle.
1639 if (Shuffle->increasesLength() && Shuffle->isIdentityWithPadding())
1640 return TTI::TCC_Free;
1641
1642 if (Shuffle->isExtractSubvectorMask(SubIndex))
1643 return TargetTTI->getShuffleCost(TTI::SK_ExtractSubvector, VecTy,
1644 VecSrcTy, Mask, CostKind, SubIndex,
1645 VecTy, Operands, Shuffle);
1646
1647 if (Shuffle->isInsertSubvectorMask(NumSubElts, SubIndex))
1648 return TargetTTI->getShuffleCost(
1649 TTI::SK_InsertSubvector, VecTy, VecSrcTy, Mask, CostKind,
1650 SubIndex,
1651 FixedVectorType::get(VecTy->getScalarType(), NumSubElts),
1652 Operands, Shuffle);
1653
1654 int ReplicationFactor, VF;
1655 if (Shuffle->isReplicationMask(ReplicationFactor, VF)) {
1656 APInt DemandedDstElts = APInt::getZero(Mask.size());
1657 for (auto I : enumerate(Mask)) {
1658 if (I.value() != PoisonMaskElem)
1659 DemandedDstElts.setBit(I.index());
1660 }
1661 return TargetTTI->getReplicationShuffleCost(
1662 VecSrcTy->getElementType(), ReplicationFactor, VF,
1663 DemandedDstElts, CostKind);
1664 }
1665
1666 bool IsUnary = isa<UndefValue>(Operands[1]);
1667 NumSubElts = VecSrcTy->getElementCount().getKnownMinValue();
1668 SmallVector<int, 16> AdjustMask(Mask);
1669
1670 // Widening shuffle - widening the source(s) to the new length
1671 // (treated as free - see above), and then perform the adjusted
1672 // shuffle at that width.
1673 if (Shuffle->increasesLength()) {
1674 for (int &M : AdjustMask)
1675 M = M >= NumSubElts ? (M + (Mask.size() - NumSubElts)) : M;
1676
1677 return TargetTTI->getShuffleCost(
1679 VecTy, AdjustMask, CostKind, 0, nullptr, Operands, Shuffle);
1680 }
1681
1682 // Narrowing shuffle - perform shuffle at original wider width and
1683 // then extract the lower elements.
1684 // FIXME: This can assume widening, which is not true of all vector
1685 // architectures (and is not even the default).
1686 AdjustMask.append(NumSubElts - Mask.size(), PoisonMaskElem);
1687
1688 InstructionCost ShuffleCost = TargetTTI->getShuffleCost(
1690 VecSrcTy, VecSrcTy, AdjustMask, CostKind, 0, nullptr, Operands,
1691 Shuffle);
1692
1693 SmallVector<int, 16> ExtractMask(Mask.size());
1694 std::iota(ExtractMask.begin(), ExtractMask.end(), 0);
1695 return ShuffleCost + TargetTTI->getShuffleCost(
1696 TTI::SK_ExtractSubvector, VecTy, VecSrcTy,
1697 ExtractMask, CostKind, 0, VecTy, {}, Shuffle);
1698 }
1699
1700 if (Shuffle->isIdentity())
1701 return TTI::TCC_Free;
1702
1703 if (Shuffle->isReverse())
1704 return TargetTTI->getShuffleCost(TTI::SK_Reverse, VecTy, VecSrcTy, Mask,
1705 CostKind, 0, nullptr, Operands,
1706 Shuffle);
1707
1708 if (Shuffle->isTranspose())
1709 return TargetTTI->getShuffleCost(TTI::SK_Transpose, VecTy, VecSrcTy,
1710 Mask, CostKind, 0, nullptr, Operands,
1711 Shuffle);
1712
1713 if (Shuffle->isZeroEltSplat())
1714 return TargetTTI->getShuffleCost(TTI::SK_Broadcast, VecTy, VecSrcTy,
1715 Mask, CostKind, 0, nullptr, Operands,
1716 Shuffle);
1717
1718 if (Shuffle->isSingleSource())
1719 return TargetTTI->getShuffleCost(TTI::SK_PermuteSingleSrc, VecTy,
1720 VecSrcTy, Mask, CostKind, 0, nullptr,
1721 Operands, Shuffle);
1722
1723 if (Shuffle->isInsertSubvectorMask(NumSubElts, SubIndex))
1724 return TargetTTI->getShuffleCost(
1725 TTI::SK_InsertSubvector, VecTy, VecSrcTy, Mask, CostKind, SubIndex,
1726 FixedVectorType::get(VecTy->getScalarType(), NumSubElts), Operands,
1727 Shuffle);
1728
1729 if (Shuffle->isSelect())
1730 return TargetTTI->getShuffleCost(TTI::SK_Select, VecTy, VecSrcTy, Mask,
1731 CostKind, 0, nullptr, Operands,
1732 Shuffle);
1733
1734 if (Shuffle->isSplice(SubIndex))
1735 return TargetTTI->getShuffleCost(TTI::SK_Splice, VecTy, VecSrcTy, Mask,
1736 CostKind, SubIndex, nullptr, Operands,
1737 Shuffle);
1738
1739 return TargetTTI->getShuffleCost(TTI::SK_PermuteTwoSrc, VecTy, VecSrcTy,
1740 Mask, CostKind, 0, nullptr, Operands,
1741 Shuffle);
1742 }
1743 case Instruction::ExtractElement: {
1744 auto *EEI = dyn_cast<ExtractElementInst>(U);
1745 if (!EEI)
1746 return TTI::TCC_Basic; // FIXME
1747 unsigned Idx = -1;
1748 if (auto *CI = dyn_cast<ConstantInt>(Operands[1]))
1749 if (CI->getValue().getActiveBits() <= 32)
1750 Idx = CI->getZExtValue();
1751 Type *DstTy = Operands[0]->getType();
1752 return TargetTTI->getVectorInstrCost(*EEI, DstTy, CostKind, Idx);
1753 }
1754 }
1755
1756 // By default, just classify everything remaining as 'basic'.
1757 return TTI::TCC_Basic;
1758 }
1759
1761 auto *TargetTTI = static_cast<const T *>(this);
1762 SmallVector<const Value *, 4> Ops(I->operand_values());
1763 InstructionCost Cost = TargetTTI->getInstructionCost(
1766 }
1767
1768 bool supportsTailCallFor(const CallBase *CB) const override {
1769 return static_cast<const T *>(this)->supportsTailCalls();
1770 }
1771};
1772} // namespace llvm
1773
1774#endif
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static cl::opt< OutputCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(OutputCostKind::RecipThroughput), cl::values(clEnumValN(OutputCostKind::RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(OutputCostKind::Latency, "latency", "Instruction latency"), clEnumValN(OutputCostKind::CodeSize, "code-size", "Code size"), clEnumValN(OutputCostKind::SizeAndLatency, "size-latency", "Code size and latency"), clEnumValN(OutputCostKind::All, "all", "Print all cost kinds")))
Hexagon Common GEP
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
#define T
uint64_t IntrinsicInst * II
OptimizedStructLayoutField Field
static cl::opt< RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode > Mode("regalloc-enable-advisor", cl::Hidden, cl::init(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values(clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default, "default", "Default"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Release, "release", "precompiled"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Development, "development", "for training")))
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
This pass exposes codegen information to IR-level passes.
Class for arbitrary precision integers.
Definition APInt.h:78
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
Definition APInt.h:1345
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition APInt.h:1503
LLVM_ABI APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
Definition APInt.cpp:1072
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
Definition APInt.h:201
int64_t getSExtValue() const
Get sign extended value.
Definition APInt.h:1577
This class represents a conversion between pointers from one address space to another.
an instruction to allocate memory on the stack
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
ArrayRef< T > drop_front(size_t N=1) const
Drop the first N elements of the array.
Definition ArrayRef.h:195
const T & front() const
front - Get the first element.
Definition ArrayRef.h:145
iterator end() const
Definition ArrayRef.h:131
iterator begin() const
Definition ArrayRef.h:130
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
Class to represent array types.
A cache of @llvm.assume calls within a function.
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:105
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
Conditional Branch instruction.
This is the shared class of boolean and integer constants.
Definition Constants.h:87
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:168
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition Constants.h:159
This is an important base class in LLVM.
Definition Constant.h:43
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:159
static constexpr ElementCount get(ScalarTy MinVal, bool Scalable)
Definition TypeSize.h:315
Convenience struct for specifying and reasoning about fast-math flags.
Definition FMF.h:23
static LLVM_ABI FixedVectorType * get(Type *ElementType, unsigned NumElts)
Definition Type.cpp:873
The core instruction combiner logic.
static InstructionCost getInvalid(CostType Val=0)
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
An instruction for reading from memory.
Represents a single loop in the control flow graph.
Definition LoopInfo.h:40
Information for memory intrinsic cost model.
unsigned getOpcode() const
Return the opcode for this Instruction or ConstantExpr.
Definition Operator.h:43
The optimization diagnostic interface.
Analysis providing profile information.
The RecurrenceDescriptor is used to identify recurrences variables in a loop.
This node represents a polynomial recurrence on the trip count of the specified loop.
SCEVUse getStepRecurrence(ScalarEvolution &SE) const
Constructs and returns the recurrence indicating how much this expression steps by.
This class represents a constant integer value.
const APInt & getAPInt() const
This class represents an analyzed expression in the program.
The main scalar evolution driver.
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is ...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StackOffset holds a fixed and a scalable offset in bytes.
Definition TypeSize.h:30
static StackOffset getScalable(int64_t Scalable)
Definition TypeSize.h:40
static StackOffset getFixed(int64_t Fixed)
Definition TypeSize.h:39
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
Class to represent struct types.
Multiway switch.
Provides information about what library functions are available for the current target.
virtual InstructionCost getPointersChainCost(ArrayRef< const Value * > Ptrs, const Value *Base, const TTI::PointersChainInfo &Info, Type *AccessTy, TTI::TargetCostKind CostKind) const
virtual bool preferAlternateOpcodeVectorization() const
virtual bool isProfitableLSRChainElement(Instruction *I) const
virtual unsigned getCallerAllocaCost(const CallBase *CB, const AllocaInst *AI) const
virtual bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) const
virtual InstructionCost getCostOfKeepingLiveOverCall(ArrayRef< Type * > Tys) const
virtual TailFoldingStyle getPreferredTailFoldingStyle() const
virtual unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const
virtual InstructionCost getMulAccReductionCost(bool IsUnsigned, unsigned RedOpcode, Type *ResTy, VectorType *Ty, TTI::TargetCostKind CostKind) const
virtual const DataLayout & getDataLayout() const
virtual bool preferFixedOverScalableIfEqualCost(bool IsEpilogue) const
virtual std::optional< unsigned > getCacheAssociativity(TargetTransformInfo::CacheLevel Level) const
virtual InstructionCost getCallInstrCost(Function *F, Type *RetTy, ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind) const
virtual bool enableInterleavedAccessVectorization() const
virtual InstructionCost getPartialReductionCost(unsigned Opcode, Type *InputTypeA, Type *InputTypeB, Type *AccumType, ElementCount VF, TTI::PartialReductionExtendKind OpAExtend, TTI::PartialReductionExtendKind OpBExtend, std::optional< unsigned > BinOp, TTI::TargetCostKind CostKind, std::optional< FastMathFlags > FMF) const
virtual InstructionCost getOperandsScalarizationOverhead(ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind, TTI::VectorInstrContext VIC=TTI::VectorInstrContext::None) const
virtual InstructionCost getFPOpCost(Type *Ty) const
virtual unsigned getMaxInterleaveFactor(ElementCount VF) const
virtual bool isLegalMaskedExpandLoad(Type *DataType, Align Alignment) const
virtual TTI::MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const
virtual bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const
bool isStridedAccess(const SCEV *Ptr) const
virtual unsigned getAtomicMemIntrinsicMaxElementSize() const
virtual Value * rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV, Value *NewV) const
virtual TargetTransformInfo::VPLegalization getVPLegalizationStrategy(const VPIntrinsic &PI) const
virtual bool enableAggressiveInterleaving(bool LoopHasReductions) const
virtual std::optional< Value * > simplifyDemandedVectorEltsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp) const
virtual bool isLegalMaskedStore(Type *DataType, Align Alignment, unsigned AddressSpace, TTI::MaskKind MaskKind) const
virtual InstructionCost getAddressComputationCost(Type *PtrTy, ScalarEvolution *, const SCEV *, TTI::TargetCostKind) const
virtual bool isLegalBroadcastLoad(Type *ElementTy, ElementCount NumElements) const
virtual bool isIndexedLoadLegal(TTI::MemIndexedMode Mode, Type *Ty) const
virtual unsigned adjustInliningThreshold(const CallBase *CB) const
virtual unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize, unsigned ChainSizeInBytes, VectorType *VecTy) const
virtual bool shouldDropLSRSolutionIfLessProfitable() const
virtual bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) const
virtual bool isLegalMaskedLoad(Type *DataType, Align Alignment, unsigned AddressSpace, TTI::MaskKind MaskKind) const
virtual bool hasDivRemOp(Type *DataType, bool IsSigned) const
virtual bool isLegalStridedLoadStore(Type *DataType, Align Alignment) const
virtual bool isLegalICmpImmediate(int64_t Imm) const
virtual InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, TTI::OperandValueInfo OpInfo, const Instruction *I) const
virtual bool preferPredicateOverEpilogue(TailFoldingInfo *TFI) const
virtual bool haveFastSqrt(Type *Ty) const
virtual ElementCount getMinimumVF(unsigned ElemWidth, bool IsScalable) const
virtual bool collectFlatAddressOperands(SmallVectorImpl< int > &OpIndexes, Intrinsic::ID IID) const
virtual bool addrspacesMayAlias(unsigned AS0, unsigned AS1) const
virtual unsigned getRegisterClassForType(bool Vector, Type *Ty=nullptr) const
virtual std::optional< unsigned > getVScaleForTuning() const
virtual InstructionCost getIntImmCost(const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind) const
virtual InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, StackOffset BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace) const
virtual unsigned getNumberOfParts(Type *Tp) const
virtual bool isLegalMaskedCompressStore(Type *DataType, Align Alignment) const
virtual bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE, AssumptionCache &AC, TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo) const
virtual void getPeelingPreferences(Loop *, ScalarEvolution &, TTI::PeelingPreferences &) const
virtual std::optional< Value * > simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed) const
virtual bool useColdCCForColdCall(Function &F) const
virtual unsigned getNumberOfRegisters(unsigned ClassID) const
virtual bool canHaveNonUndefGlobalInitializerInAddressSpace(unsigned AS) const
virtual APInt getAddrSpaceCastPreservedPtrMask(unsigned SrcAS, unsigned DstAS) const
virtual bool isLegalAddScalableImmediate(int64_t Imm) const
virtual bool isLegalInterleavedAccessType(VectorType *VTy, unsigned Factor, Align Alignment, unsigned AddrSpace) const
TargetTransformInfoImplBase(TargetTransformInfoImplBase &&Arg)
virtual bool shouldPrefetchAddressSpace(unsigned AS) const
virtual bool forceScalarizeMaskedScatter(VectorType *DataType, Align Alignment) const
virtual uint64_t getMaxMemIntrinsicInlineSizeThreshold() const
virtual KnownBits computeKnownBitsAddrSpaceCast(unsigned FromAS, unsigned ToAS, const KnownBits &FromPtrBits) const
virtual unsigned getMinVectorRegisterBitWidth() const
unsigned minRequiredElementSize(const Value *Val, bool &isSigned) const
virtual bool shouldBuildLookupTablesForConstant(Constant *C) const
virtual bool isFPVectorizationPotentiallyUnsafe() const
virtual bool isLegalToVectorizeReduction(const RecurrenceDescriptor &RdxDesc, ElementCount VF) const
virtual InstructionCost getIntImmCostInst(unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind, Instruction *Inst=nullptr) const
virtual bool isLegalAltInstr(VectorType *VecTy, unsigned Opcode0, unsigned Opcode1, const SmallBitVector &OpcodeMask) const
virtual InstructionCost getIndexedVectorInstrCostFromEnd(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index) const
virtual std::optional< unsigned > getCacheSize(TargetTransformInfo::CacheLevel Level) const
virtual InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy, unsigned Index, TTI::TargetCostKind CostKind) const
virtual bool shouldTreatInstructionLikeSelect(const Instruction *I) const
virtual std::optional< Instruction * > instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const
virtual unsigned getEpilogueVectorizationMinVF() const
virtual std::pair< const Value *, unsigned > getPredicatedAddrSpace(const Value *V) const
virtual bool shouldMaximizeVectorBandwidth(TargetTransformInfo::RegisterKind K) const
virtual void getMemcpyLoopResidualLoweringType(SmallVectorImpl< Type * > &OpsOut, LLVMContext &Context, unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace, Align SrcAlign, Align DestAlign, std::optional< uint32_t > AtomicCpySize) const
virtual unsigned getStoreMinimumVF(unsigned VF, Type *, Type *, Align, unsigned) const
virtual InstructionCost getRegisterClassReloadCost(unsigned ClassID, TTI::TargetCostKind CostKind) const
virtual TTI::PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) const
virtual TTI::AddressingModeKind getPreferredAddressingMode(const Loop *L, ScalarEvolution *SE) const
virtual bool forceScalarizeMaskedGather(VectorType *DataType, Align Alignment) const
virtual unsigned getMaxPrefetchIterationsAhead() const
virtual bool allowVectorElementIndexingUsingGEP() const
virtual bool isUniform(const Instruction *I, const SmallBitVector &UniformArgs) const
virtual InstructionCost getInstructionCost(const User *U, ArrayRef< const Value * > Operands, TTI::TargetCostKind CostKind) const
virtual TTI::ReductionShuffle getPreferredExpandedReductionShuffle(const IntrinsicInst *II) const
const SCEVConstant * getConstantStrideStep(ScalarEvolution *SE, const SCEV *Ptr) const
virtual bool hasBranchDivergence(const Function *F=nullptr) const
virtual InstructionCost getArithmeticReductionCost(unsigned, VectorType *, std::optional< FastMathFlags > FMF, TTI::TargetCostKind) const
virtual bool isProfitableToHoist(Instruction *I) const
virtual const char * getRegisterClassName(unsigned ClassID) const
virtual InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *, FastMathFlags, TTI::TargetCostKind) const
virtual bool isLegalToVectorizeLoad(LoadInst *LI) const
virtual bool isTargetIntrinsicTriviallyScalarizable(Intrinsic::ID ID) const
virtual unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const
virtual InstructionCost getAltInstrCost(VectorType *VecTy, unsigned Opcode0, unsigned Opcode1, const SmallBitVector &OpcodeMask, TTI::TargetCostKind CostKind) const
virtual unsigned getInlineCallPenalty(const Function *F, const CallBase &Call, unsigned DefaultCallPenalty) const
virtual InstructionCost getVectorInstrCost(const Instruction &I, Type *Val, TTI::TargetCostKind CostKind, unsigned Index, TTI::VectorInstrContext VIC=TTI::VectorInstrContext::None) const
virtual bool isVectorShiftByScalarCheap(Type *Ty) const
virtual bool isLegalNTStore(Type *DataType, Align Alignment) const
virtual APInt getFeatureMask(const Function &F) const
virtual InstructionCost getMemIntrinsicInstrCost(const MemIntrinsicCostAttributes &MICA, TTI::TargetCostKind CostKind) const
virtual std::optional< unsigned > getMinPageSize() const
virtual bool shouldCopyAttributeWhenOutliningFrom(const Function *Caller, const Attribute &Attr) const
virtual unsigned getRegUsageForType(Type *Ty) const
virtual bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace, Instruction *I=nullptr, int64_t ScalableOffset=0) const
virtual InstructionCost getScalarizationOverhead(VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract, TTI::TargetCostKind CostKind, bool ForPoisonSrc=true, ArrayRef< Value * > VL={}, TTI::VectorInstrContext VIC=TTI::VectorInstrContext::None) const
virtual bool isElementTypeLegalForScalableVector(Type *Ty) const
virtual bool isLoweredToCall(const Function *F) const
virtual bool isLegalMaskedScatter(Type *DataType, Align Alignment) const
virtual bool isTruncateFree(Type *Ty1, Type *Ty2) const
virtual InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index, Value *Scalar, ArrayRef< std::tuple< Value *, User *, int > > ScalarUserAndIdx, TTI::VectorInstrContext VIC=TTI::VectorInstrContext::None) const
virtual InstructionCost getArithmeticInstrCost(unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Opd1Info, TTI::OperandValueInfo Opd2Info, ArrayRef< const Value * > Args, const Instruction *CxtI=nullptr) const
virtual InstructionCost getRegisterClassSpillCost(unsigned ClassID, TTI::TargetCostKind CostKind) const
virtual bool isIndexedStoreLegal(TTI::MemIndexedMode Mode, Type *Ty) const
virtual InstructionCost getGEPCost(Type *PointeeType, const Value *Ptr, ArrayRef< const Value * > Operands, Type *AccessType, TTI::TargetCostKind CostKind) const
virtual BranchProbability getPredictableBranchThreshold() const
virtual bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
virtual InstructionCost getReplicationShuffleCost(Type *EltTy, int ReplicationFactor, int VF, const APInt &DemandedDstElts, TTI::TargetCostKind CostKind) const
virtual bool isLegalToVectorizeStore(StoreInst *SI) const
virtual bool areInlineCompatible(const Function *Caller, const Function *Callee) const
virtual bool isTargetIntrinsicWithStructReturnOverloadAtField(Intrinsic::ID ID, int RetIdx) const
virtual bool hasConditionalLoadStoreForType(Type *Ty, bool IsStore) const
virtual bool canSaveCmp(Loop *L, CondBrInst **BI, ScalarEvolution *SE, LoopInfo *LI, DominatorTree *DT, AssumptionCache *AC, TargetLibraryInfo *LibInfo) const
virtual bool preferInLoopReduction(RecurKind Kind, Type *Ty) const
virtual bool isMultiversionedFunction(const Function &F) const
virtual InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) const
virtual bool isNoopAddrSpaceCast(unsigned, unsigned) const
virtual InstructionUniformity getInstructionUniformity(const Value *V) const
virtual bool isExpensiveToSpeculativelyExecute(const Instruction *I) const
virtual bool isLSRCostLess(const TTI::LSRCost &C1, const TTI::LSRCost &C2) const
virtual bool isLegalMaskedVectorHistogram(Type *AddrType, Type *DataType) const
virtual bool isLegalMaskedGather(Type *DataType, Align Alignment) const
virtual unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI, unsigned &JTSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) const
virtual bool isLegalAddImmediate(int64_t Imm) const
virtual InstructionCost getInsertExtractValueCost(unsigned Opcode, TTI::TargetCostKind CostKind) const
virtual InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind, const Instruction *I) const
virtual bool isLegalNTLoad(Type *DataType, Align Alignment) const
virtual InstructionCost getBranchMispredictPenalty() const
virtual bool isTargetIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID, int OpdIdx) const
virtual InstructionCost getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx, const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind) const
virtual InstructionCost getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty) const
bool isConstantStridedAccessLessThan(ScalarEvolution *SE, const SCEV *Ptr, int64_t MergeDistance) const
virtual Value * getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst, Type *ExpectedType, bool CanCreate=true) const
virtual bool enableMaskedInterleavedAccessVectorization() const
virtual std::pair< KnownBits, KnownBits > computeKnownBitsAddrSpaceCast(unsigned ToAS, const Value &PtrOp) const
virtual Type * getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length, unsigned SrcAddrSpace, unsigned DestAddrSpace, Align SrcAlign, Align DestAlign, std::optional< uint32_t > AtomicElementSize) const
virtual unsigned getInliningThresholdMultiplier() const
TargetTransformInfoImplBase(const DataLayout &DL)
virtual InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) const
virtual InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info, TTI::OperandValueInfo Op2Info, const Instruction *I) const
virtual bool shouldExpandReduction(const IntrinsicInst *II) const
virtual bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const
virtual unsigned getGISelRematGlobalCost() const
virtual InstructionCost getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef< unsigned > Indices, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, bool UseMaskForCond, bool UseMaskForGaps) const
virtual bool isTypeLegal(Type *Ty) const
virtual unsigned getAssumedAddrSpace(const Value *V) const
virtual bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth, unsigned AddressSpace, Align Alignment, unsigned *Fast) const
virtual unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize, unsigned ChainSizeInBytes, VectorType *VecTy) const
virtual InstructionCost getExtendedReductionCost(unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *Ty, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind) const
virtual unsigned getInliningCostBenefitAnalysisSavingsMultiplier() const
virtual bool areTypesABICompatible(const Function *Caller, const Function *Callee, ArrayRef< Type * > Types) const
virtual unsigned getNumBytesToPadGlobalArray(unsigned Size, Type *ArrayType) const
virtual bool preferToKeepConstantsAttached(const Instruction &Inst, const Function &Fn) const
virtual InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *DstTy, VectorType *SrcTy, ArrayRef< int > Mask, TTI::TargetCostKind CostKind, int Index, VectorType *SubTp, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr) const
virtual bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) const
virtual bool supportsTailCallFor(const CallBase *CB) const
virtual std::optional< unsigned > getMaxVScale() const
virtual bool shouldConsiderAddressTypePromotion(const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const
virtual InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index, const Value *Op0, const Value *Op1, TTI::VectorInstrContext VIC=TTI::VectorInstrContext::None) const
virtual bool isTargetIntrinsicWithScalarOpAtArg(Intrinsic::ID ID, unsigned ScalarOpdIdx) const
virtual bool shouldConsiderVectorizationRegPressure() const
virtual InstructionCost getMemcpyCost(const Instruction *I) const
virtual unsigned getInliningCostBenefitAnalysisProfitableMultiplier() const
virtual bool useFastCCForInternalCall(Function &F) const
virtual bool preferEpilogueVectorization(ElementCount Iters) const
virtual void getUnrollingPreferences(Loop *, ScalarEvolution &, TTI::UnrollingPreferences &, OptimizationRemarkEmitter *) const
TargetTransformInfoImplBase(const TargetTransformInfoImplBase &Arg)=default
virtual bool isProfitableToSinkOperands(Instruction *I, SmallVectorImpl< Use * > &Ops) const
virtual bool supportsEfficientVectorElementLoadStore() const
virtual unsigned getMinPrefetchStride(unsigned NumMemAccesses, unsigned NumStridedMemAccesses, unsigned NumPrefetches, bool HasCall) const
virtual APInt getPriorityMask(const Function &F) const
virtual unsigned getMinTripCountTailFoldingThreshold() const
virtual TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const
virtual void collectKernelLaunchBounds(const Function &F, SmallVectorImpl< std::pair< StringRef, int64_t > > &LB) const
bool supportsTailCallFor(const CallBase *CB) const override
bool isExpensiveToSpeculativelyExecute(const Instruction *I) const override
InstructionCost getInstructionCost(const User *U, ArrayRef< const Value * > Operands, TTI::TargetCostKind CostKind) const override
InstructionCost getPointersChainCost(ArrayRef< const Value * > Ptrs, const Value *Base, const TTI::PointersChainInfo &Info, Type *AccessTy, TTI::TargetCostKind CostKind) const override
InstructionCost getGEPCost(Type *PointeeType, const Value *Ptr, ArrayRef< const Value * > Operands, Type *AccessType, TTI::TargetCostKind CostKind) const override
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
static LLVM_ABI CastContextHint getCastContextHint(const Instruction *I)
Calculates a CastContextHint from I.
VectorInstrContext
Represents a hint about the context in which an insert/extract is used.
@ None
The insert/extract is not used with a load/store.
MaskKind
Some targets only support masked load/store with a constant mask.
static LLVM_ABI OperandValueInfo getOperandInfo(const Value *V)
Collect properties of V used in cost analysis, e.g. OP_PowerOf2.
TargetCostKind
The kind of cost model.
@ TCK_RecipThroughput
Reciprocal throughput.
@ TCK_CodeSize
Instruction code size.
@ TCK_SizeAndLatency
The weighted sum of size and latency.
@ TCK_Latency
The latency of instruction.
PopcntSupportKind
Flags indicating the kind of support for population count.
@ TCC_Expensive
The cost of a 'div' instruction on x86.
@ TCC_Free
Expected to fold away in lowering.
@ TCC_Basic
The cost of a typical 'add' instruction.
MemIndexedMode
The type of load/store indexing.
AddressingModeKind
Which addressing mode Loop Strength Reduction will try to generate.
@ AMK_None
Don't prefer any addressing mode.
static VectorInstrContext getVectorInstrContextHint(const Instruction *I)
Calculates a VectorInstrContext from I.
ShuffleKind
The various kinds of shuffle patterns for vector queries.
@ SK_InsertSubvector
InsertSubvector. Index indicates start offset.
@ SK_Select
Selects elements from the corresponding lane of either source operand.
@ SK_PermuteSingleSrc
Shuffle elements of single source vector with any shuffle mask.
@ SK_Transpose
Transpose two vectors.
@ SK_Splice
Concatenates elements from the first input vector with elements of the second input vector.
@ SK_Broadcast
Broadcast element 0 to all other elements.
@ SK_PermuteTwoSrc
Merge elements from two source vectors into one with any shuffle mask.
@ SK_Reverse
Reverse the order of the vector.
@ SK_ExtractSubvector
ExtractSubvector Index indicates start offset.
CastContextHint
Represents a hint about the context in which a cast is used.
CacheLevel
The possible cache levels.
This class represents a truncation of integer types.
static constexpr TypeSize get(ScalarTy Quantity, bool Scalable)
Definition TypeSize.h:340
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:290
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
Definition Type.cpp:65
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
Definition Type.cpp:311
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:236
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition Type.h:287
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
Definition Type.cpp:317
This is the common base class for vector predication intrinsics.
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:713
Base class of all SIMD vector types.
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
CallInst * Call
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ Fast
Attempts to make calls as fast as possible (e.g.
Definition CallingConv.h:41
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
This namespace contains an enum with a value for every intrinsic/builtin function known by LLVM.
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
bool match(Val *V, const Pattern &P)
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
This is an optimization pass for GlobalISel generic memory operations.
@ Length
Definition DWP.cpp:532
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1739
InstructionCost Cost
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2554
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
FunctionAddr VTableAddr uintptr_t uintptr_t DataSize
Definition InstrProf.h:299
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition MathExtras.h:284
LLVM_ABI Value * getSplatValue(const Value *V)
Get splat value if the input is a splat vector or return nullptr.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1746
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
constexpr int PoisonMaskElem
RecurKind
These are the kinds of recurrences that we support.
constexpr unsigned BitWidth
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
gep_type_iterator gep_type_begin(const User *GEP)
@ DataWithoutLaneMask
Same as Data, but avoids using the get.active.lane.mask intrinsic to calculate the mask and instead i...
InstructionUniformity
Enum describing how instructions behave with respect to uniformity and divergence,...
Definition Uniformity.h:18
@ Default
The result values are uniform if and only if all operands are uniform.
Definition Uniformity.h:20
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
Attributes of a target dependent hardware loop.
KnownBits anyextOrTrunc(unsigned BitWidth) const
Return known bits for an "any" extension or truncation of the value we're tracking.
Definition KnownBits.h:192
Information about a load/store intrinsic defined by the target.
Returns options for expansion of memcmp. IsZeroCmp is.
Describe known properties for a set of pointers.
Parameters that control the generic loop unrolling transformation.