LLVM 23.0.0git
VPlanRecipes.cpp
Go to the documentation of this file.
1//===- VPlanRecipes.cpp - Implementations for VPlan recipes ---------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file contains implementations for different VPlan recipes.
11///
12//===----------------------------------------------------------------------===//
13
15#include "VPlan.h"
16#include "VPlanAnalysis.h"
17#include "VPlanHelpers.h"
18#include "VPlanPatternMatch.h"
19#include "VPlanUtils.h"
20#include "llvm/ADT/STLExtras.h"
23#include "llvm/ADT/Twine.h"
28#include "llvm/IR/BasicBlock.h"
29#include "llvm/IR/IRBuilder.h"
30#include "llvm/IR/Instruction.h"
32#include "llvm/IR/Intrinsics.h"
33#include "llvm/IR/Type.h"
34#include "llvm/IR/Value.h"
37#include "llvm/Support/Debug.h"
41#include <cassert>
42
43using namespace llvm;
44using namespace llvm::VPlanPatternMatch;
45
47
48#define LV_NAME "loop-vectorize"
49#define DEBUG_TYPE LV_NAME
50
52 switch (getVPRecipeID()) {
53 case VPExpressionSC:
54 return cast<VPExpressionRecipe>(this)->mayReadOrWriteMemory();
55 case VPInstructionSC: {
56 auto *VPI = cast<VPInstruction>(this);
57 // Loads read from memory but don't write to memory.
58 if (VPI->getOpcode() == Instruction::Load)
59 return false;
60 return VPI->opcodeMayReadOrWriteFromMemory();
61 }
62 case VPInterleaveEVLSC:
63 case VPInterleaveSC:
64 return cast<VPInterleaveBase>(this)->getNumStoreOperands() > 0;
65 case VPWidenStoreEVLSC:
66 case VPWidenStoreSC:
67 return true;
68 case VPReplicateSC:
69 return cast<Instruction>(getVPSingleValue()->getUnderlyingValue())
70 ->mayWriteToMemory();
71 case VPWidenCallSC:
72 return !cast<VPWidenCallRecipe>(this)
73 ->getCalledScalarFunction()
74 ->onlyReadsMemory();
75 case VPWidenIntrinsicSC:
76 return cast<VPWidenIntrinsicRecipe>(this)->mayWriteToMemory();
77 case VPActiveLaneMaskPHISC:
78 case VPCurrentIterationPHISC:
79 case VPBranchOnMaskSC:
80 case VPDerivedIVSC:
81 case VPFirstOrderRecurrencePHISC:
82 case VPReductionPHISC:
83 case VPScalarIVStepsSC:
84 case VPPredInstPHISC:
85 return false;
86 case VPBlendSC:
87 case VPReductionEVLSC:
88 case VPReductionSC:
89 case VPVectorPointerSC:
90 case VPWidenCanonicalIVSC:
91 case VPWidenCastSC:
92 case VPWidenGEPSC:
93 case VPWidenIntOrFpInductionSC:
94 case VPWidenLoadEVLSC:
95 case VPWidenLoadSC:
96 case VPWidenPHISC:
97 case VPWidenPointerInductionSC:
98 case VPWidenSC: {
99 const Instruction *I =
100 dyn_cast_or_null<Instruction>(getVPSingleValue()->getUnderlyingValue());
101 (void)I;
102 assert((!I || !I->mayWriteToMemory()) &&
103 "underlying instruction may write to memory");
104 return false;
105 }
106 default:
107 return true;
108 }
109}
110
112 switch (getVPRecipeID()) {
113 case VPExpressionSC:
114 return cast<VPExpressionRecipe>(this)->mayReadOrWriteMemory();
115 case VPInstructionSC:
116 return cast<VPInstruction>(this)->opcodeMayReadOrWriteFromMemory();
117 case VPWidenLoadEVLSC:
118 case VPWidenLoadSC:
119 return true;
120 case VPReplicateSC:
121 return cast<Instruction>(getVPSingleValue()->getUnderlyingValue())
122 ->mayReadFromMemory();
123 case VPWidenCallSC:
124 return !cast<VPWidenCallRecipe>(this)
125 ->getCalledScalarFunction()
126 ->onlyWritesMemory();
127 case VPWidenIntrinsicSC:
128 return cast<VPWidenIntrinsicRecipe>(this)->mayReadFromMemory();
129 case VPBranchOnMaskSC:
130 case VPDerivedIVSC:
131 case VPCurrentIterationPHISC:
132 case VPFirstOrderRecurrencePHISC:
133 case VPReductionPHISC:
134 case VPPredInstPHISC:
135 case VPScalarIVStepsSC:
136 case VPWidenStoreEVLSC:
137 case VPWidenStoreSC:
138 return false;
139 case VPBlendSC:
140 case VPReductionEVLSC:
141 case VPReductionSC:
142 case VPVectorPointerSC:
143 case VPWidenCanonicalIVSC:
144 case VPWidenCastSC:
145 case VPWidenGEPSC:
146 case VPWidenIntOrFpInductionSC:
147 case VPWidenPHISC:
148 case VPWidenPointerInductionSC:
149 case VPWidenSC: {
150 const Instruction *I =
151 dyn_cast_or_null<Instruction>(getVPSingleValue()->getUnderlyingValue());
152 (void)I;
153 assert((!I || !I->mayReadFromMemory()) &&
154 "underlying instruction may read from memory");
155 return false;
156 }
157 default:
158 // FIXME: Return false if the recipe represents an interleaved store.
159 return true;
160 }
161}
162
164 switch (getVPRecipeID()) {
165 case VPExpressionSC:
166 return cast<VPExpressionRecipe>(this)->mayHaveSideEffects();
167 case VPActiveLaneMaskPHISC:
168 case VPDerivedIVSC:
169 case VPCurrentIterationPHISC:
170 case VPFirstOrderRecurrencePHISC:
171 case VPReductionPHISC:
172 case VPPredInstPHISC:
173 case VPVectorEndPointerSC:
174 return false;
175 case VPInstructionSC: {
176 auto *VPI = cast<VPInstruction>(this);
177 return mayWriteToMemory() ||
178 VPI->getOpcode() == VPInstruction::BranchOnCount ||
179 VPI->getOpcode() == VPInstruction::BranchOnCond ||
180 VPI->getOpcode() == VPInstruction::BranchOnTwoConds;
181 }
182 case VPWidenCallSC: {
183 Function *Fn = cast<VPWidenCallRecipe>(this)->getCalledScalarFunction();
184 return mayWriteToMemory() || !Fn->doesNotThrow() || !Fn->willReturn();
185 }
186 case VPWidenIntrinsicSC:
187 return cast<VPWidenIntrinsicRecipe>(this)->mayHaveSideEffects();
188 case VPBlendSC:
189 case VPReductionEVLSC:
190 case VPReductionSC:
191 case VPScalarIVStepsSC:
192 case VPVectorPointerSC:
193 case VPWidenCanonicalIVSC:
194 case VPWidenCastSC:
195 case VPWidenGEPSC:
196 case VPWidenIntOrFpInductionSC:
197 case VPWidenPHISC:
198 case VPWidenPointerInductionSC:
199 case VPWidenSC: {
200 const Instruction *I =
201 dyn_cast_or_null<Instruction>(getVPSingleValue()->getUnderlyingValue());
202 (void)I;
203 assert((!I || !I->mayHaveSideEffects()) &&
204 "underlying instruction has side-effects");
205 return false;
206 }
207 case VPInterleaveEVLSC:
208 case VPInterleaveSC:
209 return mayWriteToMemory();
210 case VPWidenLoadEVLSC:
211 case VPWidenLoadSC:
212 case VPWidenStoreEVLSC:
213 case VPWidenStoreSC:
214 assert(
215 cast<VPWidenMemoryRecipe>(this)->getIngredient().mayHaveSideEffects() ==
217 "mayHaveSideffects result for ingredient differs from this "
218 "implementation");
219 return mayWriteToMemory();
220 case VPReplicateSC: {
221 auto *R = cast<VPReplicateRecipe>(this);
222 return R->getUnderlyingInstr()->mayHaveSideEffects();
223 }
224 default:
225 return true;
226 }
227}
228
230 switch (getVPRecipeID()) {
231 default:
232 return false;
233 case VPInstructionSC: {
234 unsigned Opcode = cast<VPInstruction>(this)->getOpcode();
235 if (Instruction::isCast(Opcode))
236 return true;
237
238 switch (Opcode) {
239 default:
240 return false;
241 case Instruction::Add:
242 case Instruction::Sub:
243 case Instruction::Mul:
244 case Instruction::GetElementPtr:
245 return true;
246 }
247 }
248 }
249}
250
252 assert(!Parent && "Recipe already in some VPBasicBlock");
253 assert(InsertPos->getParent() &&
254 "Insertion position not in any VPBasicBlock");
255 InsertPos->getParent()->insert(this, InsertPos->getIterator());
256}
257
258void VPRecipeBase::insertBefore(VPBasicBlock &BB,
260 assert(!Parent && "Recipe already in some VPBasicBlock");
261 assert(I == BB.end() || I->getParent() == &BB);
262 BB.insert(this, I);
263}
264
266 assert(!Parent && "Recipe already in some VPBasicBlock");
267 assert(InsertPos->getParent() &&
268 "Insertion position not in any VPBasicBlock");
269 InsertPos->getParent()->insert(this, std::next(InsertPos->getIterator()));
270}
271
273 assert(getParent() && "Recipe not in any VPBasicBlock");
275 Parent = nullptr;
276}
277
279 assert(getParent() && "Recipe not in any VPBasicBlock");
281}
282
285 insertAfter(InsertPos);
286}
287
293
295 // Get the underlying instruction for the recipe, if there is one. It is used
296 // to
297 // * decide if cost computation should be skipped for this recipe,
298 // * apply forced target instruction cost.
299 Instruction *UI = nullptr;
300 if (auto *S = dyn_cast<VPSingleDefRecipe>(this))
301 UI = dyn_cast_or_null<Instruction>(S->getUnderlyingValue());
302 else if (auto *IG = dyn_cast<VPInterleaveBase>(this))
303 UI = IG->getInsertPos();
304 else if (auto *WidenMem = dyn_cast<VPWidenMemoryRecipe>(this))
305 UI = &WidenMem->getIngredient();
306
307 InstructionCost RecipeCost;
308 if (UI && Ctx.skipCostComputation(UI, VF.isVector())) {
309 RecipeCost = 0;
310 } else {
311 RecipeCost = computeCost(VF, Ctx);
312 if (ForceTargetInstructionCost.getNumOccurrences() > 0 &&
313 RecipeCost.isValid()) {
314 if (UI)
316 else
317 RecipeCost = InstructionCost(0);
318 }
319 }
320
321 LLVM_DEBUG({
322 dbgs() << "Cost of " << RecipeCost << " for VF " << VF << ": ";
323 dump();
324 });
325 return RecipeCost;
326}
327
329 VPCostContext &Ctx) const {
330 llvm_unreachable("subclasses should implement computeCost");
331}
332
334 return (getVPRecipeID() >= VPFirstPHISC && getVPRecipeID() <= VPLastPHISC) ||
336}
337
339 auto *VPI = dyn_cast<VPInstruction>(this);
340 return VPI && Instruction::isCast(VPI->getOpcode());
341}
342
344 assert(OpType == Other.OpType && "OpType must match");
345 switch (OpType) {
346 case OperationType::OverflowingBinOp:
347 WrapFlags.HasNUW &= Other.WrapFlags.HasNUW;
348 WrapFlags.HasNSW &= Other.WrapFlags.HasNSW;
349 break;
350 case OperationType::Trunc:
351 TruncFlags.HasNUW &= Other.TruncFlags.HasNUW;
352 TruncFlags.HasNSW &= Other.TruncFlags.HasNSW;
353 break;
354 case OperationType::DisjointOp:
355 DisjointFlags.IsDisjoint &= Other.DisjointFlags.IsDisjoint;
356 break;
357 case OperationType::PossiblyExactOp:
358 ExactFlags.IsExact &= Other.ExactFlags.IsExact;
359 break;
360 case OperationType::GEPOp:
361 GEPFlagsStorage &= Other.GEPFlagsStorage;
362 break;
363 case OperationType::FPMathOp:
364 case OperationType::FCmp:
365 assert((OpType != OperationType::FCmp ||
366 FCmpFlags.CmpPredStorage == Other.FCmpFlags.CmpPredStorage) &&
367 "Cannot drop CmpPredicate");
368 getFMFsRef().NoNaNs &= Other.getFMFsRef().NoNaNs;
369 getFMFsRef().NoInfs &= Other.getFMFsRef().NoInfs;
370 break;
371 case OperationType::NonNegOp:
372 NonNegFlags.NonNeg &= Other.NonNegFlags.NonNeg;
373 break;
374 case OperationType::Cmp:
375 assert(CmpPredStorage == Other.CmpPredStorage &&
376 "Cannot drop CmpPredicate");
377 break;
378 case OperationType::ReductionOp:
379 assert(ReductionFlags.Kind == Other.ReductionFlags.Kind &&
380 "Cannot change RecurKind");
381 assert(ReductionFlags.IsOrdered == Other.ReductionFlags.IsOrdered &&
382 "Cannot change IsOrdered");
383 assert(ReductionFlags.IsInLoop == Other.ReductionFlags.IsInLoop &&
384 "Cannot change IsInLoop");
385 getFMFsRef().NoNaNs &= Other.getFMFsRef().NoNaNs;
386 getFMFsRef().NoInfs &= Other.getFMFsRef().NoInfs;
387 break;
388 case OperationType::Other:
389 break;
390 }
391}
392
394 assert((OpType == OperationType::FPMathOp || OpType == OperationType::FCmp ||
395 OpType == OperationType::ReductionOp ||
396 OpType == OperationType::Other) &&
397 "recipe doesn't have fast math flags");
398 if (OpType == OperationType::Other)
399 return FastMathFlags();
400 const FastMathFlagsTy &F = getFMFsRef();
401 FastMathFlags Res;
402 Res.setAllowReassoc(F.AllowReassoc);
403 Res.setNoNaNs(F.NoNaNs);
404 Res.setNoInfs(F.NoInfs);
405 Res.setNoSignedZeros(F.NoSignedZeros);
406 Res.setAllowReciprocal(F.AllowReciprocal);
407 Res.setAllowContract(F.AllowContract);
408 Res.setApproxFunc(F.ApproxFunc);
409 return Res;
410}
411
412#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
414
415void VPRecipeBase::print(raw_ostream &O, const Twine &Indent,
416 VPSlotTracker &SlotTracker) const {
417 printRecipe(O, Indent, SlotTracker);
418 if (auto DL = getDebugLoc()) {
419 O << ", !dbg ";
420 DL.print(O);
421 }
422
423 if (auto *Metadata = dyn_cast<VPIRMetadata>(this))
425}
426#endif
427
428template <unsigned PartOpIdx>
429VPValue *
431 if (U.getNumOperands() == PartOpIdx + 1)
432 return U.getOperand(PartOpIdx);
433 return nullptr;
434}
435
436template <unsigned PartOpIdx>
438 if (auto *UnrollPartOp = getUnrollPartOperand(U))
439 return cast<VPConstantInt>(UnrollPartOp)->getZExtValue();
440 return 0;
441}
442
443namespace llvm {
444template class VPUnrollPartAccessor<1>;
445template class VPUnrollPartAccessor<2>;
446template class VPUnrollPartAccessor<3>;
447}
448
450 const VPIRFlags &Flags, const VPIRMetadata &MD,
451 DebugLoc DL, const Twine &Name)
452 : VPRecipeWithIRFlags(VPRecipeBase::VPInstructionSC, Operands, Flags, DL),
453 VPIRMetadata(MD), Opcode(Opcode), Name(Name.str()) {
455 "Set flags not supported for the provided opcode");
457 "Opcode requires specific flags to be set");
461 "number of operands does not match opcode");
462}
463
464/// For call VPInstructions, return the operand index of the called function.
465/// The function is either the last operand (for unmasked calls) or the
466/// second-to-last operand (for masked calls).
467static unsigned getCalledFnOperandIndex(const VPInstruction &VPI) {
468 assert(VPI.getOpcode() == Instruction::Call && "must be a call");
469 unsigned NumOps = VPI.getNumOperands();
470 auto *LastOp = dyn_cast<VPIRValue>(VPI.getOperand(NumOps - 1));
471 if (LastOp && isa<Function>(LastOp->getValue()))
472 return NumOps - 1;
473 assert(
474 isa<Function>(cast<VPIRValue>(VPI.getOperand(NumOps - 2))->getValue()) &&
475 "expected function operand");
476 return NumOps - 2;
477}
478
479/// For call VPInstructions, return the called function.
481 unsigned Idx = getCalledFnOperandIndex(VPI);
482 return cast<Function>(cast<VPIRValue>(VPI.getOperand(Idx))->getValue());
483}
484
486 if (Instruction::isUnaryOp(Opcode) || Instruction::isCast(Opcode))
487 return 1;
488
489 if (Instruction::isBinaryOp(Opcode))
490 return 2;
491
492 switch (Opcode) {
495 return 0;
496 case Instruction::Alloca:
497 case Instruction::ExtractValue:
498 case Instruction::Freeze:
499 case Instruction::Load:
512 return 1;
513 case Instruction::ICmp:
514 case Instruction::FCmp:
515 case Instruction::ExtractElement:
516 case Instruction::Store:
526 return 2;
527 case Instruction::InsertElement:
528 case Instruction::Select:
531 return 3;
532 case Instruction::Call:
533 return getCalledFnOperandIndex(*this) + 1;
534 case Instruction::GetElementPtr:
535 case Instruction::PHI:
536 case Instruction::Switch:
546 // Cannot determine the number of operands from the opcode.
547 return -1u;
548 }
549 llvm_unreachable("all cases should be handled above");
550}
551
555
556bool VPInstruction::canGenerateScalarForFirstLane() const {
558 return true;
560 return true;
561 switch (Opcode) {
562 case Instruction::Freeze:
563 case Instruction::ICmp:
564 case Instruction::PHI:
565 case Instruction::Select:
575 return true;
576 default:
577 return false;
578 }
579}
580
582 if (Kind == RecurKind::Sub)
583 return Instruction::Add;
584 if (Kind == RecurKind::FSub)
585 return Instruction::FAdd;
586 llvm_unreachable("RecurKind should be Sub/FSub.");
587}
588
589Value *VPInstruction::generate(VPTransformState &State) {
590 IRBuilderBase &Builder = State.Builder;
591
593 bool OnlyFirstLaneUsed = vputils::onlyFirstLaneUsed(this);
594 Value *A = State.get(getOperand(0), OnlyFirstLaneUsed);
595 Value *B = State.get(getOperand(1), OnlyFirstLaneUsed);
596 auto *Res =
597 Builder.CreateBinOp((Instruction::BinaryOps)getOpcode(), A, B, Name);
598 if (auto *I = dyn_cast<Instruction>(Res))
599 applyFlags(*I);
600 return Res;
601 }
602
603 switch (getOpcode()) {
604 case VPInstruction::Not: {
605 bool OnlyFirstLaneUsed = vputils::onlyFirstLaneUsed(this);
606 Value *A = State.get(getOperand(0), OnlyFirstLaneUsed);
607 return Builder.CreateNot(A, Name);
608 }
609 case Instruction::ExtractElement: {
610 assert(State.VF.isVector() && "Only extract elements from vectors");
611 if (auto *Idx = dyn_cast<VPConstantInt>(getOperand(1)))
612 return State.get(getOperand(0), VPLane(Idx->getZExtValue()));
613 Value *Vec = State.get(getOperand(0));
614 Value *Idx = State.get(getOperand(1), /*IsScalar=*/true);
615 return Builder.CreateExtractElement(Vec, Idx, Name);
616 }
617 case Instruction::InsertElement: {
618 assert(State.VF.isVector() && "Can only insert elements into vectors");
619 Value *Vec = State.get(getOperand(0), /*IsScalar=*/false);
620 Value *Elt = State.get(getOperand(1), /*IsScalar=*/true);
621 Value *Idx = State.get(getOperand(2), /*IsScalar=*/true);
622 return Builder.CreateInsertElement(Vec, Elt, Idx, Name);
623 }
624 case Instruction::Freeze: {
626 return Builder.CreateFreeze(Op, Name);
627 }
628 case Instruction::FCmp:
629 case Instruction::ICmp: {
630 bool OnlyFirstLaneUsed = vputils::onlyFirstLaneUsed(this);
631 Value *A = State.get(getOperand(0), OnlyFirstLaneUsed);
632 Value *B = State.get(getOperand(1), OnlyFirstLaneUsed);
633 return Builder.CreateCmp(getPredicate(), A, B, Name);
634 }
635 case Instruction::PHI: {
636 llvm_unreachable("should be handled by VPPhi::execute");
637 }
638 case Instruction::Select: {
639 bool OnlyFirstLaneUsed = vputils::onlyFirstLaneUsed(this);
640 Value *Cond =
641 State.get(getOperand(0),
642 OnlyFirstLaneUsed || vputils::isSingleScalar(getOperand(0)));
643 Value *Op1 = State.get(getOperand(1), OnlyFirstLaneUsed);
644 Value *Op2 = State.get(getOperand(2), OnlyFirstLaneUsed);
645 return Builder.CreateSelectFMF(Cond, Op1, Op2, getFastMathFlags(), Name);
646 }
648 // Get first lane of vector induction variable.
649 Value *VIVElem0 = State.get(getOperand(0), VPLane(0));
650 // Get the original loop tripcount.
651 Value *ScalarTC = State.get(getOperand(1), VPLane(0));
652
653 // If this part of the active lane mask is scalar, generate the CMP directly
654 // to avoid unnecessary extracts.
655 if (State.VF.isScalar())
656 return Builder.CreateCmp(CmpInst::Predicate::ICMP_ULT, VIVElem0, ScalarTC,
657 Name);
658
659 ElementCount EC = State.VF.multiplyCoefficientBy(
660 cast<VPConstantInt>(getOperand(2))->getZExtValue());
661 auto *PredTy = VectorType::get(Builder.getInt1Ty(), EC);
662 return Builder.CreateIntrinsic(Intrinsic::get_active_lane_mask,
663 {PredTy, ScalarTC->getType()},
664 {VIVElem0, ScalarTC}, nullptr, Name);
665 }
667 // Generate code to combine the previous and current values in vector v3.
668 //
669 // vector.ph:
670 // v_init = vector(..., ..., ..., a[-1])
671 // br vector.body
672 //
673 // vector.body
674 // i = phi [0, vector.ph], [i+4, vector.body]
675 // v1 = phi [v_init, vector.ph], [v2, vector.body]
676 // v2 = a[i, i+1, i+2, i+3];
677 // v3 = vector(v1(3), v2(0, 1, 2))
678
679 auto *V1 = State.get(getOperand(0));
680 if (!V1->getType()->isVectorTy())
681 return V1;
682 Value *V2 = State.get(getOperand(1));
683 return Builder.CreateVectorSpliceRight(V1, V2, 1, Name);
684 }
686 Value *ScalarTC = State.get(getOperand(0), VPLane(0));
687 Value *VFxUF = State.get(getOperand(1), VPLane(0));
688 Value *Sub = Builder.CreateSub(ScalarTC, VFxUF);
689 Value *Cmp =
690 Builder.CreateICmp(CmpInst::Predicate::ICMP_UGT, ScalarTC, VFxUF);
692 return Builder.CreateSelect(Cmp, Sub, Zero);
693 }
695 // TODO: Restructure this code with an explicit remainder loop, vsetvli can
696 // be outside of the main loop.
697 Value *AVL = State.get(getOperand(0), /*IsScalar*/ true);
698 // Compute EVL
699 assert(AVL->getType()->isIntegerTy() &&
700 "Requested vector length should be an integer.");
701
702 assert(State.VF.isScalable() && "Expected scalable vector factor.");
703 Value *VFArg = Builder.getInt32(State.VF.getKnownMinValue());
704
705 Value *EVL = Builder.CreateIntrinsic(
706 Builder.getInt32Ty(), Intrinsic::experimental_get_vector_length,
707 {AVL, VFArg, Builder.getTrue()});
708 return EVL;
709 }
711 Value *Cond = State.get(getOperand(0), VPLane(0));
712 // Replace the temporary unreachable terminator with a new conditional
713 // branch, hooking it up to backward destination for latch blocks now, and
714 // to forward destination(s) later when they are created.
715 // Second successor may be backwards - iff it is already in VPBB2IRBB.
716 VPBasicBlock *SecondVPSucc =
717 cast<VPBasicBlock>(getParent()->getSuccessors()[1]);
718 BasicBlock *SecondIRSucc = State.CFG.VPBB2IRBB.lookup(SecondVPSucc);
719 BasicBlock *IRBB = State.CFG.VPBB2IRBB[getParent()];
720 auto *Br = Builder.CreateCondBr(Cond, IRBB, SecondIRSucc);
721 // First successor is always forward, reset it to nullptr.
722 Br->setSuccessor(0, nullptr);
724 applyMetadata(*Br);
725 return Br;
726 }
728 return Builder.CreateVectorSplat(
729 State.VF, State.get(getOperand(0), /*IsScalar*/ true), "broadcast");
730 }
732 // For struct types, we need to build a new 'wide' struct type, where each
733 // element is widened, i.e., we create a struct of vectors.
734 auto *StructTy =
736 Value *Res = PoisonValue::get(toVectorizedTy(StructTy, State.VF));
737 for (const auto &[LaneIndex, Op] : enumerate(operands())) {
738 for (unsigned FieldIndex = 0; FieldIndex != StructTy->getNumElements();
739 FieldIndex++) {
740 Value *ScalarValue =
741 Builder.CreateExtractValue(State.get(Op, true), FieldIndex);
742 Value *VectorValue = Builder.CreateExtractValue(Res, FieldIndex);
743 VectorValue =
744 Builder.CreateInsertElement(VectorValue, ScalarValue, LaneIndex);
745 Res = Builder.CreateInsertValue(Res, VectorValue, FieldIndex);
746 }
747 }
748 return Res;
749 }
751 auto *ScalarTy = State.TypeAnalysis.inferScalarType(getOperand(0));
752 auto NumOfElements = ElementCount::getFixed(getNumOperands());
753 Value *Res = PoisonValue::get(toVectorizedTy(ScalarTy, NumOfElements));
754 for (const auto &[Idx, Op] : enumerate(operands()))
755 Res = Builder.CreateInsertElement(Res, State.get(Op, true),
756 Builder.getInt32(Idx));
757 return Res;
758 }
760 if (State.VF.isScalar())
761 return State.get(getOperand(0), true);
762 IRBuilderBase::FastMathFlagGuard FMFG(Builder);
764 // If this start vector is scaled then it should produce a vector with fewer
765 // elements than the VF.
766 ElementCount VF = State.VF.divideCoefficientBy(
767 cast<VPConstantInt>(getOperand(2))->getZExtValue());
768 auto *Iden = Builder.CreateVectorSplat(VF, State.get(getOperand(1), true));
769 return Builder.CreateInsertElement(Iden, State.get(getOperand(0), true),
770 Builder.getInt32(0));
771 }
773 RecurKind RK = getRecurKind();
774 bool IsOrdered = isReductionOrdered();
775 bool IsInLoop = isReductionInLoop();
777 "FindIV should use min/max reduction kinds");
778
779 // The recipe may have multiple operands to be reduced together.
780 unsigned NumOperandsToReduce = getNumOperands();
781 VectorParts RdxParts(NumOperandsToReduce);
782 for (unsigned Part = 0; Part < NumOperandsToReduce; ++Part)
783 RdxParts[Part] = State.get(getOperand(Part), IsInLoop);
784
785 IRBuilderBase::FastMathFlagGuard FMFG(Builder);
787
788 // Reduce multiple operands into one.
789 Value *ReducedPartRdx = RdxParts[0];
790 if (IsOrdered) {
791 ReducedPartRdx = RdxParts[NumOperandsToReduce - 1];
792 } else {
793 // Floating-point operations should have some FMF to enable the reduction.
794 for (unsigned Part = 1; Part < NumOperandsToReduce; ++Part) {
795 Value *RdxPart = RdxParts[Part];
797 ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart);
798 else {
799 // For sub-recurrences, each part's reduction variable is already
800 // negative, we need to do: reduce.add(-acc_uf0 + -acc_uf1)
804 : (Instruction::BinaryOps)RecurrenceDescriptor::getOpcode(RK);
805 ReducedPartRdx =
806 Builder.CreateBinOp(Opcode, RdxPart, ReducedPartRdx, "bin.rdx");
807 }
808 }
809 }
810
811 // Create the reduction after the loop. Note that inloop reductions create
812 // the target reduction in the loop using a Reduction recipe.
813 if (State.VF.isVector() && !IsInLoop) {
814 // TODO: Support in-order reductions based on the recurrence descriptor.
815 // All ops in the reduction inherit fast-math-flags from the recurrence
816 // descriptor.
817 ReducedPartRdx = createSimpleReduction(Builder, ReducedPartRdx, RK);
818 }
819
820 return ReducedPartRdx;
821 }
824 unsigned Offset =
826 Value *Res;
827 if (State.VF.isVector()) {
828 assert(Offset <= State.VF.getKnownMinValue() &&
829 "invalid offset to extract from");
830 // Extract lane VF - Offset from the operand.
831 Res = State.get(getOperand(0), VPLane::getLaneFromEnd(State.VF, Offset));
832 } else {
833 // TODO: Remove ExtractLastLane for scalar VFs.
834 assert(Offset <= 1 && "invalid offset to extract from");
835 Res = State.get(getOperand(0));
836 }
838 Res->setName(Name);
839 return Res;
840 }
842 Value *A = State.get(getOperand(0));
843 Value *B = State.get(getOperand(1));
844 return Builder.CreateLogicalAnd(A, B, Name);
845 }
847 Value *A = State.get(getOperand(0));
848 Value *B = State.get(getOperand(1));
849 return Builder.CreateLogicalOr(A, B, Name);
850 }
852 assert((State.VF.isScalar() || vputils::onlyFirstLaneUsed(this)) &&
853 "can only generate first lane for PtrAdd");
854 Value *Ptr = State.get(getOperand(0), VPLane(0));
855 Value *Addend = State.get(getOperand(1), VPLane(0));
856 return Builder.CreatePtrAdd(Ptr, Addend, Name, getGEPNoWrapFlags());
857 }
859 Value *Ptr =
861 Value *Addend = State.get(getOperand(1));
862 return Builder.CreatePtrAdd(Ptr, Addend, Name, getGEPNoWrapFlags());
863 }
865 Value *Res = Builder.CreateFreeze(State.get(getOperand(0)));
866 for (VPValue *Op : drop_begin(operands()))
867 Res = Builder.CreateOr(Res, Builder.CreateFreeze(State.get(Op)));
868 return State.VF.isScalar() ? Res : Builder.CreateOrReduce(Res);
869 }
871 assert(getNumOperands() != 2 && "ExtractLane from single source should be "
872 "simplified to ExtractElement.");
873 Value *LaneToExtract = State.get(getOperand(0), true);
874 Type *IdxTy = State.TypeAnalysis.inferScalarType(getOperand(0));
875 Value *Res = nullptr;
876 Value *RuntimeVF = getRuntimeVF(Builder, IdxTy, State.VF);
877
878 for (unsigned Idx = 1; Idx != getNumOperands(); ++Idx) {
879 Value *VectorStart =
880 Builder.CreateMul(RuntimeVF, ConstantInt::get(IdxTy, Idx - 1));
881 Value *VectorIdx = Idx == 1
882 ? LaneToExtract
883 : Builder.CreateSub(LaneToExtract, VectorStart);
884 Value *Ext = State.VF.isScalar()
885 ? State.get(getOperand(Idx))
886 : Builder.CreateExtractElement(
887 State.get(getOperand(Idx)), VectorIdx);
888 if (Res) {
889 Value *Cmp = Builder.CreateICmpUGE(LaneToExtract, VectorStart);
890 Res = Builder.CreateSelect(Cmp, Ext, Res);
891 } else {
892 Res = Ext;
893 }
894 }
895 return Res;
896 }
898 Type *Ty = State.TypeAnalysis.inferScalarType(this);
899 if (getNumOperands() == 1) {
900 Value *Mask = State.get(getOperand(0));
901 return Builder.CreateCountTrailingZeroElems(Ty, Mask,
902 /*ZeroIsPoison=*/false, Name);
903 }
904 // If there are multiple operands, create a chain of selects to pick the
905 // first operand with an active lane and add the number of lanes of the
906 // preceding operands.
907 Value *RuntimeVF = getRuntimeVF(Builder, Ty, State.VF);
908 unsigned LastOpIdx = getNumOperands() - 1;
909 Value *Res = nullptr;
910 for (int Idx = LastOpIdx; Idx >= 0; --Idx) {
911 Value *TrailingZeros =
912 State.VF.isScalar()
913 ? Builder.CreateZExt(
914 Builder.CreateICmpEQ(State.get(getOperand(Idx)),
915 Builder.getFalse()),
916 Ty)
918 Ty, State.get(getOperand(Idx)),
919 /*ZeroIsPoison=*/false, Name);
920 Value *Current = Builder.CreateAdd(
921 Builder.CreateMul(RuntimeVF, ConstantInt::get(Ty, Idx)),
922 TrailingZeros);
923 if (Res) {
924 Value *Cmp = Builder.CreateICmpNE(TrailingZeros, RuntimeVF);
925 Res = Builder.CreateSelect(Cmp, Current, Res);
926 } else {
927 Res = Current;
928 }
929 }
930
931 return Res;
932 }
934 return State.get(getOperand(0), true);
936 return Builder.CreateVectorReverse(State.get(getOperand(0)), "reverse");
938 Value *Result = State.get(getOperand(0), /*IsScalar=*/true);
939 for (unsigned Idx = 1; Idx < getNumOperands(); Idx += 2) {
940 Value *Data = State.get(getOperand(Idx));
941 Value *Mask = State.get(getOperand(Idx + 1));
942 Type *VTy = Data->getType();
943
944 if (State.VF.isScalar())
945 Result = Builder.CreateSelect(Mask, Data, Result);
946 else
947 Result = Builder.CreateIntrinsic(
948 Intrinsic::experimental_vector_extract_last_active, {VTy},
949 {Data, Mask, Result});
950 }
951
952 return Result;
953 }
954 default:
955 llvm_unreachable("Unsupported opcode for instruction");
956 }
957}
958
960 unsigned Opcode, ElementCount VF, VPCostContext &Ctx) const {
961 Type *ScalarTy = Ctx.Types.inferScalarType(this);
962 Type *ResultTy = VF.isVector() ? toVectorTy(ScalarTy, VF) : ScalarTy;
963 switch (Opcode) {
964 case Instruction::FNeg:
965 return Ctx.TTI.getArithmeticInstrCost(Opcode, ResultTy, Ctx.CostKind);
966 case Instruction::UDiv:
967 case Instruction::SDiv:
968 case Instruction::SRem:
969 case Instruction::URem:
970 case Instruction::Add:
971 case Instruction::FAdd:
972 case Instruction::Sub:
973 case Instruction::FSub:
974 case Instruction::Mul:
975 case Instruction::FMul:
976 case Instruction::FDiv:
977 case Instruction::FRem:
978 case Instruction::Shl:
979 case Instruction::LShr:
980 case Instruction::AShr:
981 case Instruction::And:
982 case Instruction::Or:
983 case Instruction::Xor: {
984 // Certain instructions can be cheaper if they have a constant second
985 // operand. One example of this are shifts on x86.
986 VPValue *RHS = getOperand(1);
987 TargetTransformInfo::OperandValueInfo RHSInfo = Ctx.getOperandInfo(RHS);
988
989 if (RHSInfo.Kind == TargetTransformInfo::OK_AnyValue &&
992
995 if (CtxI)
996 Operands.append(CtxI->value_op_begin(), CtxI->value_op_end());
997 return Ctx.TTI.getArithmeticInstrCost(
998 Opcode, ResultTy, Ctx.CostKind,
999 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
1000 RHSInfo, Operands, CtxI, &Ctx.TLI);
1001 }
1002 case Instruction::Freeze:
1003 // NOTE: The only way to ask for the cost is via getInstructionCost, which
1004 // requires the actual vector instruction. Instead, both here and in the
1005 // LoopVectorizationCostModel::getInstructionCost the costs mirror the
1006 // current behaviour in llvm/Analysis/TargetTransformInfoImpl.h to keep
1007 // them in sync.
1008 return TTI::TCC_Free;
1009 case Instruction::ExtractValue:
1010 return Ctx.TTI.getInsertExtractValueCost(Instruction::ExtractValue,
1011 Ctx.CostKind);
1012 case Instruction::ICmp:
1013 case Instruction::FCmp: {
1014 Type *ScalarOpTy = Ctx.Types.inferScalarType(getOperand(0));
1015 Type *OpTy = VF.isVector() ? toVectorTy(ScalarOpTy, VF) : ScalarOpTy;
1017 return Ctx.TTI.getCmpSelInstrCost(
1018 Opcode, OpTy, CmpInst::makeCmpResultType(OpTy), getPredicate(),
1019 Ctx.CostKind, {TTI::OK_AnyValue, TTI::OP_None},
1020 {TTI::OK_AnyValue, TTI::OP_None}, CtxI);
1021 }
1022 case Instruction::BitCast: {
1023 Type *ScalarTy = Ctx.Types.inferScalarType(this);
1024 if (ScalarTy->isPointerTy())
1025 return 0;
1026 [[fallthrough]];
1027 }
1028 case Instruction::SExt:
1029 case Instruction::ZExt:
1030 case Instruction::FPToUI:
1031 case Instruction::FPToSI:
1032 case Instruction::FPExt:
1033 case Instruction::PtrToInt:
1034 case Instruction::PtrToAddr:
1035 case Instruction::IntToPtr:
1036 case Instruction::SIToFP:
1037 case Instruction::UIToFP:
1038 case Instruction::Trunc:
1039 case Instruction::FPTrunc:
1040 case Instruction::AddrSpaceCast: {
1041 // Computes the CastContextHint from a recipe that may access memory.
1042 auto ComputeCCH = [&](const VPRecipeBase *R) -> TTI::CastContextHint {
1043 if (isa<VPInterleaveBase>(R))
1045 if (const auto *ReplicateRecipe = dyn_cast<VPReplicateRecipe>(R)) {
1046 // Only compute CCH for memory operations, matching the legacy model
1047 // which only considers loads/stores for cast context hints.
1048 auto *UI = cast<Instruction>(ReplicateRecipe->getUnderlyingValue());
1049 if (!isa<LoadInst, StoreInst>(UI))
1051 return ReplicateRecipe->isPredicated() ? TTI::CastContextHint::Masked
1053 }
1054 const auto *WidenMemoryRecipe = dyn_cast<VPWidenMemoryRecipe>(R);
1055 if (WidenMemoryRecipe == nullptr)
1057 if (VF.isScalar())
1059 if (!WidenMemoryRecipe->isConsecutive())
1061 if (WidenMemoryRecipe->isMasked())
1064 };
1065
1066 VPValue *Operand = getOperand(0);
1068 bool IsReverse = false;
1069 // For Trunc/FPTrunc, get the context from the only user.
1070 if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) {
1071 auto GetOnlyUser = [](const VPSingleDefRecipe *R) -> VPRecipeBase * {
1072 if (R->getNumUsers() == 0 || R->hasMoreThanOneUniqueUser())
1073 return nullptr;
1074 return dyn_cast<VPRecipeBase>(*R->user_begin());
1075 };
1076 if (VPRecipeBase *Recipe = GetOnlyUser(this)) {
1077 if (match(Recipe,
1081 Recipe = GetOnlyUser(cast<VPSingleDefRecipe>(Recipe));
1082 IsReverse = true;
1083 }
1084 if (Recipe)
1085 CCH = ComputeCCH(Recipe);
1086 }
1087 }
1088 // For Z/Sext, get the context from the operand.
1089 else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
1090 Opcode == Instruction::FPExt) {
1091 if (auto *Recipe = Operand->getDefiningRecipe()) {
1092 VPValue *ReverseOp;
1093 if (match(Recipe,
1094 m_CombineOr(m_Reverse(m_VPValue(ReverseOp)),
1096 m_VPValue(ReverseOp))))) {
1097 Recipe = ReverseOp->getDefiningRecipe();
1098 IsReverse = true;
1099 }
1100 if (Recipe)
1101 CCH = ComputeCCH(Recipe);
1102 }
1103 }
1104 if (IsReverse && CCH != TTI::CastContextHint::None)
1106
1107 auto *ScalarSrcTy = Ctx.Types.inferScalarType(Operand);
1108 Type *SrcTy = VF.isVector() ? toVectorTy(ScalarSrcTy, VF) : ScalarSrcTy;
1109 // Arm TTI will use the underlying instruction to determine the cost.
1110 return Ctx.TTI.getCastInstrCost(
1111 Opcode, ResultTy, SrcTy, CCH, Ctx.CostKind,
1113 }
1114 case Instruction::Select: {
1116 bool IsScalarCond = getOperand(0)->isDefinedOutsideLoopRegions();
1117 Type *ScalarTy = Ctx.Types.inferScalarType(this);
1118
1119 VPValue *Op0, *Op1;
1120 bool IsLogicalAnd =
1121 match(this, m_c_LogicalAnd(m_VPValue(Op0), m_VPValue(Op1)));
1122 bool IsLogicalOr =
1123 match(this, m_c_LogicalOr(m_VPValue(Op0), m_VPValue(Op1)));
1124 // Also match the inverted forms:
1125 // select x, false, y --> !x & y (still AND)
1126 // select x, y, true --> !x | y (still OR)
1127 IsLogicalAnd |=
1128 match(this, m_Select(m_VPValue(Op0), m_False(), m_VPValue(Op1)));
1129 IsLogicalOr |=
1130 match(this, m_Select(m_VPValue(Op0), m_VPValue(Op1), m_True()));
1131
1132 if (!IsScalarCond && ScalarTy->getScalarSizeInBits() == 1 &&
1133 (IsLogicalAnd || IsLogicalOr)) {
1134 // select x, y, false --> x & y
1135 // select x, true, y --> x | y
1136 const auto [Op1VK, Op1VP] = Ctx.getOperandInfo(Op0);
1137 const auto [Op2VK, Op2VP] = Ctx.getOperandInfo(Op1);
1138
1140 if (SI && all_of(operands(),
1141 [](VPValue *Op) { return Op->getUnderlyingValue(); }))
1142 append_range(Operands, SI->operands());
1143 return Ctx.TTI.getArithmeticInstrCost(
1144 IsLogicalOr ? Instruction::Or : Instruction::And, ResultTy,
1145 Ctx.CostKind, {Op1VK, Op1VP}, {Op2VK, Op2VP}, Operands, SI);
1146 }
1147
1148 Type *CondTy = Ctx.Types.inferScalarType(getOperand(0));
1149 if (!IsScalarCond && VF.isVector())
1150 CondTy = VectorType::get(CondTy, VF);
1151
1152 llvm::CmpPredicate Pred;
1153 if (!match(getOperand(0), m_Cmp(Pred, m_VPValue(), m_VPValue())))
1154 if (auto *CondIRV = dyn_cast<VPIRValue>(getOperand(0)))
1155 if (auto *Cmp = dyn_cast<CmpInst>(CondIRV->getValue()))
1156 Pred = Cmp->getPredicate();
1157 Type *VectorTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);
1158 return Ctx.TTI.getCmpSelInstrCost(
1159 Instruction::Select, VectorTy, CondTy, Pred, Ctx.CostKind,
1160 {TTI::OK_AnyValue, TTI::OP_None}, {TTI::OK_AnyValue, TTI::OP_None}, SI);
1161 }
1162 }
1163 llvm_unreachable("called for unsupported opcode");
1164}
1165
1167 VPCostContext &Ctx) const {
1169 if (!getUnderlyingValue() && getOpcode() != Instruction::FMul) {
1170 // TODO: Compute cost for VPInstructions without underlying values once
1171 // the legacy cost model has been retired.
1172 return 0;
1173 }
1174
1176 "Should only generate a vector value or single scalar, not scalars "
1177 "for all lanes.");
1179 getOpcode(),
1181 }
1182
1183 switch (getOpcode()) {
1184 case Instruction::Select: {
1186 match(getOperand(0), m_Cmp(Pred, m_VPValue(), m_VPValue()));
1187 auto *CondTy = Ctx.Types.inferScalarType(getOperand(0));
1188 auto *VecTy = Ctx.Types.inferScalarType(getOperand(1));
1189 if (!vputils::onlyFirstLaneUsed(this)) {
1190 CondTy = toVectorTy(CondTy, VF);
1191 VecTy = toVectorTy(VecTy, VF);
1192 }
1193 return Ctx.TTI.getCmpSelInstrCost(Instruction::Select, VecTy, CondTy, Pred,
1194 Ctx.CostKind);
1195 }
1196 case Instruction::ExtractElement:
1198 if (VF.isScalar()) {
1199 // ExtractLane with VF=1 takes care of handling extracting across multiple
1200 // parts.
1201 return 0;
1202 }
1203
1204 // Add on the cost of extracting the element.
1205 auto *VecTy = toVectorTy(Ctx.Types.inferScalarType(getOperand(0)), VF);
1206 return Ctx.TTI.getVectorInstrCost(Instruction::ExtractElement, VecTy,
1207 Ctx.CostKind);
1208 }
1209 case VPInstruction::AnyOf: {
1210 auto *VecTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);
1211 return Ctx.TTI.getArithmeticReductionCost(
1212 Instruction::Or, cast<VectorType>(VecTy), std::nullopt, Ctx.CostKind);
1213 }
1215 Type *Ty = Ctx.Types.inferScalarType(this);
1216 Type *ScalarTy = Ctx.Types.inferScalarType(getOperand(0));
1217 if (VF.isScalar())
1218 return Ctx.TTI.getCmpSelInstrCost(Instruction::ICmp, ScalarTy,
1220 CmpInst::ICMP_EQ, Ctx.CostKind);
1221 // Calculate the cost of determining the lane index.
1222 auto *PredTy = toVectorTy(ScalarTy, VF);
1223 IntrinsicCostAttributes Attrs(Intrinsic::experimental_cttz_elts, Ty,
1224 {PredTy, Type::getInt1Ty(Ctx.LLVMCtx)});
1225 return Ctx.TTI.getIntrinsicInstrCost(Attrs, Ctx.CostKind);
1226 }
1228 Type *Ty = Ctx.Types.inferScalarType(this);
1229 Type *ScalarTy = Ctx.Types.inferScalarType(getOperand(0));
1230 if (VF.isScalar())
1231 return Ctx.TTI.getCmpSelInstrCost(Instruction::ICmp, ScalarTy,
1233 CmpInst::ICMP_EQ, Ctx.CostKind);
1234 // Calculate the cost of determining the lane index: NOT + cttz_elts + SUB.
1235 auto *PredTy = toVectorTy(ScalarTy, VF);
1236 IntrinsicCostAttributes Attrs(Intrinsic::experimental_cttz_elts, Ty,
1237 {PredTy, Type::getInt1Ty(Ctx.LLVMCtx)});
1238 InstructionCost Cost = Ctx.TTI.getIntrinsicInstrCost(Attrs, Ctx.CostKind);
1239 // Add cost of NOT operation on the predicate.
1240 Cost += Ctx.TTI.getArithmeticInstrCost(
1241 Instruction::Xor, PredTy, Ctx.CostKind,
1242 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
1243 {TargetTransformInfo::OK_UniformConstantValue,
1244 TargetTransformInfo::OP_None});
1245 // Add cost of SUB operation on the index.
1246 Cost += Ctx.TTI.getArithmeticInstrCost(Instruction::Sub, Ty, Ctx.CostKind);
1247 return Cost;
1248 }
1250 Type *ScalarTy = Ctx.Types.inferScalarType(this);
1251 Type *VecTy = toVectorTy(ScalarTy, VF);
1252 Type *MaskTy = toVectorTy(Type::getInt1Ty(Ctx.LLVMCtx), VF);
1254 Intrinsic::experimental_vector_extract_last_active, ScalarTy,
1255 {VecTy, MaskTy, ScalarTy});
1256 return Ctx.TTI.getIntrinsicInstrCost(ICA, Ctx.CostKind);
1257 }
1259 assert(VF.isVector() && "Scalar FirstOrderRecurrenceSplice?");
1260 Type *VectorTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);
1261 return Ctx.TTI.getShuffleCost(
1263 cast<VectorType>(VectorTy), {}, Ctx.CostKind, -1);
1264 }
1266 Type *ArgTy = Ctx.Types.inferScalarType(getOperand(0));
1267 unsigned Multiplier = cast<VPConstantInt>(getOperand(2))->getZExtValue();
1268 Type *RetTy = toVectorTy(Type::getInt1Ty(Ctx.LLVMCtx), VF * Multiplier);
1269 IntrinsicCostAttributes Attrs(Intrinsic::get_active_lane_mask, RetTy,
1270 {ArgTy, ArgTy});
1271 return Ctx.TTI.getIntrinsicInstrCost(Attrs, Ctx.CostKind);
1272 }
1274 Type *Arg0Ty = Ctx.Types.inferScalarType(getOperand(0));
1275 Type *I32Ty = Type::getInt32Ty(Ctx.LLVMCtx);
1276 Type *I1Ty = Type::getInt1Ty(Ctx.LLVMCtx);
1277 IntrinsicCostAttributes Attrs(Intrinsic::experimental_get_vector_length,
1278 I32Ty, {Arg0Ty, I32Ty, I1Ty});
1279 return Ctx.TTI.getIntrinsicInstrCost(Attrs, Ctx.CostKind);
1280 }
1282 assert(VF.isVector() && "Reverse operation must be vector type");
1283 Type *EltTy = Ctx.Types.inferScalarType(this);
1284 // Skip the reverse operation cost for the mask.
1285 // FIXME: Remove this once redundant mask reverse operations can be
1286 // eliminated by VPlanTransforms::cse before cost computation.
1287 if (EltTy->isIntegerTy(1))
1288 return 0;
1289 auto *VectorTy = cast<VectorType>(toVectorTy(EltTy, VF));
1290 return Ctx.TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy,
1291 VectorTy, /*Mask=*/{}, Ctx.CostKind,
1292 /*Index=*/0);
1293 }
1295 // Add on the cost of extracting the element.
1296 auto *VecTy = toVectorTy(Ctx.Types.inferScalarType(getOperand(0)), VF);
1297 return Ctx.TTI.getIndexedVectorInstrCostFromEnd(Instruction::ExtractElement,
1298 VecTy, Ctx.CostKind, 0);
1299 }
1300 case Instruction::FCmp:
1301 case Instruction::ICmp: {
1302 // FIXME: We don't handle scalar compares inside the loop here yet, as loop
1303 // exit conditions are handled by the legacy cost model and avoiding all
1304 // scalar compares is the simplest way to avoid double-counting compares
1305 // that compute the loop exit condition.
1306 bool IsScalar = vputils::onlyFirstLaneUsed(this);
1307 const VPRegionBlock *Region = getRegion();
1308 if (IsScalar && Region &&
1309 Region == Region->getPlan()->getVectorLoopRegion())
1310 return 0;
1312 getOpcode(), IsScalar ? ElementCount::getFixed(1) : VF, Ctx);
1313 }
1315 if (VF == ElementCount::getScalable(1))
1317 [[fallthrough]];
1318 default:
1319 // TODO: Compute cost other VPInstructions once the legacy cost model has
1320 // been retired.
1322 "unexpected VPInstruction witht underlying value");
1323 return 0;
1324 }
1325}
1326
1338
1340 switch (getOpcode()) {
1341 case Instruction::Load:
1342 case Instruction::PHI:
1346 return true;
1347 default:
1348 return isScalarCast();
1349 }
1350}
1351
1353 assert(!isMasked() && "cannot execute masked VPInstruction");
1354 IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder);
1356 "Set flags not supported for the provided opcode");
1358 "Opcode requires specific flags to be set");
1359 if (hasFastMathFlags())
1360 State.Builder.setFastMathFlags(getFastMathFlags());
1361 Value *GeneratedValue = generate(State);
1362 if (!hasResult())
1363 return;
1364 assert(GeneratedValue && "generate must produce a value");
1365 bool GeneratesPerFirstLaneOnly = canGenerateScalarForFirstLane() &&
1368 assert((((GeneratedValue->getType()->isVectorTy() ||
1369 GeneratedValue->getType()->isStructTy()) ==
1370 !GeneratesPerFirstLaneOnly) ||
1371 State.VF.isScalar()) &&
1372 "scalar value but not only first lane defined");
1373 State.set(this, GeneratedValue,
1374 /*IsScalar*/ GeneratesPerFirstLaneOnly);
1376 // FIXME: This is a workaround to enable reliable updates of the scalar loop
1377 // resume phis, when vectorizing the epilogue. Must be removed once epilogue
1378 // vectorization explicitly connects VPlans.
1379 setUnderlyingValue(GeneratedValue);
1380 }
1381}
1382
1386 return false;
1387 switch (getOpcode()) {
1388 case Instruction::ExtractValue:
1389 case Instruction::InsertValue:
1390 case Instruction::GetElementPtr:
1391 case Instruction::ExtractElement:
1392 case Instruction::InsertElement:
1393 case Instruction::Freeze:
1394 case Instruction::FCmp:
1395 case Instruction::ICmp:
1396 case Instruction::Select:
1397 case Instruction::PHI:
1421 case VPInstruction::Not:
1430 return false;
1431 case Instruction::Call:
1432 return !getCalledFunction(*this)->doesNotAccessMemory();
1433 default:
1434 return true;
1435 }
1436}
1437
1439 assert(is_contained(operands(), Op) && "Op must be an operand of the recipe");
1441 return vputils::onlyFirstLaneUsed(this);
1442
1443 switch (getOpcode()) {
1444 default:
1445 return false;
1446 case Instruction::ExtractElement:
1447 return Op == getOperand(1);
1448 case Instruction::InsertElement:
1449 return Op == getOperand(1) || Op == getOperand(2);
1450 case Instruction::PHI:
1451 return true;
1452 case Instruction::FCmp:
1453 case Instruction::ICmp:
1454 case Instruction::Select:
1455 case Instruction::Or:
1456 case Instruction::Freeze:
1457 case VPInstruction::Not:
1458 // TODO: Cover additional opcodes.
1459 return vputils::onlyFirstLaneUsed(this);
1460 case Instruction::Load:
1470 return true;
1473 // Before replicating by VF, Build(Struct)Vector uses all lanes of the
1474 // operand, after replicating its operands only the first lane is used.
1475 // Before replicating, it will have only a single operand.
1476 return getNumOperands() > 1;
1478 return Op == getOperand(0) || vputils::onlyFirstLaneUsed(this);
1480 // WidePtrAdd supports scalar and vector base addresses.
1481 return false;
1484 return Op == getOperand(0);
1485 };
1486 llvm_unreachable("switch should return");
1487}
1488
1490 assert(is_contained(operands(), Op) && "Op must be an operand of the recipe");
1492 return vputils::onlyFirstPartUsed(this);
1493
1494 switch (getOpcode()) {
1495 default:
1496 return false;
1497 case Instruction::FCmp:
1498 case Instruction::ICmp:
1499 case Instruction::Select:
1500 return vputils::onlyFirstPartUsed(this);
1505 return true;
1506 };
1507 llvm_unreachable("switch should return");
1508}
1509
1510#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1512 VPSlotTracker SlotTracker(getParent()->getPlan());
1514}
1515
1517 VPSlotTracker &SlotTracker) const {
1518 O << Indent << "EMIT" << (isSingleScalar() ? "-SCALAR" : "") << " ";
1519
1520 if (hasResult()) {
1522 O << " = ";
1523 }
1524
1525 switch (getOpcode()) {
1526 case VPInstruction::Not:
1527 O << "not";
1528 break;
1530 O << "active lane mask";
1531 break;
1533 O << "EXPLICIT-VECTOR-LENGTH";
1534 break;
1536 O << "first-order splice";
1537 break;
1539 O << "branch-on-cond";
1540 break;
1542 O << "branch-on-two-conds";
1543 break;
1545 O << "TC > VF ? TC - VF : 0";
1546 break;
1548 O << "VF * Part +";
1549 break;
1551 O << "branch-on-count";
1552 break;
1554 O << "broadcast";
1555 break;
1557 O << "buildstructvector";
1558 break;
1560 O << "buildvector";
1561 break;
1563 O << "exiting-iv-value";
1564 break;
1566 O << "masked-cond";
1567 break;
1569 O << "extract-lane";
1570 break;
1572 O << "extract-last-lane";
1573 break;
1575 O << "extract-last-part";
1576 break;
1578 O << "extract-penultimate-element";
1579 break;
1581 O << "compute-reduction-result";
1582 break;
1584 O << "logical-and";
1585 break;
1587 O << "logical-or";
1588 break;
1590 O << "ptradd";
1591 break;
1593 O << "wide-ptradd";
1594 break;
1596 O << "any-of";
1597 break;
1599 O << "first-active-lane";
1600 break;
1602 O << "last-active-lane";
1603 break;
1605 O << "reduction-start-vector";
1606 break;
1608 O << "resume-for-epilogue";
1609 break;
1611 O << "reverse";
1612 break;
1614 O << "unpack";
1615 break;
1617 O << "extract-last-active";
1618 break;
1619 default:
1621 }
1622
1623 printFlags(O);
1625}
1626#endif
1627
1629 if (isScalarCast()) {
1630 Value *Op = State.get(getOperand(0), VPLane(0));
1631 Value *Cast = State.Builder.CreateCast(Instruction::CastOps(getOpcode()),
1632 Op, ResultTy);
1633 State.set(this, Cast, VPLane(0));
1634 return;
1635 }
1636 switch (getOpcode()) {
1638 Value *StepVector =
1639 State.Builder.CreateStepVector(VectorType::get(ResultTy, State.VF));
1640 State.set(this, StepVector);
1641 break;
1642 }
1643 case VPInstruction::VScale: {
1644 Value *VScale = State.Builder.CreateVScale(ResultTy);
1645 State.set(this, VScale, true);
1646 break;
1647 }
1648
1649 default:
1650 llvm_unreachable("opcode not implemented yet");
1651 }
1652}
1653
1654#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1656 VPSlotTracker &SlotTracker) const {
1657 O << Indent << "EMIT" << (isSingleScalar() ? "-SCALAR" : "") << " ";
1659 O << " = ";
1660
1661 switch (getOpcode()) {
1663 O << "wide-iv-step ";
1665 break;
1667 O << "step-vector " << *ResultTy;
1668 break;
1670 O << "vscale " << *ResultTy;
1671 break;
1672 case Instruction::Load:
1673 O << "load ";
1675 break;
1676 default:
1677 assert(Instruction::isCast(getOpcode()) && "unhandled opcode");
1680 O << " to " << *ResultTy;
1681 }
1682}
1683#endif
1684
1686 PHINode *NewPhi = State.Builder.CreatePHI(
1687 State.TypeAnalysis.inferScalarType(this), 2, getName());
1688 unsigned NumIncoming = getNumIncoming();
1689 // Detect header phis: the parent block dominates its second incoming block
1690 // (the latch). Those IR incoming values have not been generated yet and need
1691 // to be added after they have been executed.
1692 if (NumIncoming == 2 &&
1693 State.VPDT.dominates(getParent(), getIncomingBlock(1))) {
1694 NumIncoming = 1;
1695 }
1696 for (unsigned Idx = 0; Idx != NumIncoming; ++Idx) {
1697 Value *IncV = State.get(getIncomingValue(Idx), VPLane(0));
1698 BasicBlock *PredBB = State.CFG.VPBB2IRBB.at(getIncomingBlock(Idx));
1699 NewPhi->addIncoming(IncV, PredBB);
1700 }
1701 State.set(this, NewPhi, VPLane(0));
1702}
1703
1704#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1705void VPPhi::printRecipe(raw_ostream &O, const Twine &Indent,
1706 VPSlotTracker &SlotTracker) const {
1707 O << Indent << "EMIT" << (isSingleScalar() ? "-SCALAR" : "") << " ";
1709 O << " = phi";
1710 printFlags(O);
1712}
1713#endif
1714
1715VPIRInstruction *VPIRInstruction ::create(Instruction &I) {
1716 if (auto *Phi = dyn_cast<PHINode>(&I))
1717 return new VPIRPhi(*Phi);
1718 return new VPIRInstruction(I);
1719}
1720
1722 assert(!isa<VPIRPhi>(this) && getNumOperands() == 0 &&
1723 "PHINodes must be handled by VPIRPhi");
1724 // Advance the insert point after the wrapped IR instruction. This allows
1725 // interleaving VPIRInstructions and other recipes.
1726 State.Builder.SetInsertPoint(I.getParent(), std::next(I.getIterator()));
1727}
1728
1730 VPCostContext &Ctx) const {
1731 // The recipe wraps an existing IR instruction on the border of VPlan's scope,
1732 // hence it does not contribute to the cost-modeling for the VPlan.
1733 return 0;
1734}
1735
1736#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1738 VPSlotTracker &SlotTracker) const {
1739 O << Indent << "IR " << I;
1740}
1741#endif
1742
1744 PHINode *Phi = &getIRPhi();
1745 for (const auto &[Idx, Op] : enumerate(operands())) {
1746 VPValue *ExitValue = Op;
1747 auto Lane = vputils::isSingleScalar(ExitValue)
1749 : VPLane::getLastLaneForVF(State.VF);
1750 VPBlockBase *Pred = getParent()->getPredecessors()[Idx];
1751 auto *PredVPBB = Pred->getExitingBasicBlock();
1752 BasicBlock *PredBB = State.CFG.VPBB2IRBB[PredVPBB];
1753 // Set insertion point in PredBB in case an extract needs to be generated.
1754 // TODO: Model extracts explicitly.
1755 State.Builder.SetInsertPoint(PredBB->getTerminator());
1756 Value *V = State.get(ExitValue, VPLane(Lane));
1757 // If there is no existing block for PredBB in the phi, add a new incoming
1758 // value. Otherwise update the existing incoming value for PredBB.
1759 if (Phi->getBasicBlockIndex(PredBB) == -1)
1760 Phi->addIncoming(V, PredBB);
1761 else
1762 Phi->setIncomingValueForBlock(PredBB, V);
1763 }
1764
1765 // Advance the insert point after the wrapped IR instruction. This allows
1766 // interleaving VPIRInstructions and other recipes.
1767 State.Builder.SetInsertPoint(Phi->getParent(), std::next(Phi->getIterator()));
1768}
1769
1771 VPRecipeBase *R = const_cast<VPRecipeBase *>(getAsRecipe());
1772 assert(R->getNumOperands() == R->getParent()->getNumPredecessors() &&
1773 "Number of phi operands must match number of predecessors");
1774 unsigned Position = R->getParent()->getIndexForPredecessor(IncomingBlock);
1775 R->removeOperand(Position);
1776}
1777
1778VPValue *
1780 VPRecipeBase *R = const_cast<VPRecipeBase *>(getAsRecipe());
1781 return getIncomingValue(R->getParent()->getIndexForPredecessor(VPBB));
1782}
1783
1785 VPValue *V) const {
1786 VPRecipeBase *R = const_cast<VPRecipeBase *>(getAsRecipe());
1787 R->setOperand(R->getParent()->getIndexForPredecessor(VPBB), V);
1788}
1789
1790#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1792 VPSlotTracker &SlotTracker) const {
1793 interleaveComma(enumerate(getAsRecipe()->operands()), O,
1794 [this, &O, &SlotTracker](auto Op) {
1795 O << "[ ";
1796 Op.value()->printAsOperand(O, SlotTracker);
1797 O << ", ";
1798 getIncomingBlock(Op.index())->printAsOperand(O);
1799 O << " ]";
1800 });
1801}
1802#endif
1803
1804#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1806 VPSlotTracker &SlotTracker) const {
1808
1809 if (getNumOperands() != 0) {
1810 O << " (extra operand" << (getNumOperands() > 1 ? "s" : "") << ": ";
1812 [&O, &SlotTracker](auto Op) {
1813 std::get<0>(Op)->printAsOperand(O, SlotTracker);
1814 O << " from ";
1815 std::get<1>(Op)->printAsOperand(O);
1816 });
1817 O << ")";
1818 }
1819}
1820#endif
1821
1823 for (const auto &[Kind, Node] : Metadata)
1824 I.setMetadata(Kind, Node);
1825}
1826
1828 SmallVector<std::pair<unsigned, MDNode *>> MetadataIntersection;
1829 for (const auto &[KindA, MDA] : Metadata) {
1830 for (const auto &[KindB, MDB] : Other.Metadata) {
1831 if (KindA == KindB && MDA == MDB) {
1832 MetadataIntersection.emplace_back(KindA, MDA);
1833 break;
1834 }
1835 }
1836 }
1837 Metadata = std::move(MetadataIntersection);
1838}
1839
1840#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1842 const Module *M = SlotTracker.getModule();
1843 if (Metadata.empty() || !M)
1844 return;
1845
1846 ArrayRef<StringRef> MDNames = SlotTracker.getMDNames();
1847 O << " (";
1848 interleaveComma(Metadata, O, [&](const auto &KindNodePair) {
1849 auto [Kind, Node] = KindNodePair;
1850 assert(Kind < MDNames.size() && !MDNames[Kind].empty() &&
1851 "Unexpected unnamed metadata kind");
1852 O << "!" << MDNames[Kind] << " ";
1853 Node->printAsOperand(O, M);
1854 });
1855 O << ")";
1856}
1857#endif
1858
1860 assert(State.VF.isVector() && "not widening");
1861 assert(Variant != nullptr && "Can't create vector function.");
1862
1863 FunctionType *VFTy = Variant->getFunctionType();
1864 // Add return type if intrinsic is overloaded on it.
1866 for (const auto &I : enumerate(args())) {
1867 Value *Arg;
1868 // Some vectorized function variants may also take a scalar argument,
1869 // e.g. linear parameters for pointers. This needs to be the scalar value
1870 // from the start of the respective part when interleaving.
1871 if (!VFTy->getParamType(I.index())->isVectorTy())
1872 Arg = State.get(I.value(), VPLane(0));
1873 else
1874 Arg = State.get(I.value(), usesFirstLaneOnly(I.value()));
1875 Args.push_back(Arg);
1876 }
1877
1880 if (CI)
1881 CI->getOperandBundlesAsDefs(OpBundles);
1882
1883 CallInst *V = State.Builder.CreateCall(Variant, Args, OpBundles);
1884 applyFlags(*V);
1885 applyMetadata(*V);
1886 V->setCallingConv(Variant->getCallingConv());
1887
1888 if (!V->getType()->isVoidTy())
1889 State.set(this, V);
1890}
1891
1893 VPCostContext &Ctx) const {
1894 assert(getVectorizedTypeVF(Variant->getReturnType()) == VF &&
1895 "Variant return type must match VF");
1896 return computeCallCost(Variant, Ctx);
1897}
1898
1900 VPCostContext &Ctx) {
1901 return Ctx.TTI.getCallInstrCost(nullptr, Variant->getReturnType(),
1902 Variant->getFunctionType()->params(),
1903 Ctx.CostKind);
1904}
1905
1907 assert(is_contained(operands(), Op) && "Op must be an operand of the recipe");
1908 assert(Variant && "Variant not set");
1909 FunctionType *VFTy = Variant->getFunctionType();
1910 return all_of(enumerate(args()), [VFTy, &Op](const auto &Arg) {
1911 auto [Idx, V] = Arg;
1912 Type *ArgTy = VFTy->getParamType(Idx);
1913 return V != Op || ArgTy->isIntegerTy() || ArgTy->isFloatingPointTy() ||
1914 ArgTy->isPointerTy() || ArgTy->isByteTy();
1915 });
1916}
1917
1918#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1920 VPSlotTracker &SlotTracker) const {
1921 O << Indent << "WIDEN-CALL ";
1922
1923 Function *CalledFn = getCalledScalarFunction();
1924 if (CalledFn->getReturnType()->isVoidTy())
1925 O << "void ";
1926 else {
1928 O << " = ";
1929 }
1930
1931 O << "call";
1932 printFlags(O);
1933 O << " @" << CalledFn->getName() << "(";
1934 interleaveComma(args(), O, [&O, &SlotTracker](VPValue *Op) {
1935 Op->printAsOperand(O, SlotTracker);
1936 });
1937 O << ")";
1938
1939 O << " (using library function";
1940 if (Variant->hasName())
1941 O << ": " << Variant->getName();
1942 O << ")";
1943}
1944#endif
1945
1947 assert(State.VF.isVector() && "not widening");
1948
1949 SmallVector<Type *, 2> TysForDecl;
1950 // Add return type if intrinsic is overloaded on it.
1951 if (isVectorIntrinsicWithOverloadTypeAtArg(VectorIntrinsicID, -1,
1952 State.TTI)) {
1953 Type *RetTy = toVectorizedTy(getResultType(), State.VF);
1954 ArrayRef<Type *> ContainedTys = getContainedTypes(RetTy);
1955 for (auto [Idx, Ty] : enumerate(ContainedTys)) {
1957 Idx, State.TTI))
1958 TysForDecl.push_back(Ty);
1959 }
1960 }
1962 for (const auto &I : enumerate(operands())) {
1963 // Some intrinsics have a scalar argument - don't replace it with a
1964 // vector.
1965 Value *Arg;
1966 if (isVectorIntrinsicWithScalarOpAtArg(VectorIntrinsicID, I.index(),
1967 State.TTI))
1968 Arg = State.get(I.value(), VPLane(0));
1969 else
1970 Arg = State.get(I.value(), usesFirstLaneOnly(I.value()));
1971 if (isVectorIntrinsicWithOverloadTypeAtArg(VectorIntrinsicID, I.index(),
1972 State.TTI))
1973 TysForDecl.push_back(Arg->getType());
1974 Args.push_back(Arg);
1975 }
1976
1977 // Use vector version of the intrinsic.
1978 Module *M = State.Builder.GetInsertBlock()->getModule();
1979 Function *VectorF =
1980 Intrinsic::getOrInsertDeclaration(M, VectorIntrinsicID, TysForDecl);
1981 assert(VectorF &&
1982 "Can't retrieve vector intrinsic or vector-predication intrinsics.");
1983
1986 if (CI)
1987 CI->getOperandBundlesAsDefs(OpBundles);
1988
1989 CallInst *V = State.Builder.CreateCall(VectorF, Args, OpBundles);
1990
1991 applyFlags(*V);
1992 applyMetadata(*V);
1993
1994 if (!V->getType()->isVoidTy())
1995 State.set(this, V);
1996}
1997
2000 const VPRecipeWithIRFlags &R, ElementCount VF, VPCostContext &Ctx) {
2001 Type *ScalarRetTy = Ctx.Types.inferScalarType(&R);
2002 // Skip the reverse operation cost for the mask.
2003 // FIXME: Remove this once redundant mask reverse operations can be eliminated
2004 // by VPlanTransforms::cse before cost computation.
2005 if (ID == Intrinsic::experimental_vp_reverse && ScalarRetTy->isIntegerTy(1))
2006 return InstructionCost(0);
2007
2008 // Some backends analyze intrinsic arguments to determine cost. Use the
2009 // underlying value for the operand if it has one. Otherwise try to use the
2010 // operand of the underlying call instruction, if there is one. Otherwise
2011 // clear Arguments.
2012 // TODO: Rework TTI interface to be independent of concrete IR values.
2014 for (const auto &[Idx, Op] : enumerate(Operands)) {
2015 auto *V = Op->getUnderlyingValue();
2016 if (!V) {
2017 if (auto *UI = dyn_cast_or_null<CallBase>(R.getUnderlyingValue())) {
2018 Arguments.push_back(UI->getArgOperand(Idx));
2019 continue;
2020 }
2021 Arguments.clear();
2022 break;
2023 }
2024 Arguments.push_back(V);
2025 }
2026
2027 Type *RetTy = VF.isVector() ? toVectorizedTy(ScalarRetTy, VF) : ScalarRetTy;
2028 SmallVector<Type *> ParamTys =
2029 map_to_vector(Operands, [&](const VPValue *Op) {
2030 return toVectorTy(Ctx.Types.inferScalarType(Op), VF);
2031 });
2032
2033 // TODO: Rework TTI interface to avoid reliance on underlying IntrinsicInst.
2034 IntrinsicCostAttributes CostAttrs(
2035 ID, RetTy, Arguments, ParamTys, R.getFastMathFlags(),
2036 dyn_cast_or_null<IntrinsicInst>(R.getUnderlyingValue()),
2038 return Ctx.TTI.getIntrinsicInstrCost(CostAttrs, Ctx.CostKind);
2039}
2040
2042 VPCostContext &Ctx) const {
2044 return computeCallCost(VectorIntrinsicID, ArgOps, *this, VF, Ctx);
2045}
2046
2048 return Intrinsic::getBaseName(VectorIntrinsicID);
2049}
2050
2052 assert(is_contained(operands(), Op) && "Op must be an operand of the recipe");
2053 return all_of(enumerate(operands()), [this, &Op](const auto &X) {
2054 auto [Idx, V] = X;
2056 Idx, nullptr);
2057 });
2058}
2059
2060#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2062 VPSlotTracker &SlotTracker) const {
2063 O << Indent << "WIDEN-INTRINSIC ";
2064 if (ResultTy->isVoidTy()) {
2065 O << "void ";
2066 } else {
2068 O << " = ";
2069 }
2070
2071 O << "call";
2072 printFlags(O);
2073 O << getIntrinsicName() << "(";
2074
2076 Op->printAsOperand(O, SlotTracker);
2077 });
2078 O << ")";
2079}
2080#endif
2081
2083 IRBuilderBase &Builder = State.Builder;
2084
2085 Value *Address = State.get(getOperand(0));
2086 Value *IncAmt = State.get(getOperand(1), /*IsScalar=*/true);
2087 VectorType *VTy = cast<VectorType>(Address->getType());
2088
2089 // The histogram intrinsic requires a mask even if the recipe doesn't;
2090 // if the mask operand was omitted then all lanes should be executed and
2091 // we just need to synthesize an all-true mask.
2092 Value *Mask = nullptr;
2093 if (VPValue *VPMask = getMask())
2094 Mask = State.get(VPMask);
2095 else
2096 Mask =
2097 Builder.CreateVectorSplat(VTy->getElementCount(), Builder.getInt1(1));
2098
2099 // If this is a subtract, we want to invert the increment amount. We may
2100 // add a separate intrinsic in future, but for now we'll try this.
2101 if (Opcode == Instruction::Sub)
2102 IncAmt = Builder.CreateNeg(IncAmt);
2103 else
2104 assert(Opcode == Instruction::Add && "only add or sub supported for now");
2105
2106 State.Builder.CreateIntrinsic(Intrinsic::experimental_vector_histogram_add,
2107 {VTy, IncAmt->getType()},
2108 {Address, IncAmt, Mask});
2109}
2110
2112 VPCostContext &Ctx) const {
2113 // FIXME: Take the gather and scatter into account as well. For now we're
2114 // generating the same cost as the fallback path, but we'll likely
2115 // need to create a new TTI method for determining the cost, including
2116 // whether we can use base + vec-of-smaller-indices or just
2117 // vec-of-pointers.
2118 assert(VF.isVector() && "Invalid VF for histogram cost");
2119 Type *AddressTy = Ctx.Types.inferScalarType(getOperand(0));
2120 VPValue *IncAmt = getOperand(1);
2121 Type *IncTy = Ctx.Types.inferScalarType(IncAmt);
2122 VectorType *VTy = VectorType::get(IncTy, VF);
2123
2124 // Assume that a non-constant update value (or a constant != 1) requires
2125 // a multiply, and add that into the cost.
2126 InstructionCost MulCost =
2127 Ctx.TTI.getArithmeticInstrCost(Instruction::Mul, VTy, Ctx.CostKind);
2128 if (match(IncAmt, m_One()))
2129 MulCost = TTI::TCC_Free;
2130
2131 // Find the cost of the histogram operation itself.
2132 Type *PtrTy = VectorType::get(AddressTy, VF);
2133 Type *MaskTy = VectorType::get(Type::getInt1Ty(Ctx.LLVMCtx), VF);
2134 IntrinsicCostAttributes ICA(Intrinsic::experimental_vector_histogram_add,
2135 Type::getVoidTy(Ctx.LLVMCtx),
2136 {PtrTy, IncTy, MaskTy});
2137
2138 // Add the costs together with the add/sub operation.
2139 return Ctx.TTI.getIntrinsicInstrCost(ICA, Ctx.CostKind) + MulCost +
2140 Ctx.TTI.getArithmeticInstrCost(Opcode, VTy, Ctx.CostKind);
2141}
2142
2143#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2145 VPSlotTracker &SlotTracker) const {
2146 O << Indent << "WIDEN-HISTOGRAM buckets: ";
2148
2149 if (Opcode == Instruction::Sub)
2150 O << ", dec: ";
2151 else {
2152 assert(Opcode == Instruction::Add);
2153 O << ", inc: ";
2154 }
2156
2157 if (VPValue *Mask = getMask()) {
2158 O << ", mask: ";
2159 Mask->printAsOperand(O, SlotTracker);
2160 }
2161}
2162#endif
2163
2164VPIRFlags::FastMathFlagsTy::FastMathFlagsTy(const FastMathFlags &FMF) {
2165 AllowReassoc = FMF.allowReassoc();
2166 NoNaNs = FMF.noNaNs();
2167 NoInfs = FMF.noInfs();
2168 NoSignedZeros = FMF.noSignedZeros();
2169 AllowReciprocal = FMF.allowReciprocal();
2170 AllowContract = FMF.allowContract();
2171 ApproxFunc = FMF.approxFunc();
2172}
2173
2175 switch (Opcode) {
2176 case Instruction::Add:
2177 case Instruction::Sub:
2178 case Instruction::Mul:
2179 case Instruction::Shl:
2181 return WrapFlagsTy(false, false);
2182 case Instruction::Trunc:
2183 return TruncFlagsTy(false, false);
2184 case Instruction::Or:
2185 return DisjointFlagsTy(false);
2186 case Instruction::AShr:
2187 case Instruction::LShr:
2188 case Instruction::UDiv:
2189 case Instruction::SDiv:
2190 return ExactFlagsTy(false);
2191 case Instruction::GetElementPtr:
2194 return GEPNoWrapFlags::none();
2195 case Instruction::ZExt:
2196 case Instruction::UIToFP:
2197 return NonNegFlagsTy(false);
2198 case Instruction::FAdd:
2199 case Instruction::FSub:
2200 case Instruction::FMul:
2201 case Instruction::FDiv:
2202 case Instruction::FRem:
2203 case Instruction::FNeg:
2204 case Instruction::FPExt:
2205 case Instruction::FPTrunc:
2206 return FastMathFlags();
2207 case Instruction::ICmp:
2208 case Instruction::FCmp:
2210 llvm_unreachable("opcode requires explicit flags");
2211 default:
2212 return VPIRFlags();
2213 }
2214}
2215
2216#if !defined(NDEBUG)
2217bool VPIRFlags::flagsValidForOpcode(unsigned Opcode) const {
2218 switch (OpType) {
2219 case OperationType::OverflowingBinOp:
2220 return Opcode == Instruction::Add || Opcode == Instruction::Sub ||
2221 Opcode == Instruction::Mul || Opcode == Instruction::Shl ||
2222 Opcode == VPInstruction::VPInstruction::CanonicalIVIncrementForPart;
2223 case OperationType::Trunc:
2224 return Opcode == Instruction::Trunc;
2225 case OperationType::DisjointOp:
2226 return Opcode == Instruction::Or;
2227 case OperationType::PossiblyExactOp:
2228 return Opcode == Instruction::AShr || Opcode == Instruction::LShr ||
2229 Opcode == Instruction::UDiv || Opcode == Instruction::SDiv;
2230 case OperationType::GEPOp:
2231 return Opcode == Instruction::GetElementPtr ||
2232 Opcode == VPInstruction::PtrAdd ||
2233 Opcode == VPInstruction::WidePtrAdd;
2234 case OperationType::FPMathOp:
2235 return Opcode == Instruction::Call || Opcode == Instruction::FAdd ||
2236 Opcode == Instruction::FMul || Opcode == Instruction::FSub ||
2237 Opcode == Instruction::FNeg || Opcode == Instruction::FDiv ||
2238 Opcode == Instruction::FRem || Opcode == Instruction::FPExt ||
2239 Opcode == Instruction::FPTrunc || Opcode == Instruction::PHI ||
2240 Opcode == Instruction::Select ||
2241 Opcode == VPInstruction::WideIVStep ||
2243 case OperationType::FCmp:
2244 return Opcode == Instruction::FCmp;
2245 case OperationType::NonNegOp:
2246 return Opcode == Instruction::ZExt || Opcode == Instruction::UIToFP;
2247 case OperationType::Cmp:
2248 return Opcode == Instruction::FCmp || Opcode == Instruction::ICmp;
2249 case OperationType::ReductionOp:
2251 case OperationType::Other:
2252 return true;
2253 }
2254 llvm_unreachable("Unknown OperationType enum");
2255}
2256
2257bool VPIRFlags::hasRequiredFlagsForOpcode(unsigned Opcode) const {
2258 // Handle opcodes without default flags.
2259 if (Opcode == Instruction::ICmp)
2260 return OpType == OperationType::Cmp;
2261 if (Opcode == Instruction::FCmp)
2262 return OpType == OperationType::FCmp;
2264 return OpType == OperationType::ReductionOp;
2265
2266 OperationType Required = getDefaultFlags(Opcode).OpType;
2267 return Required == OperationType::Other || Required == OpType;
2268}
2269#endif
2270
2271#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2273 switch (OpType) {
2274 case OperationType::Cmp:
2276 break;
2277 case OperationType::FCmp:
2280 break;
2281 case OperationType::DisjointOp:
2282 if (DisjointFlags.IsDisjoint)
2283 O << " disjoint";
2284 break;
2285 case OperationType::PossiblyExactOp:
2286 if (ExactFlags.IsExact)
2287 O << " exact";
2288 break;
2289 case OperationType::OverflowingBinOp:
2290 if (WrapFlags.HasNUW)
2291 O << " nuw";
2292 if (WrapFlags.HasNSW)
2293 O << " nsw";
2294 break;
2295 case OperationType::Trunc:
2296 if (TruncFlags.HasNUW)
2297 O << " nuw";
2298 if (TruncFlags.HasNSW)
2299 O << " nsw";
2300 break;
2301 case OperationType::FPMathOp:
2303 break;
2304 case OperationType::GEPOp: {
2306 if (Flags.isInBounds())
2307 O << " inbounds";
2308 else if (Flags.hasNoUnsignedSignedWrap())
2309 O << " nusw";
2310 if (Flags.hasNoUnsignedWrap())
2311 O << " nuw";
2312 break;
2313 }
2314 case OperationType::NonNegOp:
2315 if (NonNegFlags.NonNeg)
2316 O << " nneg";
2317 break;
2318 case OperationType::ReductionOp: {
2319 RecurKind RK = getRecurKind();
2320 O << " (";
2321 switch (RK) {
2322 case RecurKind::AnyOf:
2323 O << "any-of";
2324 break;
2326 O << "find-last";
2327 break;
2328 case RecurKind::SMax:
2329 O << "smax";
2330 break;
2331 case RecurKind::SMin:
2332 O << "smin";
2333 break;
2334 case RecurKind::UMax:
2335 O << "umax";
2336 break;
2337 case RecurKind::UMin:
2338 O << "umin";
2339 break;
2340 case RecurKind::FMinNum:
2341 O << "fminnum";
2342 break;
2343 case RecurKind::FMaxNum:
2344 O << "fmaxnum";
2345 break;
2347 O << "fminimum";
2348 break;
2350 O << "fmaximum";
2351 break;
2353 O << "fminimumnum";
2354 break;
2356 O << "fmaximumnum";
2357 break;
2358 default:
2360 break;
2361 }
2362 if (isReductionInLoop())
2363 O << ", in-loop";
2364 if (isReductionOrdered())
2365 O << ", ordered";
2366 O << ")";
2368 break;
2369 }
2370 case OperationType::Other:
2371 break;
2372 }
2373 O << " ";
2374}
2375#endif
2376
2378 auto &Builder = State.Builder;
2379 switch (Opcode) {
2380 case Instruction::Call:
2381 case Instruction::UncondBr:
2382 case Instruction::CondBr:
2383 case Instruction::PHI:
2384 case Instruction::GetElementPtr:
2385 llvm_unreachable("This instruction is handled by a different recipe.");
2386 case Instruction::UDiv:
2387 case Instruction::SDiv:
2388 case Instruction::SRem:
2389 case Instruction::URem:
2390 case Instruction::Add:
2391 case Instruction::FAdd:
2392 case Instruction::Sub:
2393 case Instruction::FSub:
2394 case Instruction::FNeg:
2395 case Instruction::Mul:
2396 case Instruction::FMul:
2397 case Instruction::FDiv:
2398 case Instruction::FRem:
2399 case Instruction::Shl:
2400 case Instruction::LShr:
2401 case Instruction::AShr:
2402 case Instruction::And:
2403 case Instruction::Or:
2404 case Instruction::Xor: {
2405 // Just widen unops and binops.
2407 for (VPValue *VPOp : operands())
2408 Ops.push_back(State.get(VPOp));
2409
2410 Value *V = Builder.CreateNAryOp(Opcode, Ops);
2411
2412 if (auto *VecOp = dyn_cast<Instruction>(V)) {
2413 applyFlags(*VecOp);
2414 applyMetadata(*VecOp);
2415 }
2416
2417 // Use this vector value for all users of the original instruction.
2418 State.set(this, V);
2419 break;
2420 }
2421 case Instruction::ExtractValue: {
2422 assert(getNumOperands() == 2 && "expected single level extractvalue");
2423 Value *Op = State.get(getOperand(0));
2424 Value *Extract = Builder.CreateExtractValue(
2425 Op, cast<VPConstantInt>(getOperand(1))->getZExtValue());
2426 State.set(this, Extract);
2427 break;
2428 }
2429 case Instruction::Freeze: {
2430 Value *Op = State.get(getOperand(0));
2431 Value *Freeze = Builder.CreateFreeze(Op);
2432 State.set(this, Freeze);
2433 break;
2434 }
2435 case Instruction::ICmp:
2436 case Instruction::FCmp: {
2437 // Widen compares. Generate vector compares.
2438 bool FCmp = Opcode == Instruction::FCmp;
2439 Value *A = State.get(getOperand(0));
2440 Value *B = State.get(getOperand(1));
2441 Value *C = nullptr;
2442 if (FCmp) {
2443 C = Builder.CreateFCmp(getPredicate(), A, B);
2444 } else {
2445 C = Builder.CreateICmp(getPredicate(), A, B);
2446 }
2447 if (auto *I = dyn_cast<Instruction>(C)) {
2448 applyFlags(*I);
2449 applyMetadata(*I);
2450 }
2451 State.set(this, C);
2452 break;
2453 }
2454 case Instruction::Select: {
2455 VPValue *CondOp = getOperand(0);
2456 Value *Cond = State.get(CondOp, vputils::isSingleScalar(CondOp));
2457 Value *Op0 = State.get(getOperand(1));
2458 Value *Op1 = State.get(getOperand(2));
2459 Value *Sel = State.Builder.CreateSelect(Cond, Op0, Op1);
2460 State.set(this, Sel);
2461 if (auto *I = dyn_cast<Instruction>(Sel)) {
2463 applyFlags(*I);
2464 applyMetadata(*I);
2465 }
2466 break;
2467 }
2468 default:
2469 // This instruction is not vectorized by simple widening.
2470 LLVM_DEBUG(dbgs() << "LV: Found an unhandled opcode : "
2471 << Instruction::getOpcodeName(Opcode));
2472 llvm_unreachable("Unhandled instruction!");
2473 } // end of switch.
2474
2475#if !defined(NDEBUG)
2476 // Verify that VPlan type inference results agree with the type of the
2477 // generated values.
2478 assert(VectorType::get(State.TypeAnalysis.inferScalarType(this), State.VF) ==
2479 State.get(this)->getType() &&
2480 "inferred type and type from generated instructions do not match");
2481#endif
2482}
2483
2485 VPCostContext &Ctx) const {
2486 switch (Opcode) {
2487 case Instruction::UDiv:
2488 case Instruction::SDiv:
2489 case Instruction::SRem:
2490 case Instruction::URem:
2491 // If the div/rem operation isn't safe to speculate and requires
2492 // predication, then the only way we can even create a vplan is to insert
2493 // a select on the second input operand to ensure we use the value of 1
2494 // for the inactive lanes. The select will be costed separately.
2495 case Instruction::FNeg:
2496 case Instruction::Add:
2497 case Instruction::FAdd:
2498 case Instruction::Sub:
2499 case Instruction::FSub:
2500 case Instruction::Mul:
2501 case Instruction::FMul:
2502 case Instruction::FDiv:
2503 case Instruction::FRem:
2504 case Instruction::Shl:
2505 case Instruction::LShr:
2506 case Instruction::AShr:
2507 case Instruction::And:
2508 case Instruction::Or:
2509 case Instruction::Xor:
2510 case Instruction::Freeze:
2511 case Instruction::ExtractValue:
2512 case Instruction::ICmp:
2513 case Instruction::FCmp:
2514 case Instruction::Select:
2515 return getCostForRecipeWithOpcode(getOpcode(), VF, Ctx);
2516 default:
2517 llvm_unreachable("Unsupported opcode for instruction");
2518 }
2519}
2520
2521#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2523 VPSlotTracker &SlotTracker) const {
2524 O << Indent << "WIDEN ";
2526 O << " = " << Instruction::getOpcodeName(Opcode);
2527 printFlags(O);
2529}
2530#endif
2531
2533 auto &Builder = State.Builder;
2534 /// Vectorize casts.
2535 assert(State.VF.isVector() && "Not vectorizing?");
2536 Type *DestTy = VectorType::get(getResultType(), State.VF);
2537 VPValue *Op = getOperand(0);
2538 Value *A = State.get(Op);
2539 Value *Cast = Builder.CreateCast(Instruction::CastOps(Opcode), A, DestTy);
2540 State.set(this, Cast);
2541 if (auto *CastOp = dyn_cast<Instruction>(Cast)) {
2542 applyFlags(*CastOp);
2543 applyMetadata(*CastOp);
2544 }
2545}
2546
2551
2552#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2554 VPSlotTracker &SlotTracker) const {
2555 O << Indent << "WIDEN-CAST ";
2557 O << " = " << Instruction::getOpcodeName(Opcode);
2558 printFlags(O);
2560 O << " to " << *getResultType();
2561}
2562#endif
2563
2565 VPCostContext &Ctx) const {
2566 return Ctx.TTI.getCFInstrCost(Instruction::PHI, Ctx.CostKind);
2567}
2568
2569#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2571 raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const {
2572 O << Indent;
2574 O << " = WIDEN-INDUCTION";
2575 printFlags(O);
2577
2578 if (auto *TI = getTruncInst())
2579 O << " (truncated to " << *TI->getType() << ")";
2580}
2581#endif
2582
2584 // The step may be defined by a recipe in the preheader (e.g. if it requires
2585 // SCEV expansion), but for the canonical induction the step is required to be
2586 // 1, which is represented as live-in.
2587 return match(getStartValue(), m_ZeroInt()) &&
2588 match(getStepValue(), m_One()) &&
2589 getScalarType() == getRegion()->getCanonicalIVType();
2590}
2591
2592#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2594 VPSlotTracker &SlotTracker) const {
2595 O << Indent;
2597 O << " = DERIVED-IV ";
2598 getStartValue()->printAsOperand(O, SlotTracker);
2599 O << " + ";
2600 getOperand(1)->printAsOperand(O, SlotTracker);
2601 O << " * ";
2602 getStepValue()->printAsOperand(O, SlotTracker);
2603}
2604#endif
2605
2607 // Fast-math-flags propagate from the original induction instruction.
2608 IRBuilder<>::FastMathFlagGuard FMFG(State.Builder);
2609 State.Builder.setFastMathFlags(getFastMathFlags());
2610
2611 /// Compute scalar induction steps. \p ScalarIV is the scalar induction
2612 /// variable on which to base the steps, \p Step is the size of the step.
2613
2614 Value *BaseIV = State.get(getOperand(0), VPLane(0));
2615 Value *Step = State.get(getStepValue(), VPLane(0));
2616 IRBuilderBase &Builder = State.Builder;
2617
2618 // Ensure step has the same type as that of scalar IV.
2619 Type *BaseIVTy = BaseIV->getType()->getScalarType();
2620 assert(BaseIVTy == Step->getType() && "Types of BaseIV and Step must match!");
2621
2622 // We build scalar steps for both integer and floating-point induction
2623 // variables. Here, we determine the kind of arithmetic we will perform.
2626 if (BaseIVTy->isIntegerTy()) {
2627 AddOp = Instruction::Add;
2628 MulOp = Instruction::Mul;
2629 } else {
2630 AddOp = InductionOpcode;
2631 MulOp = Instruction::FMul;
2632 }
2633
2634 // Determine the number of scalars we need to generate.
2635 bool FirstLaneOnly = vputils::onlyFirstLaneUsed(this);
2636 // Compute the scalar steps and save the results in State.
2637
2638 unsigned EndLane = FirstLaneOnly ? 1 : State.VF.getKnownMinValue();
2639 Value *StartIdx0 = getStartIndex() ? State.get(getStartIndex(), true)
2640 : Constant::getNullValue(BaseIVTy);
2641
2642 for (unsigned Lane = 0; Lane < EndLane; ++Lane) {
2643 // It is okay if the induction variable type cannot hold the lane number,
2644 // we expect truncation in this case.
2645 Constant *LaneValue =
2646 BaseIVTy->isIntegerTy()
2647 ? ConstantInt::get(BaseIVTy, Lane, /*IsSigned=*/false,
2648 /*ImplicitTrunc=*/true)
2649 : ConstantFP::get(BaseIVTy, Lane);
2650 Value *StartIdx = Builder.CreateBinOp(AddOp, StartIdx0, LaneValue);
2651 assert((State.VF.isScalable() || isa<Constant>(StartIdx)) &&
2652 "Expected StartIdx to be folded to a constant when VF is not "
2653 "scalable");
2654 auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step);
2655 auto *Add = Builder.CreateBinOp(AddOp, BaseIV, Mul);
2656 State.set(this, Add, VPLane(Lane));
2657 }
2658}
2659
2660#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2662 VPSlotTracker &SlotTracker) const {
2663 O << Indent;
2665 O << " = SCALAR-STEPS ";
2667}
2668#endif
2669
2671 assert(is_contained(operands(), Op) && "Op must be an operand of the recipe");
2673}
2674
2676 assert(State.VF.isVector() && "not widening");
2677 // Construct a vector GEP by widening the operands of the scalar GEP as
2678 // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
2679 // results in a vector of pointers when at least one operand of the GEP
2680 // is vector-typed. Thus, to keep the representation compact, we only use
2681 // vector-typed operands for loop-varying values.
2682
2683 bool AllOperandsAreInvariant = all_of(operands(), [](VPValue *Op) {
2684 return Op->isDefinedOutsideLoopRegions();
2685 });
2686 if (AllOperandsAreInvariant) {
2687 // If we are vectorizing, but the GEP has only loop-invariant operands,
2688 // the GEP we build (by only using vector-typed operands for
2689 // loop-varying values) would be a scalar pointer. Thus, to ensure we
2690 // produce a vector of pointers, we need to either arbitrarily pick an
2691 // operand to broadcast, or broadcast a clone of the original GEP.
2692 // Here, we broadcast a clone of the original.
2693
2695 for (unsigned I = 0, E = getNumOperands(); I != E; I++)
2696 Ops.push_back(State.get(getOperand(I), VPLane(0)));
2697
2698 auto *NewGEP =
2699 State.Builder.CreateGEP(getSourceElementType(), Ops[0], drop_begin(Ops),
2700 "", getGEPNoWrapFlags());
2701 Value *Splat = State.Builder.CreateVectorSplat(State.VF, NewGEP);
2702 State.set(this, Splat);
2703 return;
2704 }
2705
2706 // If the GEP has at least one loop-varying operand, we are sure to
2707 // produce a vector of pointers unless VF is scalar.
2708 // The pointer operand of the new GEP. If it's loop-invariant, we
2709 // won't broadcast it.
2710 auto *Ptr = State.get(getOperand(0), isPointerLoopInvariant());
2711
2712 // Collect all the indices for the new GEP. If any index is
2713 // loop-invariant, we won't broadcast it.
2715 for (unsigned I = 1, E = getNumOperands(); I < E; I++) {
2716 VPValue *Operand = getOperand(I);
2717 Indices.push_back(State.get(Operand, isIndexLoopInvariant(I - 1)));
2718 }
2719
2720 // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
2721 // but it should be a vector, otherwise.
2722 auto *NewGEP = State.Builder.CreateGEP(getSourceElementType(), Ptr, Indices,
2723 "", getGEPNoWrapFlags());
2724 assert((State.VF.isScalar() || NewGEP->getType()->isVectorTy()) &&
2725 "NewGEP is not a pointer vector");
2726 State.set(this, NewGEP);
2727}
2728
2729#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2731 VPSlotTracker &SlotTracker) const {
2732 O << Indent << "WIDEN-GEP ";
2733 O << (isPointerLoopInvariant() ? "Inv" : "Var");
2734 for (size_t I = 0; I < getNumOperands() - 1; ++I)
2735 O << "[" << (isIndexLoopInvariant(I) ? "Inv" : "Var") << "]";
2736
2737 O << " ";
2739 O << " = getelementptr";
2740 printFlags(O);
2742}
2743#endif
2744
2746 assert(!getOffset() && "Unexpected offset operand");
2747 VPBuilder Builder(this);
2748 VPlan &Plan = *getParent()->getPlan();
2749 VPValue *VFVal = getVFValue();
2750 VPTypeAnalysis TypeInfo(Plan);
2751 const DataLayout &DL = Plan.getDataLayout();
2752 Type *IndexTy = DL.getIndexType(TypeInfo.inferScalarType(this));
2753 VPValue *Stride =
2754 Plan.getConstantInt(IndexTy, getStride(), /*IsSigned=*/true);
2755 Type *VFTy = TypeInfo.inferScalarType(VFVal);
2756 VPValue *VF = Builder.createScalarZExtOrTrunc(VFVal, IndexTy, VFTy,
2758
2759 // Offset for Part0 = Offset0 = Stride * (VF - 1).
2760 VPInstruction *VFMinusOne =
2761 Builder.createSub(VF, Plan.getConstantInt(IndexTy, 1u),
2762 DebugLoc::getUnknown(), "", {true, true});
2763 VPInstruction *Offset0 =
2764 Builder.createOverflowingOp(Instruction::Mul, {VFMinusOne, Stride});
2765
2766 // Offset for PartN = Offset0 + Part * Stride * VF.
2767 VPValue *PartxStride =
2768 Plan.getConstantInt(IndexTy, Part * getStride(), /*IsSigned=*/true);
2769 VPValue *Offset = Builder.createAdd(
2770 Offset0,
2771 Builder.createOverflowingOp(Instruction::Mul, {PartxStride, VF}));
2773}
2774
2776 auto &Builder = State.Builder;
2777 assert(getOffset() && "Expected prior materialization of offset");
2778 Value *Ptr = State.get(getPointer(), true);
2779 Value *Offset = State.get(getOffset(), true);
2780 Value *ResultPtr = Builder.CreateGEP(getSourceElementType(), Ptr, Offset, "",
2782 State.set(this, ResultPtr, /*IsScalar*/ true);
2783}
2784
2785#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2787 VPSlotTracker &SlotTracker) const {
2788 O << Indent;
2790 O << " = vector-end-pointer";
2791 printFlags(O);
2793}
2794#endif
2795
2797 auto &Builder = State.Builder;
2798 assert(getOffset() &&
2799 "Expected prior simplification of recipe without offset");
2800 Value *Ptr = State.get(getOperand(0), VPLane(0));
2801 Value *Offset = State.get(getOffset(), true);
2802 Value *ResultPtr = Builder.CreateGEP(getSourceElementType(), Ptr, Offset, "",
2804 State.set(this, ResultPtr, /*IsScalar*/ true);
2805}
2806
2807#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2809 VPSlotTracker &SlotTracker) const {
2810 O << Indent;
2812 O << " = vector-pointer";
2813 printFlags(O);
2815}
2816#endif
2817
2819 VPCostContext &Ctx) const {
2820 // A blend will be expanded to a select VPInstruction, which will generate a
2821 // scalar select if only the first lane is used.
2823 VF = ElementCount::getFixed(1);
2824
2825 Type *ResultTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);
2826 Type *CmpTy = toVectorTy(Type::getInt1Ty(Ctx.Types.getContext()), VF);
2827 return (getNumIncomingValues() - 1) *
2828 Ctx.TTI.getCmpSelInstrCost(Instruction::Select, ResultTy, CmpTy,
2829 CmpInst::BAD_ICMP_PREDICATE, Ctx.CostKind);
2830}
2831
2832#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2834 VPSlotTracker &SlotTracker) const {
2835 O << Indent << "BLEND ";
2837 O << " =";
2838 printFlags(O);
2839 if (getNumIncomingValues() == 1) {
2840 // Not a User of any mask: not really blending, this is a
2841 // single-predecessor phi.
2842 getIncomingValue(0)->printAsOperand(O, SlotTracker);
2843 } else {
2844 for (unsigned I = 0, E = getNumIncomingValues(); I < E; ++I) {
2845 if (I != 0)
2846 O << " ";
2847 getIncomingValue(I)->printAsOperand(O, SlotTracker);
2848 if (I == 0 && isNormalized())
2849 continue;
2850 O << "/";
2851 getMask(I)->printAsOperand(O, SlotTracker);
2852 }
2853 }
2854}
2855#endif
2856
2860 "In-loop AnyOf reductions aren't currently supported");
2861 // Propagate the fast-math flags carried by the underlying instruction.
2862 IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder);
2863 State.Builder.setFastMathFlags(getFastMathFlags());
2864 Value *NewVecOp = State.get(getVecOp());
2865 if (VPValue *Cond = getCondOp()) {
2866 Value *NewCond = State.get(Cond, State.VF.isScalar());
2867 VectorType *VecTy = dyn_cast<VectorType>(NewVecOp->getType());
2868 Type *ElementTy = VecTy ? VecTy->getElementType() : NewVecOp->getType();
2869
2870 Value *Start = getRecurrenceIdentity(Kind, ElementTy, getFastMathFlags());
2871 if (State.VF.isVector())
2872 Start = State.Builder.CreateVectorSplat(VecTy->getElementCount(), Start);
2873
2874 Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, Start);
2875 NewVecOp = Select;
2876 }
2877 Value *NewRed;
2878 Value *NextInChain;
2879 if (isOrdered()) {
2880 Value *PrevInChain = State.get(getChainOp(), /*IsScalar*/ true);
2881 if (State.VF.isVector())
2882 NewRed =
2883 createOrderedReduction(State.Builder, Kind, NewVecOp, PrevInChain);
2884 else
2885 NewRed = State.Builder.CreateBinOp(
2887 PrevInChain, NewVecOp);
2888 PrevInChain = NewRed;
2889 NextInChain = NewRed;
2890 } else if (isPartialReduction()) {
2891 assert((Kind == RecurKind::Add || Kind == RecurKind::FAdd) &&
2892 "Unexpected partial reduction kind");
2893 Value *PrevInChain = State.get(getChainOp(), /*IsScalar*/ false);
2894 NewRed = State.Builder.CreateIntrinsic(
2895 PrevInChain->getType(),
2896 Kind == RecurKind::Add ? Intrinsic::vector_partial_reduce_add
2897 : Intrinsic::vector_partial_reduce_fadd,
2898 {PrevInChain, NewVecOp}, State.Builder.getFastMathFlags(),
2899 "partial.reduce");
2900 PrevInChain = NewRed;
2901 NextInChain = NewRed;
2902 } else {
2903 assert(isInLoop() &&
2904 "The reduction must either be ordered, partial or in-loop");
2905 Value *PrevInChain = State.get(getChainOp(), /*IsScalar*/ true);
2906 NewRed = createSimpleReduction(State.Builder, NewVecOp, Kind);
2908 NextInChain = createMinMaxOp(State.Builder, Kind, NewRed, PrevInChain);
2909 else
2910 NextInChain = State.Builder.CreateBinOp(
2912 PrevInChain, NewRed);
2913 }
2914 State.set(this, NextInChain, /*IsScalar*/ !isPartialReduction());
2915}
2916
2918
2919 auto &Builder = State.Builder;
2920 // Propagate the fast-math flags carried by the underlying instruction.
2921 IRBuilderBase::FastMathFlagGuard FMFGuard(Builder);
2922 Builder.setFastMathFlags(getFastMathFlags());
2923
2925 Value *Prev = State.get(getChainOp(), /*IsScalar*/ true);
2926 Value *VecOp = State.get(getVecOp());
2927 Value *EVL = State.get(getEVL(), VPLane(0));
2928
2929 Value *Mask;
2930 if (VPValue *CondOp = getCondOp())
2931 Mask = State.get(CondOp);
2932 else
2933 Mask = Builder.CreateVectorSplat(State.VF, Builder.getTrue());
2934
2935 Value *NewRed;
2936 if (isOrdered()) {
2937 NewRed = createOrderedReduction(Builder, Kind, VecOp, Prev, Mask, EVL);
2938 } else {
2939 NewRed = createSimpleReduction(Builder, VecOp, Kind, Mask, EVL);
2941 NewRed = createMinMaxOp(Builder, Kind, NewRed, Prev);
2942 else
2943 NewRed = Builder.CreateBinOp(
2945 Prev);
2946 }
2947 State.set(this, NewRed, /*IsScalar*/ true);
2948}
2949
2951 VPCostContext &Ctx) const {
2952 RecurKind RdxKind = getRecurrenceKind();
2953 Type *ElementTy = Ctx.Types.inferScalarType(this);
2954 auto *VectorTy = cast<VectorType>(toVectorTy(ElementTy, VF));
2955 unsigned Opcode = RecurrenceDescriptor::getOpcode(RdxKind);
2957 std::optional<FastMathFlags> OptionalFMF =
2958 ElementTy->isFloatingPointTy() ? std::make_optional(FMFs) : std::nullopt;
2959
2960 if (isPartialReduction()) {
2961 InstructionCost CondCost = 0;
2962 if (isConditional()) {
2964 auto *CondTy = cast<VectorType>(
2965 toVectorTy(Ctx.Types.inferScalarType(getCondOp()), VF));
2966 CondCost = Ctx.TTI.getCmpSelInstrCost(Instruction::Select, VectorTy,
2967 CondTy, Pred, Ctx.CostKind);
2968 }
2969 return CondCost + Ctx.TTI.getPartialReductionCost(
2970 Opcode, ElementTy, ElementTy, ElementTy, VF,
2971 TTI::PR_None, TTI::PR_None, {}, Ctx.CostKind,
2972 OptionalFMF);
2973 }
2974
2975 // TODO: Support any-of reductions.
2976 assert(
2978 ForceTargetInstructionCost.getNumOccurrences() > 0) &&
2979 "Any-of reduction not implemented in VPlan-based cost model currently.");
2980
2981 // Note that TTI should model the cost of moving result to the scalar register
2982 // and the BinOp cost in the getMinMaxReductionCost().
2985 return Ctx.TTI.getMinMaxReductionCost(Id, VectorTy, FMFs, Ctx.CostKind);
2986 }
2987
2988 // Note that TTI should model the cost of moving result to the scalar register
2989 // and the BinOp cost in the getArithmeticReductionCost().
2990 return Ctx.TTI.getArithmeticReductionCost(Opcode, VectorTy, OptionalFMF,
2991 Ctx.CostKind);
2992}
2993
2994VPExpressionRecipe::VPExpressionRecipe(
2995 ExpressionTypes ExpressionType,
2996 ArrayRef<VPSingleDefRecipe *> ExpressionRecipes)
2997 : VPSingleDefRecipe(VPRecipeBase::VPExpressionSC, {}, {}),
2998 ExpressionRecipes(ExpressionRecipes), ExpressionType(ExpressionType) {
2999 assert(!ExpressionRecipes.empty() && "Nothing to combine?");
3000 assert(
3001 none_of(ExpressionRecipes,
3002 [](VPSingleDefRecipe *R) { return R->mayHaveSideEffects(); }) &&
3003 "expression cannot contain recipes with side-effects");
3004
3005 // Maintain a copy of the expression recipes as a set of users.
3006 SmallPtrSet<VPUser *, 4> ExpressionRecipesAsSetOfUsers;
3007 for (auto *R : ExpressionRecipes)
3008 ExpressionRecipesAsSetOfUsers.insert(R);
3009
3010 // Recipes in the expression, except the last one, must only be used by
3011 // (other) recipes inside the expression. If there are other users, external
3012 // to the expression, use a clone of the recipe for external users.
3013 for (VPSingleDefRecipe *R : reverse(ExpressionRecipes)) {
3014 if (R != ExpressionRecipes.back() &&
3015 any_of(R->users(), [&ExpressionRecipesAsSetOfUsers](VPUser *U) {
3016 return !ExpressionRecipesAsSetOfUsers.contains(U);
3017 })) {
3018 // There are users outside of the expression. Clone the recipe and use the
3019 // clone those external users.
3020 VPSingleDefRecipe *CopyForExtUsers = R->clone();
3021 R->replaceUsesWithIf(CopyForExtUsers, [&ExpressionRecipesAsSetOfUsers](
3022 VPUser &U, unsigned) {
3023 return !ExpressionRecipesAsSetOfUsers.contains(&U);
3024 });
3025 CopyForExtUsers->insertBefore(R);
3026 }
3027 if (R->getParent())
3028 R->removeFromParent();
3029 }
3030
3031 // Internalize all external operands to the expression recipes. To do so,
3032 // create new temporary VPValues for all operands defined by a recipe outside
3033 // the expression. The original operands are added as operands of the
3034 // VPExpressionRecipe itself.
3035 for (auto *R : ExpressionRecipes) {
3036 for (const auto &[Idx, Op] : enumerate(R->operands())) {
3037 auto *Def = Op->getDefiningRecipe();
3038 if (Def && ExpressionRecipesAsSetOfUsers.contains(Def))
3039 continue;
3040 addOperand(Op);
3041 LiveInPlaceholders.push_back(new VPSymbolicValue(nullptr));
3042 }
3043 }
3044
3045 // Replace each external operand with the first one created for it in
3046 // LiveInPlaceholders.
3047 for (auto *R : ExpressionRecipes)
3048 for (auto const &[LiveIn, Tmp] : zip(operands(), LiveInPlaceholders))
3049 R->replaceUsesOfWith(LiveIn, Tmp);
3050}
3051
3053 for (auto *R : ExpressionRecipes)
3054 // Since the list could contain duplicates, make sure the recipe hasn't
3055 // already been inserted.
3056 if (!R->getParent())
3057 R->insertBefore(this);
3058
3059 for (const auto &[Idx, Op] : enumerate(operands()))
3060 LiveInPlaceholders[Idx]->replaceAllUsesWith(Op);
3061
3062 replaceAllUsesWith(ExpressionRecipes.back());
3063 ExpressionRecipes.clear();
3064}
3065
3067 VPCostContext &Ctx) const {
3068 Type *RedTy = Ctx.Types.inferScalarType(this);
3069 auto *SrcVecTy = cast<VectorType>(
3070 toVectorTy(Ctx.Types.inferScalarType(getOperand(0)), VF));
3071 unsigned Opcode = RecurrenceDescriptor::getOpcode(
3072 cast<VPReductionRecipe>(ExpressionRecipes.back())->getRecurrenceKind());
3073 switch (ExpressionType) {
3074 case ExpressionTypes::ExtendedReduction: {
3075 unsigned Opcode = RecurrenceDescriptor::getOpcode(
3076 cast<VPReductionRecipe>(ExpressionRecipes[1])->getRecurrenceKind());
3077 auto *ExtR = cast<VPWidenCastRecipe>(ExpressionRecipes[0]);
3078 auto *RedR = cast<VPReductionRecipe>(ExpressionRecipes.back());
3079
3080 if (RedR->isPartialReduction())
3081 return Ctx.TTI.getPartialReductionCost(
3082 Opcode, Ctx.Types.inferScalarType(getOperand(0)), nullptr, RedTy, VF,
3084 TargetTransformInfo::PR_None, std::nullopt, Ctx.CostKind,
3085 RedTy->isFloatingPointTy() ? std::optional{RedR->getFastMathFlags()}
3086 : std::nullopt);
3087 else if (!RedTy->isFloatingPointTy())
3088 // TTI::getExtendedReductionCost only supports integer types.
3089 return Ctx.TTI.getExtendedReductionCost(
3090 Opcode, ExtR->getOpcode() == Instruction::ZExt, RedTy, SrcVecTy,
3091 std::nullopt, Ctx.CostKind);
3092 else
3094 }
3095 case ExpressionTypes::MulAccReduction:
3096 return Ctx.TTI.getMulAccReductionCost(false, Opcode, RedTy, SrcVecTy,
3097 Ctx.CostKind);
3098
3099 case ExpressionTypes::ExtNegatedMulAccReduction:
3100 assert(Opcode == Instruction::Add && "Unexpected opcode");
3101 Opcode = Instruction::Sub;
3102 [[fallthrough]];
3103 case ExpressionTypes::ExtMulAccReduction: {
3104 auto *RedR = cast<VPReductionRecipe>(ExpressionRecipes.back());
3105 if (RedR->isPartialReduction()) {
3106 auto *Ext0R = cast<VPWidenCastRecipe>(ExpressionRecipes[0]);
3107 auto *Ext1R = cast<VPWidenCastRecipe>(ExpressionRecipes[1]);
3108 auto *Mul = cast<VPWidenRecipe>(ExpressionRecipes[2]);
3109 return Ctx.TTI.getPartialReductionCost(
3110 Opcode, Ctx.Types.inferScalarType(getOperand(0)),
3111 Ctx.Types.inferScalarType(getOperand(1)), RedTy, VF,
3113 Ext0R->getOpcode()),
3115 Ext1R->getOpcode()),
3116 Mul->getOpcode(), Ctx.CostKind,
3117 RedTy->isFloatingPointTy() ? std::optional{RedR->getFastMathFlags()}
3118 : std::nullopt);
3119 }
3120 return Ctx.TTI.getMulAccReductionCost(
3121 cast<VPWidenCastRecipe>(ExpressionRecipes.front())->getOpcode() ==
3122 Instruction::ZExt,
3123 Opcode, RedTy, SrcVecTy, Ctx.CostKind);
3124 }
3125 }
3126 llvm_unreachable("Unknown VPExpressionRecipe::ExpressionTypes enum");
3127}
3128
3130 return any_of(ExpressionRecipes, [](VPSingleDefRecipe *R) {
3131 return R->mayReadFromMemory() || R->mayWriteToMemory();
3132 });
3133}
3134
3136 assert(
3137 none_of(ExpressionRecipes,
3138 [](VPSingleDefRecipe *R) { return R->mayHaveSideEffects(); }) &&
3139 "expression cannot contain recipes with side-effects");
3140 return false;
3141}
3142
3144 // Cannot use vputils::isSingleScalar(), because all external operands
3145 // of the expression will be live-ins while bundled.
3146 auto *RR = dyn_cast<VPReductionRecipe>(ExpressionRecipes.back());
3147 return RR && !RR->isPartialReduction();
3148}
3149
3150#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3151
3153 VPSlotTracker &SlotTracker) const {
3154 O << Indent << "EXPRESSION ";
3156 O << " = ";
3157 auto *Red = cast<VPReductionRecipe>(ExpressionRecipes.back());
3158 unsigned Opcode = RecurrenceDescriptor::getOpcode(Red->getRecurrenceKind());
3159
3160 switch (ExpressionType) {
3161 case ExpressionTypes::ExtendedReduction: {
3163 O << " + " << (Red->isPartialReduction() ? "partial." : "") << "reduce.";
3164 O << Instruction::getOpcodeName(Opcode) << " (";
3166 Red->printFlags(O);
3167
3168 auto *Ext0 = cast<VPWidenCastRecipe>(ExpressionRecipes[0]);
3169 O << Instruction::getOpcodeName(Ext0->getOpcode()) << " to "
3170 << *Ext0->getResultType();
3171 if (Red->isConditional()) {
3172 O << ", ";
3173 Red->getCondOp()->printAsOperand(O, SlotTracker);
3174 }
3175 O << ")";
3176 break;
3177 }
3178 case ExpressionTypes::ExtNegatedMulAccReduction: {
3180 O << " + " << (Red->isPartialReduction() ? "partial." : "") << "reduce.";
3182 RecurrenceDescriptor::getOpcode(Red->getRecurrenceKind()))
3183 << " (sub (0, mul";
3184 auto *Mul = cast<VPWidenRecipe>(ExpressionRecipes[2]);
3185 Mul->printFlags(O);
3186 O << "(";
3188 auto *Ext0 = cast<VPWidenCastRecipe>(ExpressionRecipes[0]);
3189 O << " " << Instruction::getOpcodeName(Ext0->getOpcode()) << " to "
3190 << *Ext0->getResultType() << "), (";
3192 auto *Ext1 = cast<VPWidenCastRecipe>(ExpressionRecipes[1]);
3193 O << " " << Instruction::getOpcodeName(Ext1->getOpcode()) << " to "
3194 << *Ext1->getResultType() << ")";
3195 if (Red->isConditional()) {
3196 O << ", ";
3197 Red->getCondOp()->printAsOperand(O, SlotTracker);
3198 }
3199 O << "))";
3200 break;
3201 }
3202 case ExpressionTypes::MulAccReduction:
3203 case ExpressionTypes::ExtMulAccReduction: {
3205 O << " + " << (Red->isPartialReduction() ? "partial." : "") << "reduce.";
3207 RecurrenceDescriptor::getOpcode(Red->getRecurrenceKind()))
3208 << " (";
3209 O << "mul";
3210 bool IsExtended = ExpressionType == ExpressionTypes::ExtMulAccReduction;
3211 auto *Mul = cast<VPWidenRecipe>(IsExtended ? ExpressionRecipes[2]
3212 : ExpressionRecipes[0]);
3213 Mul->printFlags(O);
3214 if (IsExtended)
3215 O << "(";
3217 if (IsExtended) {
3218 auto *Ext0 = cast<VPWidenCastRecipe>(ExpressionRecipes[0]);
3219 O << " " << Instruction::getOpcodeName(Ext0->getOpcode()) << " to "
3220 << *Ext0->getResultType() << "), (";
3221 } else {
3222 O << ", ";
3223 }
3225 if (IsExtended) {
3226 auto *Ext1 = cast<VPWidenCastRecipe>(ExpressionRecipes[1]);
3227 O << " " << Instruction::getOpcodeName(Ext1->getOpcode()) << " to "
3228 << *Ext1->getResultType() << ")";
3229 }
3230 if (Red->isConditional()) {
3231 O << ", ";
3232 Red->getCondOp()->printAsOperand(O, SlotTracker);
3233 }
3234 O << ")";
3235 break;
3236 }
3237 }
3238}
3239
3241 VPSlotTracker &SlotTracker) const {
3242 if (isPartialReduction())
3243 O << Indent << "PARTIAL-REDUCE ";
3244 else
3245 O << Indent << "REDUCE ";
3247 O << " = ";
3249 O << " +";
3250 printFlags(O);
3251 O << " reduce."
3254 << " (";
3256 if (isConditional()) {
3257 O << ", ";
3259 }
3260 O << ")";
3261}
3262
3264 VPSlotTracker &SlotTracker) const {
3265 O << Indent << "REDUCE ";
3267 O << " = ";
3269 O << " +";
3270 printFlags(O);
3271 O << " vp.reduce."
3274 << " (";
3276 O << ", ";
3278 if (isConditional()) {
3279 O << ", ";
3281 }
3282 O << ")";
3283}
3284
3285#endif
3286
3288 assert(IsSingleScalar &&
3289 "VPReplicateRecipes must be unrolled before ::execute");
3290 auto *Instr = getUnderlyingInstr();
3291 Instruction *Cloned = Instr->clone();
3292 if (!Instr->getType()->isVoidTy()) {
3293 Cloned->setName(Instr->getName() + ".cloned");
3294 Type *ResultTy = State.TypeAnalysis.inferScalarType(this);
3295 // The operands of the replicate recipe may have been narrowed, resulting in
3296 // a narrower result type. Update the type of the cloned instruction to the
3297 // correct type.
3298 if (ResultTy != Cloned->getType())
3299 Cloned->mutateType(ResultTy);
3300 }
3301
3302 applyFlags(*Cloned);
3303 applyMetadata(*Cloned);
3304
3305 if (hasPredicate())
3306 cast<CmpInst>(Cloned)->setPredicate(getPredicate());
3307
3308 // Replace the operands of the cloned instructions with their scalar
3309 // equivalents in the new loop.
3310 for (const auto &[Idx, V] : enumerate(operands()))
3311 Cloned->setOperand(Idx, State.get(V, true));
3312
3313 // Place the cloned scalar in the new loop.
3314 State.Builder.Insert(Cloned);
3315
3316 State.set(this, Cloned, true);
3317
3318 // If we just cloned a new assumption, add it the assumption cache.
3319 if (auto *II = dyn_cast<AssumeInst>(Cloned))
3320 State.AC->registerAssumption(II);
3321}
3322
3323/// Returns a SCEV expression for \p Ptr if it is a pointer computation for
3324/// which the legacy cost model computes a SCEV expression when computing the
3325/// address cost. Computing SCEVs for VPValues is incomplete and returns
3326/// SCEVCouldNotCompute in cases the legacy cost model can compute SCEVs. In
3327/// those cases we fall back to the legacy cost model. Otherwise return nullptr.
3328static const SCEV *getAddressAccessSCEV(const VPValue *Ptr,
3330 const Loop *L) {
3331 const SCEV *Addr = vputils::getSCEVExprForVPValue(Ptr, PSE, L);
3332 if (isa<SCEVCouldNotCompute>(Addr))
3333 return Addr;
3334
3335 return vputils::isAddressSCEVForCost(Addr, *PSE.getSE(), L) ? Addr : nullptr;
3336}
3337
3338/// Return true if \p R is a predicated load/store with a loop-invariant address
3339/// only masked by the header mask.
3341 const SCEV *PtrSCEV,
3342 VPCostContext &Ctx) {
3343 const VPRegionBlock *ParentRegion = R.getRegion();
3344 if (!ParentRegion || !ParentRegion->isReplicator() || !PtrSCEV ||
3345 !Ctx.PSE.getSE()->isLoopInvariant(PtrSCEV, Ctx.L))
3346 return false;
3347 auto *BOM =
3349 return vputils::isHeaderMask(BOM->getOperand(0), *ParentRegion->getPlan());
3350}
3351
3353 VPCostContext &Ctx) const {
3355 // VPReplicateRecipe may be cloned as part of an existing VPlan-to-VPlan
3356 // transform, avoid computing their cost multiple times for now.
3357 Ctx.SkipCostComputation.insert(UI);
3358
3359 if (VF.isScalable() && !isSingleScalar())
3361
3362 switch (UI->getOpcode()) {
3363 case Instruction::Alloca:
3364 if (VF.isScalable())
3366 return Ctx.TTI.getArithmeticInstrCost(
3367 Instruction::Mul, Ctx.Types.inferScalarType(this), Ctx.CostKind);
3368 case Instruction::GetElementPtr:
3369 // We mark this instruction as zero-cost because the cost of GEPs in
3370 // vectorized code depends on whether the corresponding memory instruction
3371 // is scalarized or not. Therefore, we handle GEPs with the memory
3372 // instruction cost.
3373 return 0;
3374 case Instruction::Call: {
3375 auto *CalledFn =
3377 Type *ResultTy = Ctx.Types.inferScalarType(this);
3379 return computeCallCost(CalledFn, ResultTy, ArgOps, isSingleScalar(), VF,
3380 Ctx);
3381 }
3382 case Instruction::Add:
3383 case Instruction::Sub:
3384 case Instruction::FAdd:
3385 case Instruction::FSub:
3386 case Instruction::Mul:
3387 case Instruction::FMul:
3388 case Instruction::FDiv:
3389 case Instruction::FRem:
3390 case Instruction::Shl:
3391 case Instruction::LShr:
3392 case Instruction::AShr:
3393 case Instruction::And:
3394 case Instruction::Or:
3395 case Instruction::Xor:
3396 case Instruction::ICmp:
3397 case Instruction::FCmp:
3399 Ctx) *
3400 (isSingleScalar() ? 1 : VF.getFixedValue());
3401 case Instruction::SDiv:
3402 case Instruction::UDiv:
3403 case Instruction::SRem:
3404 case Instruction::URem: {
3405 InstructionCost ScalarCost =
3407 if (isSingleScalar())
3408 return ScalarCost;
3409
3410 // If any of the operands is from a different replicate region and has its
3411 // cost skipped, it may have been forced to scalar. Fall back to legacy cost
3412 // model to avoid cost mis-match.
3413 if (any_of(operands(), [&Ctx, VF](VPValue *Op) {
3414 auto *PredR = dyn_cast<VPPredInstPHIRecipe>(Op);
3415 if (!PredR)
3416 return false;
3417 return Ctx.skipCostComputation(
3419 PredR->getOperand(0)->getUnderlyingValue()),
3420 VF.isVector());
3421 }))
3422 break;
3423
3424 ScalarCost = ScalarCost * VF.getFixedValue() +
3425 Ctx.getScalarizationOverhead(Ctx.Types.inferScalarType(this),
3426 to_vector(operands()), VF);
3427 // If the recipe is not predicated (i.e. not in a replicate region), return
3428 // the scalar cost. Otherwise handle predicated cost.
3429 if (!getRegion()->isReplicator())
3430 return ScalarCost;
3431
3432 // Account for the phi nodes that we will create.
3433 ScalarCost += VF.getFixedValue() *
3434 Ctx.TTI.getCFInstrCost(Instruction::PHI, Ctx.CostKind);
3435 // Scale the cost by the probability of executing the predicated blocks.
3436 // This assumes the predicated block for each vector lane is equally
3437 // likely.
3438 ScalarCost /= Ctx.getPredBlockCostDivisor(UI->getParent());
3439 return ScalarCost;
3440 }
3441 case Instruction::Load:
3442 case Instruction::Store: {
3443 bool IsLoad = UI->getOpcode() == Instruction::Load;
3444 const VPValue *PtrOp = getOperand(!IsLoad);
3445 const SCEV *PtrSCEV = getAddressAccessSCEV(PtrOp, Ctx.PSE, Ctx.L);
3447 break;
3448
3449 Type *ValTy = Ctx.Types.inferScalarType(IsLoad ? this : getOperand(0));
3450 Type *ScalarPtrTy = Ctx.Types.inferScalarType(PtrOp);
3451 const Align Alignment = getLoadStoreAlignment(UI);
3452 unsigned AS = cast<PointerType>(ScalarPtrTy)->getAddressSpace();
3454 bool PreferVectorizedAddressing = Ctx.TTI.prefersVectorizedAddressing();
3455 bool UsedByLoadStoreAddress =
3456 !PreferVectorizedAddressing && vputils::isUsedByLoadStoreAddress(this);
3457 InstructionCost ScalarMemOpCost = Ctx.TTI.getMemoryOpCost(
3458 UI->getOpcode(), ValTy, Alignment, AS, Ctx.CostKind, OpInfo,
3459 UsedByLoadStoreAddress ? UI : nullptr);
3460
3461 // Check if this is a predicated load/store with a loop-invariant address
3462 // only masked by the header mask. If so, return the uniform mem op cost.
3463 if (isPredicatedUniformMemOpAfterTailFolding(*this, PtrSCEV, Ctx)) {
3464 InstructionCost UniformCost =
3465 ScalarMemOpCost +
3466 Ctx.TTI.getAddressComputationCost(ScalarPtrTy, /*SE=*/nullptr,
3467 /*Ptr=*/nullptr, Ctx.CostKind);
3468 auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
3469 if (IsLoad) {
3470 return UniformCost +
3471 Ctx.TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast,
3472 VectorTy, VectorTy, {}, Ctx.CostKind);
3473 }
3474
3475 VPValue *StoredVal = getOperand(0);
3476 if (!StoredVal->isDefinedOutsideLoopRegions())
3477 UniformCost += Ctx.TTI.getIndexedVectorInstrCostFromEnd(
3478 Instruction::ExtractElement, VectorTy, Ctx.CostKind, 0);
3479 return UniformCost;
3480 }
3481
3482 Type *PtrTy = isSingleScalar() ? ScalarPtrTy : toVectorTy(ScalarPtrTy, VF);
3483 InstructionCost ScalarCost =
3484 ScalarMemOpCost +
3485 Ctx.TTI.getAddressComputationCost(
3486 PtrTy, UsedByLoadStoreAddress ? nullptr : Ctx.PSE.getSE(), PtrSCEV,
3487 Ctx.CostKind);
3488 if (isSingleScalar())
3489 return ScalarCost;
3490
3491 SmallVector<const VPValue *> OpsToScalarize;
3492 Type *ResultTy = Type::getVoidTy(PtrTy->getContext());
3493 // Set ResultTy and OpsToScalarize, if scalarization is needed. Currently we
3494 // don't assign scalarization overhead in general, if the target prefers
3495 // vectorized addressing or the loaded value is used as part of an address
3496 // of another load or store.
3497 if (!UsedByLoadStoreAddress) {
3498 bool EfficientVectorLoadStore =
3499 Ctx.TTI.supportsEfficientVectorElementLoadStore();
3500 if (!(IsLoad && !PreferVectorizedAddressing) &&
3501 !(!IsLoad && EfficientVectorLoadStore))
3502 append_range(OpsToScalarize, operands());
3503
3504 if (!EfficientVectorLoadStore)
3505 ResultTy = Ctx.Types.inferScalarType(this);
3506 }
3507
3511 (ScalarCost * VF.getFixedValue()) +
3512 Ctx.getScalarizationOverhead(ResultTy, OpsToScalarize, VF, VIC, true);
3513
3514 const VPRegionBlock *ParentRegion = getRegion();
3515 if (ParentRegion && ParentRegion->isReplicator()) {
3516 if (!PtrSCEV)
3517 break;
3518 Cost /= Ctx.getPredBlockCostDivisor(UI->getParent());
3519 Cost += Ctx.TTI.getCFInstrCost(Instruction::CondBr, Ctx.CostKind);
3520
3521 auto *VecI1Ty = VectorType::get(
3522 IntegerType::getInt1Ty(Ctx.L->getHeader()->getContext()), VF);
3523 Cost += Ctx.TTI.getScalarizationOverhead(
3524 VecI1Ty, APInt::getAllOnes(VF.getFixedValue()),
3525 /*Insert=*/false, /*Extract=*/true, Ctx.CostKind);
3526
3527 if (Ctx.useEmulatedMaskMemRefHack(this, VF)) {
3528 // Artificially setting to a high enough value to practically disable
3529 // vectorization with such operations.
3530 return 3000000;
3531 }
3532 }
3533 return Cost;
3534 }
3535 case Instruction::SExt:
3536 case Instruction::ZExt:
3537 case Instruction::FPToUI:
3538 case Instruction::FPToSI:
3539 case Instruction::FPExt:
3540 case Instruction::PtrToInt:
3541 case Instruction::PtrToAddr:
3542 case Instruction::IntToPtr:
3543 case Instruction::SIToFP:
3544 case Instruction::UIToFP:
3545 case Instruction::Trunc:
3546 case Instruction::FPTrunc:
3547 case Instruction::Select:
3548 case Instruction::AddrSpaceCast: {
3550 Ctx) *
3551 (isSingleScalar() ? 1 : VF.getFixedValue());
3552 }
3553 case Instruction::ExtractValue:
3554 case Instruction::InsertValue:
3555 return Ctx.TTI.getInsertExtractValueCost(getOpcode(), Ctx.CostKind);
3556 }
3557
3558 return Ctx.getLegacyCost(UI, VF);
3559}
3560
3562 Function *CalledFn, Type *ResultTy, ArrayRef<const VPValue *> ArgOps,
3563 bool IsSingleScalar, ElementCount VF, VPCostContext &Ctx) {
3565 ArgOps, [&](const VPValue *Op) { return Ctx.Types.inferScalarType(Op); });
3566
3567 Intrinsic::ID IntrinID = CalledFn->getIntrinsicID();
3568 auto GetIntrinsicCost = [&] {
3569 if (!IntrinID)
3571 return Ctx.TTI.getIntrinsicInstrCost(
3572 IntrinsicCostAttributes(IntrinID, ResultTy, Tys), Ctx.CostKind);
3573 };
3574
3575 if (IntrinID && VPCostContext::isFreeScalarIntrinsic(IntrinID)) {
3576 assert(GetIntrinsicCost() == 0 && "scalarizing intrinsic should be free");
3577 return 0;
3578 }
3579
3580 InstructionCost ScalarCallCost =
3581 Ctx.TTI.getCallInstrCost(CalledFn, ResultTy, Tys, Ctx.CostKind);
3582 if (IsSingleScalar) {
3583 ScalarCallCost = std::min(ScalarCallCost, GetIntrinsicCost());
3584 return ScalarCallCost;
3585 }
3586
3587 // Scalarization overhead is undefined for scalable VFs.
3588 if (VF.isScalable())
3590
3591 return ScalarCallCost * VF.getFixedValue() +
3592 Ctx.getScalarizationOverhead(ResultTy, ArgOps, VF);
3593}
3594
3595#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3597 VPSlotTracker &SlotTracker) const {
3598 O << Indent << (IsSingleScalar ? "CLONE " : "REPLICATE ");
3599
3600 if (!getUnderlyingInstr()->getType()->isVoidTy()) {
3602 O << " = ";
3603 }
3604 if (auto *CB = dyn_cast<CallBase>(getUnderlyingInstr())) {
3605 O << "call";
3606 printFlags(O);
3607 O << "@" << CB->getCalledFunction()->getName() << "(";
3609 O, [&O, &SlotTracker](VPValue *Op) {
3610 Op->printAsOperand(O, SlotTracker);
3611 });
3612 O << ")";
3613 } else {
3615 printFlags(O);
3617 }
3618
3619 // Find if the recipe is used by a widened recipe via an intervening
3620 // VPPredInstPHIRecipe. In this case, also pack the scalar values in a vector.
3621 if (any_of(users(), [](const VPUser *U) {
3622 if (auto *PredR = dyn_cast<VPPredInstPHIRecipe>(U))
3623 return !vputils::onlyScalarValuesUsed(PredR);
3624 return false;
3625 }))
3626 O << " (S->V)";
3627}
3628#endif
3629
3631 llvm_unreachable("recipe must be removed when dissolving replicate region");
3632}
3633
3635 VPCostContext &Ctx) const {
3636 // The legacy cost model doesn't assign costs to branches for individual
3637 // replicate regions. Match the current behavior in the VPlan cost model for
3638 // now.
3639 return 0;
3640}
3641
3643 llvm_unreachable("recipe must be removed when dissolving replicate region");
3644}
3645
3646#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3648 VPSlotTracker &SlotTracker) const {
3649 O << Indent << "PHI-PREDICATED-INSTRUCTION ";
3651 O << " = ";
3653}
3654#endif
3655
3657 VPCostContext &Ctx) const {
3658 const VPRecipeBase *R = getAsRecipe();
3660 unsigned AS = cast<PointerType>(Ctx.Types.inferScalarType(getAddr()))
3661 ->getAddressSpace();
3663 ? Instruction::Load
3664 : Instruction::Store;
3665
3666 if (!Consecutive) {
3667 // TODO: Using the original IR may not be accurate.
3668 // Currently, ARM will use the underlying IR to calculate gather/scatter
3669 // instruction cost.
3670 [[maybe_unused]] auto IsReverseMask = [this, R]() {
3671 VPValue *Mask = getMask();
3672 if (!Mask)
3673 return false;
3674
3677
3678 return match(Mask, m_Reverse(m_VPValue()));
3679 };
3680 assert(!IsReverseMask() &&
3681 "Inconsecutive memory access should not have reverse order");
3683 Type *PtrTy = Ptr->getType();
3684
3685 // If the address value is uniform across all lanes, then the address can be
3686 // calculated with scalar type and broadcast.
3688 PtrTy = toVectorTy(PtrTy, VF);
3689
3690 unsigned IID = isa<VPWidenLoadRecipe>(R) ? Intrinsic::masked_gather
3691 : isa<VPWidenStoreRecipe>(R) ? Intrinsic::masked_scatter
3692 : isa<VPWidenLoadEVLRecipe>(R) ? Intrinsic::vp_gather
3693 : Intrinsic::vp_scatter;
3694 return Ctx.TTI.getAddressComputationCost(PtrTy, nullptr, nullptr,
3695 Ctx.CostKind) +
3696 Ctx.TTI.getMemIntrinsicInstrCost(
3698 &Ingredient),
3699 Ctx.CostKind);
3700 }
3701
3703 if (IsMasked) {
3704 unsigned IID = isa<VPWidenLoadRecipe>(R) ? Intrinsic::masked_load
3705 : Intrinsic::masked_store;
3706 Cost += Ctx.TTI.getMemIntrinsicInstrCost(
3707 MemIntrinsicCostAttributes(IID, Ty, Alignment, AS), Ctx.CostKind);
3708 } else {
3709 TTI::OperandValueInfo OpInfo = Ctx.getOperandInfo(
3711 : R->getOperand(1));
3712 Cost += Ctx.TTI.getMemoryOpCost(Opcode, Ty, Alignment, AS, Ctx.CostKind,
3713 OpInfo, &Ingredient);
3714 }
3715 return Cost;
3716}
3717
3719 Type *ScalarDataTy = getLoadStoreType(&Ingredient);
3720 auto *DataTy = VectorType::get(ScalarDataTy, State.VF);
3721 bool CreateGather = !isConsecutive();
3722
3723 auto &Builder = State.Builder;
3724 Value *Mask = nullptr;
3725 if (auto *VPMask = getMask())
3726 Mask = State.get(VPMask);
3727
3728 Value *Addr = State.get(getAddr(), /*IsScalar*/ !CreateGather);
3729 Value *NewLI;
3730 if (CreateGather) {
3731 NewLI = Builder.CreateMaskedGather(DataTy, Addr, Alignment, Mask, nullptr,
3732 "wide.masked.gather");
3733 } else if (Mask) {
3734 NewLI =
3735 Builder.CreateMaskedLoad(DataTy, Addr, Alignment, Mask,
3736 PoisonValue::get(DataTy), "wide.masked.load");
3737 } else {
3738 NewLI = Builder.CreateAlignedLoad(DataTy, Addr, Alignment, "wide.load");
3739 }
3741 State.set(this, NewLI);
3742}
3743
3744#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3746 VPSlotTracker &SlotTracker) const {
3747 O << Indent << "WIDEN ";
3749 O << " = load ";
3751}
3752#endif
3753
3755 Type *ScalarDataTy = getLoadStoreType(&Ingredient);
3756 auto *DataTy = VectorType::get(ScalarDataTy, State.VF);
3757 bool CreateGather = !isConsecutive();
3758
3759 auto &Builder = State.Builder;
3760 CallInst *NewLI;
3761 Value *EVL = State.get(getEVL(), VPLane(0));
3762 Value *Addr = State.get(getAddr(), !CreateGather);
3763 Value *Mask = nullptr;
3764 if (VPValue *VPMask = getMask())
3765 Mask = State.get(VPMask);
3766 else
3767 Mask = Builder.CreateVectorSplat(State.VF, Builder.getTrue());
3768
3769 if (CreateGather) {
3770 NewLI =
3771 Builder.CreateIntrinsic(DataTy, Intrinsic::vp_gather, {Addr, Mask, EVL},
3772 nullptr, "wide.masked.gather");
3773 } else {
3774 NewLI = Builder.CreateIntrinsic(DataTy, Intrinsic::vp_load,
3775 {Addr, Mask, EVL}, nullptr, "vp.op.load");
3776 }
3777 NewLI->addParamAttr(
3779 applyMetadata(*NewLI);
3780 Instruction *Res = NewLI;
3781 State.set(this, Res);
3782}
3783
3785 VPCostContext &Ctx) const {
3786 if (!Consecutive || IsMasked)
3787 return VPWidenMemoryRecipe::computeCost(VF, Ctx);
3788
3789 // We need to use the getMemIntrinsicInstrCost() instead of getMemoryOpCost()
3790 // here because the EVL recipes using EVL to replace the tail mask. But in the
3791 // legacy model, it will always calculate the cost of mask.
3792 // TODO: Using getMemoryOpCost() instead of getMemIntrinsicInstrCost when we
3793 // don't need to compare to the legacy cost model.
3795 unsigned AS = cast<PointerType>(Ctx.Types.inferScalarType(getAddr()))
3796 ->getAddressSpace();
3797 return Ctx.TTI.getMemIntrinsicInstrCost(
3798 MemIntrinsicCostAttributes(Intrinsic::vp_load, Ty, Alignment, AS),
3799 Ctx.CostKind);
3800}
3801
3802#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3804 VPSlotTracker &SlotTracker) const {
3805 O << Indent << "WIDEN ";
3807 O << " = vp.load ";
3809}
3810#endif
3811
3813 VPValue *StoredVPValue = getStoredValue();
3814 bool CreateScatter = !isConsecutive();
3815
3816 auto &Builder = State.Builder;
3817
3818 Value *Mask = nullptr;
3819 if (auto *VPMask = getMask())
3820 Mask = State.get(VPMask);
3821
3822 Value *StoredVal = State.get(StoredVPValue);
3823 Value *Addr = State.get(getAddr(), /*IsScalar*/ !CreateScatter);
3824 Instruction *NewSI = nullptr;
3825 if (CreateScatter)
3826 NewSI = Builder.CreateMaskedScatter(StoredVal, Addr, Alignment, Mask);
3827 else if (Mask)
3828 NewSI = Builder.CreateMaskedStore(StoredVal, Addr, Alignment, Mask);
3829 else
3830 NewSI = Builder.CreateAlignedStore(StoredVal, Addr, Alignment);
3831 applyMetadata(*NewSI);
3832}
3833
3834#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3836 VPSlotTracker &SlotTracker) const {
3837 O << Indent << "WIDEN store ";
3839}
3840#endif
3841
3843 VPValue *StoredValue = getStoredValue();
3844 bool CreateScatter = !isConsecutive();
3845
3846 auto &Builder = State.Builder;
3847
3848 CallInst *NewSI = nullptr;
3849 Value *StoredVal = State.get(StoredValue);
3850 Value *EVL = State.get(getEVL(), VPLane(0));
3851 Value *Mask = nullptr;
3852 if (VPValue *VPMask = getMask())
3853 Mask = State.get(VPMask);
3854 else
3855 Mask = Builder.CreateVectorSplat(State.VF, Builder.getTrue());
3856
3857 Value *Addr = State.get(getAddr(), !CreateScatter);
3858 if (CreateScatter) {
3859 NewSI = Builder.CreateIntrinsic(Type::getVoidTy(EVL->getContext()),
3860 Intrinsic::vp_scatter,
3861 {StoredVal, Addr, Mask, EVL});
3862 } else {
3863 NewSI = Builder.CreateIntrinsic(Type::getVoidTy(EVL->getContext()),
3864 Intrinsic::vp_store,
3865 {StoredVal, Addr, Mask, EVL});
3866 }
3867 NewSI->addParamAttr(
3869 applyMetadata(*NewSI);
3870}
3871
3873 VPCostContext &Ctx) const {
3874 if (!Consecutive || IsMasked)
3875 return VPWidenMemoryRecipe::computeCost(VF, Ctx);
3876
3877 // We need to use the getMemIntrinsicInstrCost() instead of getMemoryOpCost()
3878 // here because the EVL recipes using EVL to replace the tail mask. But in the
3879 // legacy model, it will always calculate the cost of mask.
3880 // TODO: Using getMemoryOpCost() instead of getMemIntrinsicInstrCost when we
3881 // don't need to compare to the legacy cost model.
3883 unsigned AS = cast<PointerType>(Ctx.Types.inferScalarType(getAddr()))
3884 ->getAddressSpace();
3885 return Ctx.TTI.getMemIntrinsicInstrCost(
3886 MemIntrinsicCostAttributes(Intrinsic::vp_store, Ty, Alignment, AS),
3887 Ctx.CostKind);
3888}
3889
3890#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3892 VPSlotTracker &SlotTracker) const {
3893 O << Indent << "WIDEN vp.store ";
3895}
3896#endif
3897
3899 VectorType *DstVTy, const DataLayout &DL) {
3900 // Verify that V is a vector type with same number of elements as DstVTy.
3901 auto VF = DstVTy->getElementCount();
3902 auto *SrcVecTy = cast<VectorType>(V->getType());
3903 assert(VF == SrcVecTy->getElementCount() && "Vector dimensions do not match");
3904 Type *SrcElemTy = SrcVecTy->getElementType();
3905 Type *DstElemTy = DstVTy->getElementType();
3906 assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
3907 "Vector elements must have same size");
3908
3909 // Do a direct cast if element types are castable.
3910 if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
3911 return Builder.CreateBitOrPointerCast(V, DstVTy);
3912 }
3913 // V cannot be directly casted to desired vector type.
3914 // May happen when V is a floating point vector but DstVTy is a vector of
3915 // pointers or vice-versa. Handle this using a two-step bitcast using an
3916 // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float.
3917 assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
3918 "Only one type should be a pointer type");
3919 assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
3920 "Only one type should be a floating point type");
3921 Type *IntTy =
3922 IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
3923 auto *VecIntTy = VectorType::get(IntTy, VF);
3924 Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
3925 return Builder.CreateBitOrPointerCast(CastVal, DstVTy);
3926}
3927
3928/// Return a vector containing interleaved elements from multiple
3929/// smaller input vectors.
3931 const Twine &Name) {
3932 unsigned Factor = Vals.size();
3933 assert(Factor > 1 && "Tried to interleave invalid number of vectors");
3934
3935 VectorType *VecTy = cast<VectorType>(Vals[0]->getType());
3936#ifndef NDEBUG
3937 for (Value *Val : Vals)
3938 assert(Val->getType() == VecTy && "Tried to interleave mismatched types");
3939#endif
3940
3941 // Scalable vectors cannot use arbitrary shufflevectors (only splats), so
3942 // must use intrinsics to interleave.
3943 if (VecTy->isScalableTy()) {
3944 assert(Factor <= 8 && "Unsupported interleave factor for scalable vectors");
3945 return Builder.CreateVectorInterleave(Vals, Name);
3946 }
3947
3948 // Fixed length. Start by concatenating all vectors into a wide vector.
3949 Value *WideVec = concatenateVectors(Builder, Vals);
3950
3951 // Interleave the elements into the wide vector.
3952 const unsigned NumElts = VecTy->getElementCount().getFixedValue();
3953 return Builder.CreateShuffleVector(
3954 WideVec, createInterleaveMask(NumElts, Factor), Name);
3955}
3956
3957// Try to vectorize the interleave group that \p Instr belongs to.
3958//
3959// E.g. Translate following interleaved load group (factor = 3):
3960// for (i = 0; i < N; i+=3) {
3961// R = Pic[i]; // Member of index 0
3962// G = Pic[i+1]; // Member of index 1
3963// B = Pic[i+2]; // Member of index 2
3964// ... // do something to R, G, B
3965// }
3966// To:
3967// %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B
3968// %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9> ; R elements
3969// %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10> ; G elements
3970// %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11> ; B elements
3971//
3972// Or translate following interleaved store group (factor = 3):
3973// for (i = 0; i < N; i+=3) {
3974// ... do something to R, G, B
3975// Pic[i] = R; // Member of index 0
3976// Pic[i+1] = G; // Member of index 1
3977// Pic[i+2] = B; // Member of index 2
3978// }
3979// To:
3980// %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
3981// %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u>
3982// %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
3983// <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements
3984// store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B
3986 assert((!needsMaskForGaps() || !State.VF.isScalable()) &&
3987 "Masking gaps for scalable vectors is not yet supported.");
3989 Instruction *Instr = Group->getInsertPos();
3990
3991 // Prepare for the vector type of the interleaved load/store.
3992 Type *ScalarTy = getLoadStoreType(Instr);
3993 unsigned InterleaveFactor = Group->getFactor();
3994 auto *VecTy = VectorType::get(ScalarTy, State.VF * InterleaveFactor);
3995
3996 VPValue *BlockInMask = getMask();
3997 VPValue *Addr = getAddr();
3998 Value *ResAddr = State.get(Addr, VPLane(0));
3999
4000 auto CreateGroupMask = [&BlockInMask, &State,
4001 &InterleaveFactor](Value *MaskForGaps) -> Value * {
4002 if (State.VF.isScalable()) {
4003 assert(!MaskForGaps && "Interleaved groups with gaps are not supported.");
4004 assert(InterleaveFactor <= 8 &&
4005 "Unsupported deinterleave factor for scalable vectors");
4006 auto *ResBlockInMask = State.get(BlockInMask);
4007 SmallVector<Value *> Ops(InterleaveFactor, ResBlockInMask);
4008 return interleaveVectors(State.Builder, Ops, "interleaved.mask");
4009 }
4010
4011 if (!BlockInMask)
4012 return MaskForGaps;
4013
4014 Value *ResBlockInMask = State.get(BlockInMask);
4015 Value *ShuffledMask = State.Builder.CreateShuffleVector(
4016 ResBlockInMask,
4017 createReplicatedMask(InterleaveFactor, State.VF.getFixedValue()),
4018 "interleaved.mask");
4019 return MaskForGaps ? State.Builder.CreateBinOp(Instruction::And,
4020 ShuffledMask, MaskForGaps)
4021 : ShuffledMask;
4022 };
4023
4024 const DataLayout &DL = Instr->getDataLayout();
4025 // Vectorize the interleaved load group.
4026 if (isa<LoadInst>(Instr)) {
4027 Value *MaskForGaps = nullptr;
4028 if (needsMaskForGaps()) {
4029 MaskForGaps =
4030 createBitMaskForGaps(State.Builder, State.VF.getFixedValue(), *Group);
4031 assert(MaskForGaps && "Mask for Gaps is required but it is null");
4032 }
4033
4034 Instruction *NewLoad;
4035 if (BlockInMask || MaskForGaps) {
4036 Value *GroupMask = CreateGroupMask(MaskForGaps);
4037 Value *PoisonVec = PoisonValue::get(VecTy);
4038 NewLoad = State.Builder.CreateMaskedLoad(VecTy, ResAddr,
4039 Group->getAlign(), GroupMask,
4040 PoisonVec, "wide.masked.vec");
4041 } else
4042 NewLoad = State.Builder.CreateAlignedLoad(VecTy, ResAddr,
4043 Group->getAlign(), "wide.vec");
4044 applyMetadata(*NewLoad);
4045 // TODO: Also manage existing metadata using VPIRMetadata.
4046 Group->addMetadata(NewLoad);
4047
4049 if (VecTy->isScalableTy()) {
4050 // Scalable vectors cannot use arbitrary shufflevectors (only splats),
4051 // so must use intrinsics to deinterleave.
4052 assert(InterleaveFactor <= 8 &&
4053 "Unsupported deinterleave factor for scalable vectors");
4054 NewLoad = State.Builder.CreateIntrinsic(
4055 Intrinsic::getDeinterleaveIntrinsicID(InterleaveFactor),
4056 NewLoad->getType(), NewLoad,
4057 /*FMFSource=*/nullptr, "strided.vec");
4058 }
4059
4060 auto CreateStridedVector = [&InterleaveFactor, &State,
4061 &NewLoad](unsigned Index) -> Value * {
4062 assert(Index < InterleaveFactor && "Illegal group index");
4063 if (State.VF.isScalable())
4064 return State.Builder.CreateExtractValue(NewLoad, Index);
4065
4066 // For fixed length VF, use shuffle to extract the sub-vectors from the
4067 // wide load.
4068 auto StrideMask =
4069 createStrideMask(Index, InterleaveFactor, State.VF.getFixedValue());
4070 return State.Builder.CreateShuffleVector(NewLoad, StrideMask,
4071 "strided.vec");
4072 };
4073
4074 for (unsigned I = 0, J = 0; I < InterleaveFactor; ++I) {
4075 Instruction *Member = Group->getMember(I);
4076
4077 // Skip the gaps in the group.
4078 if (!Member)
4079 continue;
4080
4081 Value *StridedVec = CreateStridedVector(I);
4082
4083 // If this member has different type, cast the result type.
4084 if (Member->getType() != ScalarTy) {
4085 VectorType *OtherVTy = VectorType::get(Member->getType(), State.VF);
4086 StridedVec =
4087 createBitOrPointerCast(State.Builder, StridedVec, OtherVTy, DL);
4088 }
4089
4090 if (Group->isReverse())
4091 StridedVec = State.Builder.CreateVectorReverse(StridedVec, "reverse");
4092
4093 State.set(VPDefs[J], StridedVec);
4094 ++J;
4095 }
4096 return;
4097 }
4098
4099 // The sub vector type for current instruction.
4100 auto *SubVT = VectorType::get(ScalarTy, State.VF);
4101
4102 // Vectorize the interleaved store group.
4103 Value *MaskForGaps =
4104 createBitMaskForGaps(State.Builder, State.VF.getKnownMinValue(), *Group);
4105 assert(((MaskForGaps != nullptr) == needsMaskForGaps()) &&
4106 "Mismatch between NeedsMaskForGaps and MaskForGaps");
4107 ArrayRef<VPValue *> StoredValues = getStoredValues();
4108 // Collect the stored vector from each member.
4109 SmallVector<Value *, 4> StoredVecs;
4110 unsigned StoredIdx = 0;
4111 for (unsigned i = 0; i < InterleaveFactor; i++) {
4112 assert((Group->getMember(i) || MaskForGaps) &&
4113 "Fail to get a member from an interleaved store group");
4114 Instruction *Member = Group->getMember(i);
4115
4116 // Skip the gaps in the group.
4117 if (!Member) {
4118 Value *Undef = PoisonValue::get(SubVT);
4119 StoredVecs.push_back(Undef);
4120 continue;
4121 }
4122
4123 Value *StoredVec = State.get(StoredValues[StoredIdx]);
4124 ++StoredIdx;
4125
4126 if (Group->isReverse())
4127 StoredVec = State.Builder.CreateVectorReverse(StoredVec, "reverse");
4128
4129 // If this member has different type, cast it to a unified type.
4130
4131 if (StoredVec->getType() != SubVT)
4132 StoredVec = createBitOrPointerCast(State.Builder, StoredVec, SubVT, DL);
4133
4134 StoredVecs.push_back(StoredVec);
4135 }
4136
4137 // Interleave all the smaller vectors into one wider vector.
4138 Value *IVec = interleaveVectors(State.Builder, StoredVecs, "interleaved.vec");
4139 Instruction *NewStoreInstr;
4140 if (BlockInMask || MaskForGaps) {
4141 Value *GroupMask = CreateGroupMask(MaskForGaps);
4142 NewStoreInstr = State.Builder.CreateMaskedStore(
4143 IVec, ResAddr, Group->getAlign(), GroupMask);
4144 } else
4145 NewStoreInstr =
4146 State.Builder.CreateAlignedStore(IVec, ResAddr, Group->getAlign());
4147
4148 applyMetadata(*NewStoreInstr);
4149 // TODO: Also manage existing metadata using VPIRMetadata.
4150 Group->addMetadata(NewStoreInstr);
4151}
4152
4153#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4155 VPSlotTracker &SlotTracker) const {
4157 O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
4158 IG->getInsertPos()->printAsOperand(O, false);
4159 O << ", ";
4161 VPValue *Mask = getMask();
4162 if (Mask) {
4163 O << ", ";
4164 Mask->printAsOperand(O, SlotTracker);
4165 }
4166
4167 unsigned OpIdx = 0;
4168 for (unsigned i = 0; i < IG->getFactor(); ++i) {
4169 if (!IG->getMember(i))
4170 continue;
4171 if (getNumStoreOperands() > 0) {
4172 O << "\n" << Indent << " store ";
4174 O << " to index " << i;
4175 } else {
4176 O << "\n" << Indent << " ";
4178 O << " = load from index " << i;
4179 }
4180 ++OpIdx;
4181 }
4182}
4183#endif
4184
4186 assert(State.VF.isScalable() &&
4187 "Only support scalable VF for EVL tail-folding.");
4189 "Masking gaps for scalable vectors is not yet supported.");
4191 Instruction *Instr = Group->getInsertPos();
4192
4193 // Prepare for the vector type of the interleaved load/store.
4194 Type *ScalarTy = getLoadStoreType(Instr);
4195 unsigned InterleaveFactor = Group->getFactor();
4196 assert(InterleaveFactor <= 8 &&
4197 "Unsupported deinterleave/interleave factor for scalable vectors");
4198 ElementCount WideVF = State.VF * InterleaveFactor;
4199 auto *VecTy = VectorType::get(ScalarTy, WideVF);
4200
4201 VPValue *Addr = getAddr();
4202 Value *ResAddr = State.get(Addr, VPLane(0));
4203 Value *EVL = State.get(getEVL(), VPLane(0));
4204 Value *InterleaveEVL = State.Builder.CreateMul(
4205 EVL, ConstantInt::get(EVL->getType(), InterleaveFactor), "interleave.evl",
4206 /* NUW= */ true, /* NSW= */ true);
4207 LLVMContext &Ctx = State.Builder.getContext();
4208
4209 Value *GroupMask = nullptr;
4210 if (VPValue *BlockInMask = getMask()) {
4211 SmallVector<Value *> Ops(InterleaveFactor, State.get(BlockInMask));
4212 GroupMask = interleaveVectors(State.Builder, Ops, "interleaved.mask");
4213 } else {
4214 GroupMask =
4215 State.Builder.CreateVectorSplat(WideVF, State.Builder.getTrue());
4216 }
4217
4218 // Vectorize the interleaved load group.
4219 if (isa<LoadInst>(Instr)) {
4220 CallInst *NewLoad = State.Builder.CreateIntrinsic(
4221 VecTy, Intrinsic::vp_load, {ResAddr, GroupMask, InterleaveEVL}, nullptr,
4222 "wide.vp.load");
4223 NewLoad->addParamAttr(0,
4224 Attribute::getWithAlignment(Ctx, Group->getAlign()));
4225
4226 applyMetadata(*NewLoad);
4227 // TODO: Also manage existing metadata using VPIRMetadata.
4228 Group->addMetadata(NewLoad);
4229
4230 // Scalable vectors cannot use arbitrary shufflevectors (only splats),
4231 // so must use intrinsics to deinterleave.
4232 NewLoad = State.Builder.CreateIntrinsic(
4233 Intrinsic::getDeinterleaveIntrinsicID(InterleaveFactor),
4234 NewLoad->getType(), NewLoad,
4235 /*FMFSource=*/nullptr, "strided.vec");
4236
4237 const DataLayout &DL = Instr->getDataLayout();
4238 for (unsigned I = 0, J = 0; I < InterleaveFactor; ++I) {
4239 Instruction *Member = Group->getMember(I);
4240 // Skip the gaps in the group.
4241 if (!Member)
4242 continue;
4243
4244 Value *StridedVec = State.Builder.CreateExtractValue(NewLoad, I);
4245 // If this member has different type, cast the result type.
4246 if (Member->getType() != ScalarTy) {
4247 VectorType *OtherVTy = VectorType::get(Member->getType(), State.VF);
4248 StridedVec =
4249 createBitOrPointerCast(State.Builder, StridedVec, OtherVTy, DL);
4250 }
4251
4252 State.set(getVPValue(J), StridedVec);
4253 ++J;
4254 }
4255 return;
4256 } // End for interleaved load.
4257
4258 // The sub vector type for current instruction.
4259 auto *SubVT = VectorType::get(ScalarTy, State.VF);
4260 // Vectorize the interleaved store group.
4261 ArrayRef<VPValue *> StoredValues = getStoredValues();
4262 // Collect the stored vector from each member.
4263 SmallVector<Value *, 4> StoredVecs;
4264 const DataLayout &DL = Instr->getDataLayout();
4265 for (unsigned I = 0, StoredIdx = 0; I < InterleaveFactor; I++) {
4266 Instruction *Member = Group->getMember(I);
4267 // Skip the gaps in the group.
4268 if (!Member) {
4269 StoredVecs.push_back(PoisonValue::get(SubVT));
4270 continue;
4271 }
4272
4273 Value *StoredVec = State.get(StoredValues[StoredIdx]);
4274 // If this member has different type, cast it to a unified type.
4275 if (StoredVec->getType() != SubVT)
4276 StoredVec = createBitOrPointerCast(State.Builder, StoredVec, SubVT, DL);
4277
4278 StoredVecs.push_back(StoredVec);
4279 ++StoredIdx;
4280 }
4281
4282 // Interleave all the smaller vectors into one wider vector.
4283 Value *IVec = interleaveVectors(State.Builder, StoredVecs, "interleaved.vec");
4284 CallInst *NewStore =
4285 State.Builder.CreateIntrinsic(Type::getVoidTy(Ctx), Intrinsic::vp_store,
4286 {IVec, ResAddr, GroupMask, InterleaveEVL});
4287 NewStore->addParamAttr(1,
4288 Attribute::getWithAlignment(Ctx, Group->getAlign()));
4289
4290 applyMetadata(*NewStore);
4291 // TODO: Also manage existing metadata using VPIRMetadata.
4292 Group->addMetadata(NewStore);
4293}
4294
4295#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4297 VPSlotTracker &SlotTracker) const {
4299 O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
4300 IG->getInsertPos()->printAsOperand(O, false);
4301 O << ", ";
4303 O << ", ";
4305 if (VPValue *Mask = getMask()) {
4306 O << ", ";
4307 Mask->printAsOperand(O, SlotTracker);
4308 }
4309
4310 unsigned OpIdx = 0;
4311 for (unsigned i = 0; i < IG->getFactor(); ++i) {
4312 if (!IG->getMember(i))
4313 continue;
4314 if (getNumStoreOperands() > 0) {
4315 O << "\n" << Indent << " vp.store ";
4317 O << " to index " << i;
4318 } else {
4319 O << "\n" << Indent << " ";
4321 O << " = vp.load from index " << i;
4322 }
4323 ++OpIdx;
4324 }
4325}
4326#endif
4327
4329 VPCostContext &Ctx) const {
4330 Instruction *InsertPos = getInsertPos();
4331 // Find the VPValue index of the interleave group. We need to skip gaps.
4332 unsigned InsertPosIdx = 0;
4333 for (unsigned Idx = 0; IG->getFactor(); ++Idx)
4334 if (auto *Member = IG->getMember(Idx)) {
4335 if (Member == InsertPos)
4336 break;
4337 InsertPosIdx++;
4338 }
4339 Type *ValTy = Ctx.Types.inferScalarType(
4340 getNumDefinedValues() > 0 ? getVPValue(InsertPosIdx)
4341 : getStoredValues()[InsertPosIdx]);
4342 auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
4343 unsigned AS = cast<PointerType>(Ctx.Types.inferScalarType(getAddr()))
4344 ->getAddressSpace();
4345
4346 unsigned InterleaveFactor = IG->getFactor();
4347 auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
4348
4349 // Holds the indices of existing members in the interleaved group.
4351 for (unsigned IF = 0; IF < InterleaveFactor; IF++)
4352 if (IG->getMember(IF))
4353 Indices.push_back(IF);
4354
4355 // Calculate the cost of the whole interleaved group.
4356 InstructionCost Cost = Ctx.TTI.getInterleavedMemoryOpCost(
4357 InsertPos->getOpcode(), WideVecTy, IG->getFactor(), Indices,
4358 IG->getAlign(), AS, Ctx.CostKind, getMask(), NeedsMaskForGaps);
4359
4360 if (!IG->isReverse())
4361 return Cost;
4362
4363 return Cost + IG->getNumMembers() *
4364 Ctx.TTI.getShuffleCost(TargetTransformInfo::SK_Reverse,
4365 VectorTy, VectorTy, {}, Ctx.CostKind,
4366 0);
4367}
4368
4370 return vputils::onlyScalarValuesUsed(this) &&
4371 (!IsScalable || vputils::onlyFirstLaneUsed(this));
4372}
4373
4374#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4376 raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const {
4377 assert((getNumOperands() == 3 || getNumOperands() == 5) &&
4378 "unexpected number of operands");
4379 O << Indent << "EMIT ";
4381 O << " = WIDEN-POINTER-INDUCTION ";
4383 O << ", ";
4385 O << ", ";
4387 if (getNumOperands() == 5) {
4388 O << ", ";
4390 O << ", ";
4392 }
4393}
4394
4396 VPSlotTracker &SlotTracker) const {
4397 O << Indent << "EMIT ";
4399 O << " = EXPAND SCEV " << *Expr;
4400}
4401#endif
4402
4404 Value *CanonicalIV = State.get(getOperand(0), /*IsScalar*/ true);
4405 Type *STy = CanonicalIV->getType();
4406 IRBuilder<> Builder(State.CFG.PrevBB->getTerminator());
4407 ElementCount VF = State.VF;
4408 Value *VStart = VF.isScalar()
4409 ? CanonicalIV
4410 : Builder.CreateVectorSplat(VF, CanonicalIV, "broadcast");
4411 Value *VStep = Builder.CreateElementCount(
4412 STy, VF.multiplyCoefficientBy(getUnrollPart(*this)));
4413 if (VF.isVector()) {
4414 VStep = Builder.CreateVectorSplat(VF, VStep);
4415 VStep =
4416 Builder.CreateAdd(VStep, Builder.CreateStepVector(VStep->getType()));
4417 }
4418 Value *CanonicalVectorIV = Builder.CreateAdd(VStart, VStep, "vec.iv");
4419 State.set(this, CanonicalVectorIV);
4420}
4421
4422#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4424 VPSlotTracker &SlotTracker) const {
4425 O << Indent << "EMIT ";
4427 O << " = WIDEN-CANONICAL-INDUCTION ";
4429}
4430#endif
4431
4433 auto &Builder = State.Builder;
4434 // Create a vector from the initial value.
4435 auto *VectorInit = getStartValue()->getLiveInIRValue();
4436
4437 Type *VecTy = State.VF.isScalar()
4438 ? VectorInit->getType()
4439 : VectorType::get(VectorInit->getType(), State.VF);
4440
4441 BasicBlock *VectorPH =
4442 State.CFG.VPBB2IRBB.at(getParent()->getCFGPredecessor(0));
4443 if (State.VF.isVector()) {
4444 auto *IdxTy = Builder.getInt32Ty();
4445 auto *One = ConstantInt::get(IdxTy, 1);
4446 IRBuilder<>::InsertPointGuard Guard(Builder);
4447 Builder.SetInsertPoint(VectorPH->getTerminator());
4448 auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, State.VF);
4449 auto *LastIdx = Builder.CreateSub(RuntimeVF, One);
4450 VectorInit = Builder.CreateInsertElement(
4451 PoisonValue::get(VecTy), VectorInit, LastIdx, "vector.recur.init");
4452 }
4453
4454 // Create a phi node for the new recurrence.
4455 PHINode *Phi = PHINode::Create(VecTy, 2, "vector.recur");
4456 Phi->insertBefore(State.CFG.PrevBB->getFirstInsertionPt());
4457 Phi->addIncoming(VectorInit, VectorPH);
4458 State.set(this, Phi);
4459}
4460
4463 VPCostContext &Ctx) const {
4464 if (VF.isScalar())
4465 return Ctx.TTI.getCFInstrCost(Instruction::PHI, Ctx.CostKind);
4466
4467 return 0;
4468}
4469
4470#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4472 raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const {
4473 O << Indent << "FIRST-ORDER-RECURRENCE-PHI ";
4475 O << " = phi ";
4477}
4478#endif
4479
4481 // Reductions do not have to start at zero. They can start with
4482 // any loop invariant values.
4483 VPValue *StartVPV = getStartValue();
4484
4485 // In order to support recurrences we need to be able to vectorize Phi nodes.
4486 // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4487 // stage #1: We create a new vector PHI node with no incoming edges. We'll use
4488 // this value when we vectorize all of the instructions that use the PHI.
4489 BasicBlock *VectorPH =
4490 State.CFG.VPBB2IRBB.at(getParent()->getCFGPredecessor(0));
4491 bool ScalarPHI = State.VF.isScalar() || isInLoop();
4492 Value *StartV = State.get(StartVPV, ScalarPHI);
4493 Type *VecTy = StartV->getType();
4494
4495 BasicBlock *HeaderBB = State.CFG.PrevBB;
4496 assert(State.CurrentParentLoop->getHeader() == HeaderBB &&
4497 "recipe must be in the vector loop header");
4498 auto *Phi = PHINode::Create(VecTy, 2, "vec.phi");
4499 Phi->insertBefore(HeaderBB->getFirstInsertionPt());
4500 State.set(this, Phi, isInLoop());
4501
4502 Phi->addIncoming(StartV, VectorPH);
4503}
4504
4505#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4507 VPSlotTracker &SlotTracker) const {
4508 O << Indent << "WIDEN-REDUCTION-PHI ";
4509
4511 O << " = phi";
4512 printFlags(O);
4514 if (getVFScaleFactor() > 1)
4515 O << " (VF scaled by 1/" << getVFScaleFactor() << ")";
4516}
4517#endif
4518
4520 assert(is_contained(operands(), Op) && "Op must be an operand of the recipe");
4521 return vputils::onlyFirstLaneUsed(this);
4522}
4523
4525 Value *Op0 = State.get(getOperand(0));
4526 Type *VecTy = Op0->getType();
4527 Instruction *VecPhi = State.Builder.CreatePHI(VecTy, 2, Name);
4528 State.set(this, VecPhi);
4529}
4530
4532 VPCostContext &Ctx) const {
4533 return Ctx.TTI.getCFInstrCost(Instruction::PHI, Ctx.CostKind);
4534}
4535
4536#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4538 VPSlotTracker &SlotTracker) const {
4539 O << Indent << "WIDEN-PHI ";
4540
4542 O << " = phi ";
4544}
4545#endif
4546
4548 BasicBlock *VectorPH =
4549 State.CFG.VPBB2IRBB.at(getParent()->getCFGPredecessor(0));
4550 Value *StartMask = State.get(getOperand(0));
4551 PHINode *Phi =
4552 State.Builder.CreatePHI(StartMask->getType(), 2, "active.lane.mask");
4553 Phi->addIncoming(StartMask, VectorPH);
4554 State.set(this, Phi);
4555}
4556
4557#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4559 VPSlotTracker &SlotTracker) const {
4560 O << Indent << "ACTIVE-LANE-MASK-PHI ";
4561
4563 O << " = phi ";
4565}
4566#endif
4567
4568#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4570 raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const {
4571 O << Indent << "CURRENT-ITERATION-PHI ";
4572
4574 O << " = phi ";
4576}
4577#endif
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static MCDisassembler::DecodeStatus addOperand(MCInst &Inst, const MCOperand &Opnd)
AMDGPU Lower Kernel Arguments
AMDGPU Register Bank Select
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static const Function * getParent(const Value *V)
#define X(NUM, ENUM, NAME)
Definition ELF.h:853
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Value * getPointer(Value *Ptr)
iv users
Definition IVUsers.cpp:48
static std::pair< Value *, APInt > getMask(Value *WideMask, unsigned Factor, ElementCount LeafValueEC)
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
This file provides a LoopVectorizationPlanner class.
static const SCEV * getAddressAccessSCEV(Value *Ptr, PredicatedScalarEvolution &PSE, const Loop *TheLoop)
Gets the address access SCEV for Ptr, if it should be used for cost modeling according to isAddressSC...
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
static const Function * getCalledFunction(const Value *V)
static bool isOrdered(const Instruction *I)
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
const SmallVectorImpl< MachineOperand > & Cond
This file contains some templates that are useful if you are working with the STL at all.
This file defines less commonly used SmallVector utilities.
This file defines the SmallVector class.
#define LLVM_DEBUG(...)
Definition Debug.h:119
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
This file contains the declarations of different VPlan-related auxiliary helpers.
static bool isPredicatedUniformMemOpAfterTailFolding(const VPReplicateRecipe &R, const SCEV *PtrSCEV, VPCostContext &Ctx)
Return true if R is a predicated load/store with a loop-invariant address only masked by the header m...
static Value * interleaveVectors(IRBuilderBase &Builder, ArrayRef< Value * > Vals, const Twine &Name)
Return a vector containing interleaved elements from multiple smaller input vectors.
static Value * createBitOrPointerCast(IRBuilderBase &Builder, Value *V, VectorType *DstVTy, const DataLayout &DL)
static Instruction::BinaryOps getSubRecurOpcode(RecurKind Kind)
SmallVector< Value *, 2 > VectorParts
static unsigned getCalledFnOperandIndex(const VPInstruction &VPI)
For call VPInstructions, return the operand index of the called function.
This file contains the declarations of the Vectorization Plan base classes:
void printAsOperand(OutputBuffer &OB, Prec P=Prec::Default, bool StrictlyWorse=false) const
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition APInt.h:235
Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
size_t size() const
Get the array size.
Definition ArrayRef.h:141
bool empty() const
Check if the array is empty.
Definition ArrayRef.h:136
static LLVM_ABI Attribute getWithAlignment(LLVMContext &Context, Align Alignment)
Return a uniquified Attribute object that has the specific alignment set.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction; assumes that the block is well-formed.
Definition BasicBlock.h:237
void addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind)
Adds the attribute to the indicated argument.
This class represents a function call, abstracting a target machine's calling convention.
static LLVM_ABI bool isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy, const DataLayout &DL)
Check whether a bitcast, inttoptr, or ptrtoint cast between these types is valid and a no-op.
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Definition InstrTypes.h:986
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
static LLVM_ABI StringRef getPredicateName(Predicate P)
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
void setSuccessor(unsigned idx, BasicBlock *NewSucc)
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
A debug info location.
Definition DebugLoc.h:123
static DebugLoc getUnknown()
Definition DebugLoc.h:161
constexpr bool isVector() const
One or more elements.
Definition TypeSize.h:324
static constexpr ElementCount getScalable(ScalarTy MinVal)
Definition TypeSize.h:312
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition TypeSize.h:309
constexpr bool isScalar() const
Exactly one element.
Definition TypeSize.h:320
Convenience struct for specifying and reasoning about fast-math flags.
Definition FMF.h:23
LLVM_ABI void print(raw_ostream &O) const
Print fast-math flags to O.
Definition Operator.cpp:283
void setAllowContract(bool B=true)
Definition FMF.h:93
bool noSignedZeros() const
Definition FMF.h:70
bool noInfs() const
Definition FMF.h:69
void setAllowReciprocal(bool B=true)
Definition FMF.h:90
bool allowReciprocal() const
Definition FMF.h:71
void setNoSignedZeros(bool B=true)
Definition FMF.h:87
bool allowReassoc() const
Flag queries.
Definition FMF.h:67
bool approxFunc() const
Definition FMF.h:73
void setNoNaNs(bool B=true)
Definition FMF.h:81
void setAllowReassoc(bool B=true)
Flag setters.
Definition FMF.h:78
bool noNaNs() const
Definition FMF.h:68
void setApproxFunc(bool B=true)
Definition FMF.h:96
void setNoInfs(bool B=true)
Definition FMF.h:84
bool allowContract() const
Definition FMF.h:72
Class to represent function types.
Type * getParamType(unsigned i) const
Parameter type accessors.
bool willReturn() const
Determine if the function will return.
Definition Function.h:669
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition Function.h:246
bool doesNotThrow() const
Determine if the function cannot unwind.
Definition Function.h:602
bool doesNotAccessMemory() const
Determine if the function does not access memory.
Definition Function.cpp:867
Type * getReturnType() const
Returns the type of the ret val.
Definition Function.h:216
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags none()
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
Value * CreateInsertElement(Type *VecTy, Value *NewElt, Value *Idx, const Twine &Name="")
Definition IRBuilder.h:2627
IntegerType * getInt1Ty()
Fetch the type representing a single bit.
Definition IRBuilder.h:571
Value * CreateInsertValue(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &Name="")
Definition IRBuilder.h:2681
Value * CreateExtractElement(Value *Vec, Value *Idx, const Twine &Name="")
Definition IRBuilder.h:2615
LLVM_ABI Value * CreateVectorSpliceRight(Value *V1, Value *V2, Value *Offset, const Twine &Name="")
Create a vector.splice.right intrinsic call, or a shufflevector that produces the same result if the ...
CondBrInst * CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False, MDNode *BranchWeights=nullptr, MDNode *Unpredictable=nullptr)
Create a conditional 'br Cond, TrueDest, FalseDest' instruction.
Definition IRBuilder.h:1238
LLVM_ABI Value * CreateSelectFMF(Value *C, Value *True, Value *False, FMFSource FMFSource, const Twine &Name="", Instruction *MDFrom=nullptr)
LLVM_ABI Value * CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name="")
Return a vector value that contains.
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
Definition IRBuilder.h:2674
LLVM_ABI CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > OverloadTypes, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="", ArrayRef< OperandBundleDef > OpBundles={})
Create a call to intrinsic ID with Args, mangled using OverloadTypes.
LLVM_ABI Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
Value * CreateFreeze(Value *V, const Twine &Name="")
Definition IRBuilder.h:2693
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
Definition IRBuilder.h:586
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition IRBuilder.h:2091
void setFastMathFlags(FastMathFlags NewFMF)
Set the fast-math flags to be used with generated fp-math operators.
Definition IRBuilder.h:352
LLVM_ABI Value * CreateVectorReverse(Value *V, const Twine &Name="")
Return a vector value that contains the vector V reversed.
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2378
LLVM_ABI CallInst * CreateOrReduce(Value *Src)
Create a vector int OR reduction intrinsic of the source vector.
Value * CreateLogicalAnd(Value *Cond1, Value *Cond2, const Twine &Name="", Instruction *MDFrom=nullptr)
Definition IRBuilder.h:1782
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Definition IRBuilder.h:529
Value * CreateCmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition IRBuilder.h:2508
Value * CreateNot(Value *V, const Twine &Name="")
Definition IRBuilder.h:1866
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2374
Value * CreateCountTrailingZeroElems(Type *ResTy, Value *Mask, bool ZeroIsPoison=true, const Twine &Name="")
Create a call to llvm.experimental_cttz_elts.
Definition IRBuilder.h:1176
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1461
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Definition IRBuilder.h:2120
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1444
ConstantInt * getFalse()
Get the constant value for i1 false.
Definition IRBuilder.h:514
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition IRBuilder.h:1753
Value * CreateICmpUGE(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2386
Value * CreateLogicalOr(Value *Cond1, Value *Cond2, const Twine &Name="", Instruction *MDFrom=nullptr)
Definition IRBuilder.h:1790
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2484
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="", bool IsDisjoint=false)
Definition IRBuilder.h:1614
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1478
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2858
static InstructionCost getInvalid(CostType Val=0)
bool isCast() const
bool isBinaryOp() const
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
const char * getOpcodeName() const
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
bool isUnaryOp() const
The group of interleaved loads/stores sharing the same stride and close to each other.
uint32_t getFactor() const
InstTy * getMember(uint32_t Index) const
Get the member with the given index Index.
bool isReverse() const
InstTy * getInsertPos() const
void addMetadata(InstTy *NewInst) const
Add metadata (e.g.
Align getAlign() const
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
Represents a single loop in the control flow graph.
Definition LoopInfo.h:40
Information for memory intrinsic cost model.
Root of the metadata hierarchy.
Definition Metadata.h:64
LLVM_ABI void print(raw_ostream &OS, const Module *M=nullptr, bool IsForDebug=false) const
Print.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
An interface layer with SCEV used to manage how we see SCEV expressions for values in the context of ...
ScalarEvolution * getSE() const
Returns the ScalarEvolution analysis used.
static LLVM_ABI unsigned getOpcode(RecurKind Kind)
Returns the opcode corresponding to the RecurrenceKind.
static bool isAnyOfRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,...
static LLVM_ABI bool isSubRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is for a sub operation.
static bool isFindIVRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,...
static bool isMinMaxRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is any min/max kind.
This class represents an analyzed expression in the program.
This class represents the LLVM 'select' instruction.
This class provides computation of slot numbers for LLVM Assembly writing.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Represent a constant reference to a string, i.e.
Definition StringRef.h:56
VectorInstrContext
Represents a hint about the context in which an insert/extract is used.
@ None
The insert/extract is not used with a load/store.
@ Load
The value being inserted comes from a load (InsertElement only).
@ Store
The extracted value is stored (ExtractElement only).
static LLVM_ABI PartialReductionExtendKind getPartialReductionExtendKind(Instruction *I)
Get the kind of extension that an instruction represents.
static LLVM_ABI OperandValueInfo getOperandInfo(const Value *V)
Collect properties of V used in cost analysis, e.g. OP_PowerOf2.
@ TCC_Free
Expected to fold away in lowering.
@ SK_Splice
Concatenates elements from the first input vector with elements of the second input vector.
@ SK_Broadcast
Broadcast element 0 to all other elements.
@ SK_Reverse
Reverse the order of the vector.
CastContextHint
Represents a hint about the context in which a cast is used.
@ Reversed
The cast is used with a reversed load/store.
@ Masked
The cast is used with a masked load/store.
@ Normal
The cast is used with a normal load/store.
@ Interleave
The cast is used with an interleaved load/store.
@ GatherScatter
The cast is used with a gather/scatter.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
bool isByteTy() const
True if this is an instance of ByteType.
Definition Type.h:242
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:290
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:313
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:284
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Definition Type.cpp:286
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:370
bool isStructTy() const
True if this is an instance of StructType.
Definition Type.h:278
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition Type.h:130
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:236
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
Definition Type.cpp:310
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:186
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:257
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
Definition Type.cpp:317
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:141
value_op_iterator value_op_end()
Definition User.h:288
void setOperand(unsigned i, Value *Val)
Definition User.h:212
Value * getOperand(unsigned i) const
Definition User.h:207
value_op_iterator value_op_begin()
Definition User.h:285
void execute(VPTransformState &State) override
Generate the active lane mask phi of the vector loop.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPBasicBlock serves as the leaf of the Hierarchical Control-Flow Graph.
Definition VPlan.h:4148
RecipeListTy & getRecipeList()
Returns a reference to the list of recipes.
Definition VPlan.h:4201
iterator end()
Definition VPlan.h:4185
const VPRecipeBase & front() const
Definition VPlan.h:4195
void insert(VPRecipeBase *Recipe, iterator InsertPt)
Definition VPlan.h:4214
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenMemoryRecipe.
VPValue * getIncomingValue(unsigned Idx) const
Return incoming value number Idx.
Definition VPlan.h:2816
unsigned getNumIncomingValues() const
Return the number of incoming values, taking into account when normalized the first incoming value wi...
Definition VPlan.h:2811
bool usesFirstLaneOnly(const VPValue *Op) const override
Returns true if the recipe only uses the first lane of operand Op.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
bool isNormalized() const
A normalized blend is one that has an odd number of operands, whereby the first operand does not have...
Definition VPlan.h:2807
VPBlockBase is the building block of the Hierarchical Control-Flow Graph.
Definition VPlan.h:93
const VPBlocksTy & getPredecessors() const
Definition VPlan.h:221
VPlan * getPlan()
Definition VPlan.cpp:178
void printAsOperand(raw_ostream &OS, bool PrintType=false) const
Definition VPlan.h:363
const VPBasicBlock * getEntryBasicBlock() const
Definition VPlan.cpp:183
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPBranchOnMaskRecipe.
void execute(VPTransformState &State) override
Generate the extraction of the appropriate bit from the block mask and the conditional branch.
VPlan-based builder utility analogous to IRBuilder.
LLVM_ABI_FOR_TEST void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
unsigned getNumDefinedValues() const
Returns the number of values defined by the VPDef.
Definition VPlanValue.h:504
VPValue * getVPSingleValue()
Returns the only VPValue defined by the VPDef.
Definition VPlanValue.h:477
VPValue * getVPValue(unsigned I)
Returns the VPValue with index I defined by the VPDef.
Definition VPlanValue.h:489
ArrayRef< VPRecipeValue * > definedValues()
Returns an ArrayRef of the values defined by the VPDef.
Definition VPlanValue.h:499
VPIRValue * getStartValue() const
Definition VPlan.h:3965
VPValue * getStepValue() const
Definition VPlan.h:3967
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void decompose()
Insert the recipes of the expression back into the VPlan, directly before the current recipe.
bool isSingleScalar() const
Returns true if the result of this VPExpressionRecipe is a single-scalar.
bool mayHaveSideEffects() const
Returns true if this expression contains recipes that may have side effects.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Compute the cost of this recipe either using a recipe's specialized implementation or using the legac...
bool mayReadOrWriteMemory() const
Returns true if this expression contains recipes that may read from or write to memory.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this header phi recipe.
VPValue * getStartValue()
Returns the start value of the phi, if one is set.
Definition VPlan.h:2336
void execute(VPTransformState &State) override
Produce a vectorized histogram operation.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPHistogramRecipe.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPValue * getMask() const
Return the mask operand if one was provided, or a null pointer if all lanes should be executed uncond...
Definition VPlan.h:2079
Class to record and manage LLVM IR flags.
Definition VPlan.h:685
FastMathFlagsTy FMFs
Definition VPlan.h:773
ReductionFlagsTy ReductionFlags
Definition VPlan.h:775
LLVM_ABI_FOR_TEST bool hasRequiredFlagsForOpcode(unsigned Opcode) const
Returns true if Opcode has its required flags set.
LLVM_ABI_FOR_TEST bool flagsValidForOpcode(unsigned Opcode) const
Returns true if the set flags are valid for Opcode.
static VPIRFlags getDefaultFlags(unsigned Opcode)
Returns default flags for Opcode for opcodes that support it, asserts otherwise.
WrapFlagsTy WrapFlags
Definition VPlan.h:767
void printFlags(raw_ostream &O) const
bool hasFastMathFlags() const
Returns true if the recipe has fast-math flags.
Definition VPlan.h:990
LLVM_ABI_FOR_TEST FastMathFlags getFastMathFlags() const
bool isReductionOrdered() const
Definition VPlan.h:1054
TruncFlagsTy TruncFlags
Definition VPlan.h:768
CmpInst::Predicate getPredicate() const
Definition VPlan.h:962
ExactFlagsTy ExactFlags
Definition VPlan.h:770
void intersectFlags(const VPIRFlags &Other)
Only keep flags also present in Other.
uint8_t GEPFlagsStorage
Definition VPlan.h:771
GEPNoWrapFlags getGEPNoWrapFlags() const
Definition VPlan.h:980
bool hasPredicate() const
Returns true if the recipe has a comparison predicate.
Definition VPlan.h:985
DisjointFlagsTy DisjointFlags
Definition VPlan.h:769
FCmpFlagsTy FCmpFlags
Definition VPlan.h:774
NonNegFlagsTy NonNegFlags
Definition VPlan.h:772
bool isReductionInLoop() const
Definition VPlan.h:1060
void applyFlags(Instruction &I) const
Apply the IR flags to I.
Definition VPlan.h:919
uint8_t CmpPredStorage
Definition VPlan.h:766
RecurKind getRecurKind() const
Definition VPlan.h:1048
void execute(VPTransformState &State) override
The method which generates the output IR instructions that correspond to this VPRecipe,...
LLVM_ABI_FOR_TEST InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPIRInstruction.
VPIRInstruction(Instruction &I)
VPIRInstruction::create() should be used to create VPIRInstructions, as subclasses may need to be cre...
Definition VPlan.h:1684
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void intersect(const VPIRMetadata &MD)
Intersect this VPIRMetadata object with MD, keeping only metadata nodes that are common to both.
VPIRMetadata()=default
void print(raw_ostream &O, VPSlotTracker &SlotTracker) const
Print metadata with node IDs.
void applyMetadata(Instruction &I) const
Add all metadata to I.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate the instruction.
This is a concrete Recipe that models a single VPlan-level instruction.
Definition VPlan.h:1220
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPInstruction.
bool doesGeneratePerAllLanes() const
Returns true if this VPInstruction generates scalar values for all lanes.
@ ExtractLastActive
Extracts the last active lane from a set of vectors.
Definition VPlan.h:1326
@ ExtractLane
Extracts a single lane (first operand) from a set of vector operands.
Definition VPlan.h:1317
@ ExitingIVValue
Compute the exiting value of a wide induction after vectorization, that is the value of the last lane...
Definition VPlan.h:1333
@ WideIVStep
Scale the first operand (vector step) by the second operand (scalar-step).
Definition VPlan.h:1307
@ ResumeForEpilogue
Explicit user for the resume phi of the canonical induction in the main VPlan, used by the epilogue v...
Definition VPlan.h:1320
@ Unpack
Extracts all lanes from its (non-scalable) vector operand.
Definition VPlan.h:1260
@ ReductionStartVector
Start vector for reductions with 3 operands: the original start value, the identity value for the red...
Definition VPlan.h:1311
@ BuildVector
Creates a fixed-width vector containing all operands.
Definition VPlan.h:1255
@ BuildStructVector
Given operands of (the same) struct type, creates a struct of fixed- width vectors each containing a ...
Definition VPlan.h:1252
@ VScale
Returns the value for vscale.
Definition VPlan.h:1329
@ CanonicalIVIncrementForPart
Definition VPlan.h:1236
@ ComputeReductionResult
Reduce the operands to the final reduction result using the operation specified via the operation's V...
Definition VPlan.h:1263
bool hasResult() const
Definition VPlan.h:1411
bool opcodeMayReadOrWriteFromMemory() const
Returns true if the underlying opcode may read from or write to memory.
LLVM_DUMP_METHOD void dump() const
Print the VPInstruction to dbgs() (for debugging).
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the VPInstruction to O.
StringRef getName() const
Returns the symbolic name assigned to the VPInstruction.
Definition VPlan.h:1491
unsigned getOpcode() const
Definition VPlan.h:1395
VPInstruction(unsigned Opcode, ArrayRef< VPValue * > Operands, const VPIRFlags &Flags={}, const VPIRMetadata &MD={}, DebugLoc DL=DebugLoc::getUnknown(), const Twine &Name="")
bool usesFirstLaneOnly(const VPValue *Op) const override
Returns true if the recipe only uses the first lane of operand Op.
bool isVectorToScalar() const
Returns true if this VPInstruction produces a scalar value from a vector, e.g.
bool isSingleScalar() const
Returns true if this VPInstruction's operands are single scalars and the result is also a single scal...
unsigned getNumOperandsForOpcode() const
Return the number of operands determined by the opcode of the VPInstruction, excluding mask.
bool isMasked() const
Returns true if the VPInstruction has a mask operand.
Definition VPlan.h:1436
void execute(VPTransformState &State) override
Generate the instruction.
bool usesFirstPartOnly(const VPValue *Op) const override
Returns true if the recipe only uses the first part of operand Op.
bool needsMaskForGaps() const
Return true if the access needs a mask because of the gaps.
Definition VPlan.h:2920
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this recipe.
Instruction * getInsertPos() const
Definition VPlan.h:2924
const InterleaveGroup< Instruction > * getInterleaveGroup() const
Definition VPlan.h:2922
VPValue * getMask() const
Return the mask used by this recipe.
Definition VPlan.h:2914
ArrayRef< VPValue * > getStoredValues() const
Return the VPValues stored by this interleave group.
Definition VPlan.h:2943
VPValue * getAddr() const
Return the address accessed by this recipe.
Definition VPlan.h:2908
VPValue * getEVL() const
The VPValue of the explicit vector length.
Definition VPlan.h:3017
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
unsigned getNumStoreOperands() const override
Returns the number of stored operands of this interleave group.
Definition VPlan.h:3030
void execute(VPTransformState &State) override
Generate the wide load or store, and shuffles.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
unsigned getNumStoreOperands() const override
Returns the number of stored operands of this interleave group.
Definition VPlan.h:2980
void execute(VPTransformState &State) override
Generate the wide load or store, and shuffles.
In what follows, the term "input IR" refers to code that is fed into the vectorizer whereas the term ...
static VPLane getLastLaneForVF(const ElementCount &VF)
static VPLane getLaneFromEnd(const ElementCount &VF, unsigned Offset)
static VPLane getFirstLane()
virtual const VPRecipeBase * getAsRecipe() const =0
Return a VPRecipeBase* to the current object.
VPValue * getIncomingValueForBlock(const VPBasicBlock *VPBB) const
Returns the incoming value for VPBB. VPBB must be an incoming block.
virtual unsigned getNumIncoming() const
Returns the number of incoming values, also number of incoming blocks.
Definition VPlan.h:1598
void removeIncomingValueFor(VPBlockBase *IncomingBlock) const
Removes the incoming value for IncomingBlock, which must be a predecessor.
const VPBasicBlock * getIncomingBlock(unsigned Idx) const
Returns the incoming block with index Idx.
Definition VPlan.h:4292
detail::zippy< llvm::detail::zip_first, VPUser::const_operand_range, const_incoming_blocks_range > incoming_values_and_blocks() const
Returns an iterator range over pairs of incoming values and corresponding incoming blocks.
Definition VPlan.h:1623
VPValue * getIncomingValue(unsigned Idx) const
Returns the incoming VPValue with index Idx.
Definition VPlan.h:1583
void printPhiOperands(raw_ostream &O, VPSlotTracker &SlotTracker) const
Print the recipe.
void setIncomingValueForBlock(const VPBasicBlock *VPBB, VPValue *V) const
Sets the incoming value for VPBB to V.
void execute(VPTransformState &State) override
Generates phi nodes for live-outs (from a replicate region) as needed to retain SSA form.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPRecipeBase is a base class modeling a sequence of one or more output IR instructions.
Definition VPlan.h:401
bool mayReadFromMemory() const
Returns true if the recipe may read from memory.
bool mayHaveSideEffects() const
Returns true if the recipe may have side-effects.
virtual void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const =0
Each concrete VPRecipe prints itself, without printing common information, like debug info or metadat...
VPRegionBlock * getRegion()
Definition VPlan.h:4493
LLVM_ABI_FOR_TEST void dump() const
Dump the recipe to stderr (for debugging).
Definition VPlan.cpp:117
bool isPhi() const
Returns true for PHI-like recipes.
bool mayWriteToMemory() const
Returns true if the recipe may write to memory.
virtual InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const
Compute the cost of this recipe either using a recipe's specialized implementation or using the legac...
VPBasicBlock * getParent()
Definition VPlan.h:475
DebugLoc getDebugLoc() const
Returns the debug location of the recipe.
Definition VPlan.h:553
void moveBefore(VPBasicBlock &BB, iplist< VPRecipeBase >::iterator I)
Unlink this recipe and insert into BB before I.
bool isSafeToSpeculativelyExecute() const
Return true if we can safely execute this recipe unconditionally even if it is masked originally.
void insertBefore(VPRecipeBase *InsertPos)
Insert an unlinked recipe into a basic block immediately before the specified recipe.
void insertAfter(VPRecipeBase *InsertPos)
Insert an unlinked Recipe into a basic block immediately after the specified Recipe.
iplist< VPRecipeBase >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
InstructionCost cost(ElementCount VF, VPCostContext &Ctx)
Return the cost of this recipe, taking into account if the cost computation should be skipped and the...
bool isScalarCast() const
Return true if the recipe is a scalar cast.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const
Print the recipe, delegating to printRecipe().
void removeFromParent()
This method unlinks 'this' from the containing basic block, but does not delete it.
unsigned getVPRecipeID() const
Definition VPlan.h:521
void moveAfter(VPRecipeBase *MovePos)
Unlink this recipe from its current VPBasicBlock and insert it into the VPBasicBlock that MovePos liv...
VPRecipeBase(const unsigned char SC, ArrayRef< VPValue * > Operands, DebugLoc DL=DebugLoc::getUnknown())
Definition VPlan.h:465
friend class VPValue
Definition VPlanValue.h:310
void execute(VPTransformState &State) override
Generate the reduction in the loop.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPValue * getEVL() const
The VPValue of the explicit vector length.
Definition VPlan.h:3178
unsigned getVFScaleFactor() const
Get the factor that the VF of this recipe's output should be scaled by, or 1 if it isn't scaled.
Definition VPlan.h:2731
bool isInLoop() const
Returns true if the phi is part of an in-loop reduction.
Definition VPlan.h:2755
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate the phi/select nodes.
bool isConditional() const
Return true if the in-loop reduction is conditional.
Definition VPlan.h:3120
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of VPReductionRecipe.
VPValue * getVecOp() const
The VPValue of the vector value to be reduced.
Definition VPlan.h:3131
VPValue * getCondOp() const
The VPValue of the condition for the block.
Definition VPlan.h:3133
RecurKind getRecurrenceKind() const
Return the recurrence kind for the in-loop reduction.
Definition VPlan.h:3116
bool isPartialReduction() const
Returns true if the reduction outputs a vector with a scaled down VF.
Definition VPlan.h:3122
VPValue * getChainOp() const
The VPValue of the scalar Chain being accumulated.
Definition VPlan.h:3129
bool isInLoop() const
Returns true if the reduction is in-loop.
Definition VPlan.h:3124
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate the reduction in the loop.
VPRegionBlock represents a collection of VPBasicBlocks and VPRegionBlocks which form a Single-Entry-S...
Definition VPlan.h:4358
bool isReplicator() const
An indicator whether this region is to generate multiple replicated instances of output IR correspond...
Definition VPlan.h:4434
VPReplicateRecipe replicates a given instruction producing multiple scalar copies of the original sca...
Definition VPlan.h:3200
void execute(VPTransformState &State) override
Generate replicas of the desired Ingredient.
bool isSingleScalar() const
Definition VPlan.h:3248
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPReplicateRecipe.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
static InstructionCost computeCallCost(Function *CalledFn, Type *ResultTy, ArrayRef< const VPValue * > ArgOps, bool IsSingleScalar, ElementCount VF, VPCostContext &Ctx)
Return the cost of scalarizing a call to CalledFn with argument operands ArgOps for a given VF.
unsigned getOpcode() const
Definition VPlan.h:3272
VPValue * getStepValue() const
Definition VPlan.h:4037
VPValue * getStartIndex() const
Return the StartIndex, or null if known to be zero, valid only after unrolling.
Definition VPlan.h:4045
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate the scalarized versions of the phi node as needed by their users.
VPSingleDef is a base class for recipes for modeling a sequence of one or more output IR that define ...
Definition VPlan.h:605
Instruction * getUnderlyingInstr()
Returns the underlying instruction.
Definition VPlan.h:670
LLVM_ABI_FOR_TEST LLVM_DUMP_METHOD void dump() const
Print this VPSingleDefRecipe to dbgs() (for debugging).
VPSingleDefRecipe(const unsigned char SC, ArrayRef< VPValue * > Operands, DebugLoc DL=DebugLoc::getUnknown())
Definition VPlan.h:607
This class can be used to assign names to VPValues.
An analysis for type-inference for VPValues.
Type * inferScalarType(const VPValue *V)
Infer the type of V. Returns the scalar type of V.
Helper to access the operand that contains the unroll part for this recipe after unrolling.
Definition VPlan.h:1153
VPValue * getUnrollPartOperand(const VPUser &U) const
Return the VPValue operand containing the unroll part or null if there is no such operand.
unsigned getUnrollPart(const VPUser &U) const
Return the unroll part.
This class augments VPValue with operands which provide the inverse def-use edges from VPValue's user...
Definition VPlanValue.h:335
void printOperands(raw_ostream &O, VPSlotTracker &SlotTracker) const
Print the operands to O.
Definition VPlan.cpp:1524
operand_range operands()
Definition VPlanValue.h:403
unsigned getNumOperands() const
Definition VPlanValue.h:373
operand_iterator op_begin()
Definition VPlanValue.h:399
VPValue * getOperand(unsigned N) const
Definition VPlanValue.h:374
This is the base class of the VPlan Def/Use graph, used for modeling the data flow into,...
Definition VPlanValue.h:49
Value * getLiveInIRValue() const
Return the underlying IR value for a VPIRValue.
Definition VPlan.cpp:138
bool isDefinedOutsideLoopRegions() const
Returns true if the VPValue is defined outside any loop.
Definition VPlan.cpp:1475
VPRecipeBase * getDefiningRecipe()
Returns the recipe defining this VPValue or nullptr if it is not defined by a recipe,...
Definition VPlan.cpp:128
void printAsOperand(raw_ostream &OS, VPSlotTracker &Tracker) const
Definition VPlan.cpp:1520
Value * getUnderlyingValue() const
Return the underlying Value attached to this VPValue.
Definition VPlanValue.h:74
void setUnderlyingValue(Value *Val)
Definition VPlanValue.h:202
void replaceAllUsesWith(VPValue *New)
Definition VPlan.cpp:1478
VPValue * getVFValue() const
Definition VPlan.h:2177
void execute(VPTransformState &State) override
The method which generates the output IR instructions that correspond to this VPRecipe,...
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
Type * getSourceElementType() const
Definition VPlan.h:2174
int64_t getStride() const
Definition VPlan.h:2175
void materializeOffset(unsigned Part=0)
Adds the offset operand to the recipe.
Type * getSourceElementType() const
Definition VPlan.h:2246
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
The method which generates the output IR instructions that correspond to this VPRecipe,...
bool usesFirstLaneOnly(const VPValue *Op) const override
Returns true if the recipe only uses the first lane of operand Op.
operand_range args()
Definition VPlan.h:2031
Function * getCalledScalarFunction() const
Definition VPlan.h:2027
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenCallRecipe.
void execute(VPTransformState &State) override
Produce a widened version of the call instruction.
static InstructionCost computeCallCost(Function *Variant, VPCostContext &Ctx)
Return the cost of widening a call using the vector function Variant.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate a canonical vector induction variable of the vector loop, with start = {<Part*VF,...
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
Instruction::CastOps getOpcode() const
Definition VPlan.h:1868
LLVM_ABI_FOR_TEST void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
Type * getResultType() const
Returns the result type of the cast.
Definition VPlan.h:1871
LLVM_ABI_FOR_TEST void execute(VPTransformState &State) override
Produce widened copies of the cast.
LLVM_ABI_FOR_TEST InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenCastRecipe.
void execute(VPTransformState &State) override
Generate the gep nodes.
Type * getSourceElementType() const
Definition VPlan.h:2131
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
bool usesFirstLaneOnly(const VPValue *Op) const override
Returns true if the recipe only uses the first lane of operand Op.
VPIRValue * getStartValue() const
Returns the start value of the induction.
Definition VPlan.h:2399
VPValue * getStepValue()
Returns the step value of the induction.
Definition VPlan.h:2402
VPIRValue * getStartValue() const
Returns the start value of the induction.
Definition VPlan.h:2500
TruncInst * getTruncInst()
Returns the first defined value as TruncInst, if it is one or nullptr otherwise.
Definition VPlan.h:2515
Type * getScalarType() const
Returns the scalar type of the induction.
Definition VPlan.h:2524
bool isCanonical() const
Returns true if the induction is canonical, i.e.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
Intrinsic::ID getVectorIntrinsicID() const
Return the ID of the intrinsic.
Definition VPlan.h:1959
LLVM_ABI_FOR_TEST void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
StringRef getIntrinsicName() const
Return to name of the intrinsic as string.
static InstructionCost computeCallCost(Intrinsic::ID ID, ArrayRef< const VPValue * > Operands, const VPRecipeWithIRFlags &R, ElementCount VF, VPCostContext &Ctx)
Compute the cost of a vector intrinsic with ID and Operands.
LLVM_ABI_FOR_TEST bool usesFirstLaneOnly(const VPValue *Op) const override
Returns true if the VPUser only uses the first lane of operand Op.
Type * getResultType() const
Return the scalar return type of the intrinsic.
Definition VPlan.h:1962
LLVM_ABI_FOR_TEST void execute(VPTransformState &State) override
Produce a widened version of the vector intrinsic.
LLVM_ABI_FOR_TEST InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this vector intrinsic.
bool IsMasked
Whether the memory access is masked.
Definition VPlan.h:3516
bool isConsecutive() const
Return whether the loaded-from / stored-to addresses are consecutive.
Definition VPlan.h:3539
Instruction & Ingredient
Definition VPlan.h:3507
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const
Return the cost of this VPWidenMemoryRecipe.
bool Consecutive
Whether the accessed addresses are consecutive.
Definition VPlan.h:3513
VPValue * getMask() const
Return the mask used by this recipe.
Definition VPlan.h:3549
Align Alignment
Alignment information for this memory access.
Definition VPlan.h:3510
virtual VPRecipeBase * getAsRecipe()=0
Return a VPRecipeBase* to the current object.
VPValue * getAddr() const
Return the address accessed by this recipe.
Definition VPlan.h:3542
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenPHIRecipe.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate the phi/select nodes.
bool onlyScalarsGenerated(bool IsScalable)
Returns true if only scalar values will be generated.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenRecipe.
void execute(VPTransformState &State) override
Produce a widened instruction using the opcode and operands of the recipe, processing State....
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
unsigned getOpcode() const
Definition VPlan.h:1811
VPlan models a candidate for vectorization, encoding various decisions take to produce efficient outp...
Definition VPlan.h:4506
const DataLayout & getDataLayout() const
Definition VPlan.h:4711
VPIRValue * getConstantInt(Type *Ty, uint64_t Val, bool IsSigned=false)
Return a VPIRValue wrapping a ConstantInt with the given type and value.
Definition VPlan.h:4813
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:255
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
Definition Value.cpp:393
LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.h:258
void mutateType(Type *Ty)
Mutate the type of this Value to be of the specified type.
Definition Value.h:816
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:318
Base class of all SIMD vector types.
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
Type * getElementType() const
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
constexpr LeafTy multiplyCoefficientBy(ScalarTy RHS) const
Definition TypeSize.h:256
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
constexpr LeafTy divideCoefficientBy(ScalarTy RHS) const
We do not provide the '/' operator here because division for polynomial types does not work in the sa...
Definition TypeSize.h:252
const ParentTy * getParent() const
Definition ilist_node.h:34
self_iterator getIterator()
Definition ilist_node.h:123
iterator erase(iterator where)
Definition ilist.h:204
pointer remove(iterator &IT)
Definition ilist.h:188
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
LLVM_ABI Intrinsic::ID getDeinterleaveIntrinsicID(unsigned Factor)
Returns the corresponding llvm.vector.deinterleaveN intrinsic for factor N.
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > OverloadTys={})
Look up the Function declaration of the intrinsic id in the Module M.
LLVM_ABI StringRef getBaseName(ID id)
Return the LLVM name for an intrinsic, without encoded types for overloading, such as "llvm....
SpecificConstantMatch m_ZeroInt()
Convenience matchers for specific integer values.
match_combine_or< Ty... > m_CombineOr(const Ty &...Ps)
Combine pattern matchers matching any of Ps patterns.
auto m_Cmp()
Matches any compare instruction and ignore it.
bool match(Val *V, const Pattern &P)
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
LogicalOp_match< LHS, RHS, Instruction::And, true > m_c_LogicalAnd(const LHS &L, const RHS &R)
Matches L && R with LHS and RHS in either order.
LogicalOp_match< LHS, RHS, Instruction::Or, true > m_c_LogicalOr(const LHS &L, const RHS &R)
Matches L || R with LHS and RHS in either order.
specific_intval< 1 > m_False()
specific_intval< 1 > m_True()
auto m_VPValue()
Match an arbitrary VPValue and ignore it.
VPInstruction_match< VPInstruction::Reverse, Op0_t > m_Reverse(const Op0_t &Op0)
NodeAddr< DefNode * > Def
Definition RDFGraph.h:384
friend class Instruction
Iterator for Instructions in a `BasicBlock.
Definition BasicBlock.h:73
bool isSingleScalar(const VPValue *VPV)
Returns true if VPV is a single scalar, either because it produces the same value for all lanes or on...
bool isAddressSCEVForCost(const SCEV *Addr, ScalarEvolution &SE, const Loop *L)
Returns true if Addr is an address SCEV that can be passed to TTI::getAddressComputationCost,...
bool onlyFirstPartUsed(const VPValue *Def)
Returns true if only the first part of Def is used.
bool onlyFirstLaneUsed(const VPValue *Def)
Returns true if only the first lane of Def is used.
bool onlyScalarValuesUsed(const VPValue *Def)
Returns true if only scalar values of Def are used by all users.
bool isUsedByLoadStoreAddress(const VPValue *V)
Returns true if V is used as part of the address of another load or store.
const SCEV * getSCEVExprForVPValue(const VPValue *V, PredicatedScalarEvolution &PSE, const Loop *L=nullptr)
Return the SCEV expression for V.
bool isHeaderMask(const VPValue *V, const VPlan &Plan)
Return true if V is a header mask in Plan.
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:315
LLVM_ABI Value * createSimpleReduction(IRBuilderBase &B, Value *Src, RecurKind RdxKind)
Create a reduction of the given vector.
@ Offset
Definition DWP.cpp:557
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
Definition STLExtras.h:830
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1738
LLVM_ABI Intrinsic::ID getMinMaxReductionIntrinsicOp(Intrinsic::ID RdxID)
Returns the min/max intrinsic used when expanding a min/max reduction.
InstructionCost Cost
@ Undef
Value of the register doesn't matter.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2553
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
auto map_to_vector(ContainerTy &&C, FuncTy &&F)
Map a range to a SmallVector with element types deduced from the mapping.
Value * getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF)
Return the runtime value for VF.
auto dyn_cast_if_present(const Y &Val)
dyn_cast_if_present<X> - Functionally identical to dyn_cast, except that a null (or none in the case ...
Definition Casting.h:732
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2207
void interleaveComma(const Container &c, StreamT &os, UnaryFunctor each_fn)
Definition STLExtras.h:2312
auto cast_or_null(const Y &Val)
Definition Casting.h:714
LLVM_ABI Value * concatenateVectors(IRBuilderBase &Builder, ArrayRef< Value * > Vecs)
Concatenate a list of vectors.
Align getLoadStoreAlignment(const Value *I)
A helper function that returns the alignment of load or store instruction.
bool isa_and_nonnull(const Y &Val)
Definition Casting.h:676
LLVM_ABI Value * createMinMaxOp(IRBuilderBase &Builder, RecurKind RK, Value *Left, Value *Right)
Returns a Min/Max operation corresponding to MinMaxRecurrenceKind.
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
static Error getOffset(const SymbolRef &Sym, SectionRef Sec, uint64_t &Result)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1745
LLVM_ABI Constant * createBitMaskForGaps(IRBuilderBase &Builder, unsigned VF, const InterleaveGroup< Instruction > &Group)
Create a mask that filters the members of an interleave group where there are gaps.
LLVM_ABI llvm::SmallVector< int, 16 > createStrideMask(unsigned Start, unsigned Stride, unsigned VF)
Create a stride shuffle mask.
auto reverse(ContainerTy &&C)
Definition STLExtras.h:407
ElementCount getVectorizedTypeVF(Type *Ty)
Returns the number of vector elements for a vectorized type.
LLVM_ABI llvm::SmallVector< int, 16 > createReplicatedMask(unsigned ReplicationFactor, unsigned VF)
Create a mask with replicated elements.
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:209
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1752
SmallVector< ValueTypeFromRangeType< R >, Size > to_vector(R &&Range)
Given a range of type R, iterate the entire range and return a SmallVector with elements of the vecto...
Type * toVectorizedTy(Type *Ty, ElementCount EC)
A helper for converting to vectorized types.
cl::opt< unsigned > ForceTargetInstructionCost
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
Definition STLExtras.h:322
LLVM_ABI bool isVectorIntrinsicWithStructReturnOverloadAtField(Intrinsic::ID ID, int RetIdx, const TargetTransformInfo *TTI)
Identifies if the vector form of the intrinsic that returns a struct is overloaded at the struct elem...
@ Other
Any other memory.
Definition ModRef.h:68
FunctionAddr VTableAddr uintptr_t uintptr_t Data
Definition InstrProf.h:221
LLVM_ABI llvm::SmallVector< int, 16 > createInterleaveMask(unsigned VF, unsigned NumVecs)
Create an interleave shuffle mask.
RecurKind
These are the kinds of recurrences that we support.
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ FMinimumNum
FP min with llvm.minimumnum semantics.
@ FMinimum
FP min with llvm.minimum semantics.
@ FMaxNum
FP max with llvm.maxnum semantics including NaNs.
@ Mul
Product of integers.
@ FSub
Subtraction of floats.
@ AnyOf
AnyOf reduction with select(cmp(),x,y) where one of (x,y) is loop invariant, and both x and y are int...
@ FindLast
FindLast reduction with select(cmp(),x,y) where x and y.
@ FMaximum
FP max with llvm.maximum semantics.
@ SMax
Signed integer max implemented in terms of select(cmp()).
@ SMin
Signed integer min implemented in terms of select(cmp()).
@ FMinNum
FP min with llvm.minnum semantics including NaNs.
@ Sub
Subtraction of integers.
@ Add
Sum of integers.
@ FAdd
Sum of floats.
@ FMaximumNum
FP max with llvm.maximumnum semantics.
@ UMax
Unsigned integer max implemented in terms of select(cmp()).
LLVM_ABI bool isVectorIntrinsicWithScalarOpAtArg(Intrinsic::ID ID, unsigned ScalarOpdIdx, const TargetTransformInfo *TTI)
Identifies if the vector form of the intrinsic has a scalar operand.
LLVM_ABI Value * getRecurrenceIdentity(RecurKind K, Type *Tp, FastMathFlags FMF)
Given information about an recurrence kind, return the identity for the @llvm.vector....
DWARFExpression::Operation Op
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1946
Type * getLoadStoreType(const Value *I)
A helper function that returns the type of a load or store instruction.
LLVM_ABI Value * createOrderedReduction(IRBuilderBase &B, RecurKind RdxKind, Value *Src, Value *Start)
Create an ordered reduction intrinsic using the given recurrence kind RdxKind.
ArrayRef< Type * > getContainedTypes(Type *const &Ty)
Returns the types contained in Ty.
Type * toVectorTy(Type *Scalar, ElementCount EC)
A helper function for converting Scalar types to vector types.
LLVM_ABI bool isVectorIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID, int OpdIdx, const TargetTransformInfo *TTI)
Identifies if the vector form of the intrinsic is overloaded on the type of the operand at index OpdI...
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
Struct to hold various analysis needed for cost computations.
static bool isFreeScalarIntrinsic(Intrinsic::ID ID)
Returns true if ID is a pseudo intrinsic that is dropped via scalarization rather than widened.
Definition VPlan.cpp:1945
void execute(VPTransformState &State) override
Generate the phi nodes.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this first-order recurrence phi recipe.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
An overlay for VPIRInstructions wrapping PHI nodes enabling convenient use cast/dyn_cast/isa and exec...
Definition VPlan.h:1742
PHINode & getIRPhi()
Definition VPlan.h:1755
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
The method which generates the output IR instructions that correspond to this VPRecipe,...
void execute(VPTransformState &State) override
Generate the instruction.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
InstructionCost getCostForRecipeWithOpcode(unsigned Opcode, ElementCount VF, VPCostContext &Ctx) const
Compute the cost for this recipe for VF, using Opcode and Ctx.
VPRecipeWithIRFlags(const unsigned char SC, ArrayRef< VPValue * > Operands, const VPIRFlags &Flags, DebugLoc DL=DebugLoc::getUnknown())
Definition VPlan.h:1108
A symbolic live-in VPValue, used for values like vector trip count, VF, and VFxUF.
Definition VPlanValue.h:280
SmallDenseMap< const VPBasicBlock *, BasicBlock * > VPBB2IRBB
A mapping of each VPBasicBlock to the corresponding BasicBlock.
VPTransformState holds information passed down when "executing" a VPlan, needed for generating the ou...
VPTypeAnalysis TypeAnalysis
VPlan-based type analysis.
struct llvm::VPTransformState::CFGState CFG
Value * get(const VPValue *Def, bool IsScalar=false)
Get the generated vector Value for a given VPValue Def if IsScalar is false, otherwise return the gen...
Definition VPlan.cpp:280
IRBuilderBase & Builder
Hold a reference to the IRBuilder used to generate output IR code.
ElementCount VF
The chosen Vectorization Factor of the loop being vectorized.
LLVM_ABI_FOR_TEST void execute(VPTransformState &State) override
Generate the wide load or gather.
LLVM_ABI_FOR_TEST void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
LLVM_ABI_FOR_TEST InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenLoadEVLRecipe.
VPValue * getEVL() const
Return the EVL operand.
Definition VPlan.h:3631
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate a wide load or gather.
VPValue * getStoredValue() const
Return the address accessed by this recipe.
Definition VPlan.h:3732
LLVM_ABI_FOR_TEST void execute(VPTransformState &State) override
Generate the wide store or scatter.
LLVM_ABI_FOR_TEST void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
LLVM_ABI_FOR_TEST InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenStoreEVLRecipe.
VPValue * getEVL() const
Return the EVL operand.
Definition VPlan.h:3735
void execute(VPTransformState &State) override
Generate a wide store or scatter.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPValue * getStoredValue() const
Return the value stored by this recipe.
Definition VPlan.h:3681