LLVM 23.0.0git
SelectionDAG.cpp
Go to the documentation of this file.
1//===- SelectionDAG.cpp - Implement the SelectionDAG data structures ------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This implements the SelectionDAG class.
10//
11//===----------------------------------------------------------------------===//
12
14#include "SDNodeDbgValue.h"
15#include "llvm/ADT/APFloat.h"
16#include "llvm/ADT/APInt.h"
17#include "llvm/ADT/APSInt.h"
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/BitVector.h"
20#include "llvm/ADT/DenseSet.h"
21#include "llvm/ADT/FoldingSet.h"
22#include "llvm/ADT/STLExtras.h"
25#include "llvm/ADT/Twine.h"
51#include "llvm/IR/Constant.h"
52#include "llvm/IR/Constants.h"
53#include "llvm/IR/DataLayout.h"
55#include "llvm/IR/DebugLoc.h"
57#include "llvm/IR/Function.h"
58#include "llvm/IR/GlobalValue.h"
59#include "llvm/IR/Metadata.h"
60#include "llvm/IR/Type.h"
64#include "llvm/Support/Debug.h"
74#include <algorithm>
75#include <cassert>
76#include <cstdint>
77#include <cstdlib>
78#include <limits>
79#include <optional>
80#include <string>
81#include <utility>
82#include <vector>
83
84using namespace llvm;
85using namespace llvm::SDPatternMatch;
86
87/// makeVTList - Return an instance of the SDVTList struct initialized with the
88/// specified members.
89static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
90 SDVTList Res = {VTs, NumVTs};
91 return Res;
92}
93
94// Default null implementations of the callbacks.
98
// Out-of-line definitions of the no-op listener anchors.  Defining a virtual
// method out-of-line pins each listener's vtable to this translation unit
// (the usual LLVM "anchor" idiom).
void SelectionDAG::DAGNodeDeletedListener::anchor() {}
void SelectionDAG::DAGNodeInsertedListener::anchor() {}
101
102#define DEBUG_TYPE "selectiondag"
103
// Hidden flag (default: on) controlling whether the loads and stores emitted
// when memcpy is inlined are ganged up (glued) so they stay together.
static cl::opt<bool> EnableMemCpyDAGOpt("enable-memcpy-dag-opt",
    cl::Hidden, cl::init(true),
    cl::desc("Gang up loads and stores generated by inlining of memcpy"));

// Cap on how many memcpy loads/stores may be glued together.  Default is 0;
// the interpretation of 0 is decided at the use site (not visible here).
static cl::opt<int> MaxLdStGlue("ldstmemcpy-glue-max",
    cl::desc("Number limit for gluing ld/st of memcpy."),
    cl::Hidden, cl::init(0));
111
113 MaxSteps("has-predecessor-max-steps", cl::Hidden, cl::init(8192),
114 cl::desc("DAG combiner limit number of steps when searching DAG "
115 "for predecessor nodes"));
116
118 LLVM_DEBUG(dbgs() << Msg; V.getNode()->dump(G););
119}
120
122
123//===----------------------------------------------------------------------===//
124// ConstantFPSDNode Class
125//===----------------------------------------------------------------------===//
126
127/// isExactlyValue - We don't rely on operator== working on double values, as
128/// it returns true for things that are clearly not equal, like -0.0 and 0.0.
129/// As such, this method can be used to do an exact bit-for-bit comparison of
130/// two floating point values.
132 return getValueAPF().bitwiseIsEqual(V);
133}
134
136 const APFloat& Val) {
137 assert(VT.isFloatingPoint() && "Can only convert between FP types");
138
139 // convert modifies in place, so make a copy.
140 APFloat Val2 = APFloat(Val);
141 bool losesInfo;
143 &losesInfo);
144 return !losesInfo;
145}
146
147//===----------------------------------------------------------------------===//
148// ISD Namespace
149//===----------------------------------------------------------------------===//
150
151bool ISD::isConstantSplatVector(const SDNode *N, APInt &SplatVal) {
152 if (N->getOpcode() == ISD::SPLAT_VECTOR) {
153 if (auto OptAPInt = N->getOperand(0)->bitcastToAPInt()) {
154 unsigned EltSize =
155 N->getValueType(0).getVectorElementType().getSizeInBits();
156 SplatVal = OptAPInt->trunc(EltSize);
157 return true;
158 }
159 }
160
161 auto *BV = dyn_cast<BuildVectorSDNode>(N);
162 if (!BV)
163 return false;
164
165 APInt SplatUndef;
166 unsigned SplatBitSize;
167 bool HasUndefs;
168 unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
169 // Endianness does not matter here. We are checking for a splat given the
170 // element size of the vector, and if we find such a splat for little endian
171 // layout, then that should be valid also for big endian (as the full vector
172 // size is known to be a multiple of the element size).
173 const bool IsBigEndian = false;
174 return BV->isConstantSplat(SplatVal, SplatUndef, SplatBitSize, HasUndefs,
175 EltSize, IsBigEndian) &&
176 EltSize == SplatBitSize;
177}
178
179// FIXME: AllOnes and AllZeros duplicate a lot of code. Could these be
180// specializations of the more general isConstantSplatVector()?
181
182bool ISD::isConstantSplatVectorAllOnes(const SDNode *N, bool BuildVectorOnly) {
183 // Look through a bit convert.
184 while (N->getOpcode() == ISD::BITCAST)
185 N = N->getOperand(0).getNode();
186
187 if (!BuildVectorOnly && N->getOpcode() == ISD::SPLAT_VECTOR) {
188 APInt SplatVal;
189 return isConstantSplatVector(N, SplatVal) && SplatVal.isAllOnes();
190 }
191
192 if (N->getOpcode() != ISD::BUILD_VECTOR) return false;
193
194 unsigned i = 0, e = N->getNumOperands();
195
196 // Skip over all of the undef values.
197 while (i != e && N->getOperand(i).isUndef())
198 ++i;
199
200 // Do not accept an all-undef vector.
201 if (i == e) return false;
202
203 // Do not accept build_vectors that aren't all constants or which have non-~0
204 // elements. We have to be a bit careful here, as the type of the constant
205 // may not be the same as the type of the vector elements due to type
206 // legalization (the elements are promoted to a legal type for the target and
207 // a vector of a type may be legal when the base element type is not).
208 // We only want to check enough bits to cover the vector elements, because
209 // we care if the resultant vector is all ones, not whether the individual
210 // constants are.
211 SDValue NotZero = N->getOperand(i);
212 if (auto OptAPInt = NotZero->bitcastToAPInt()) {
213 unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
214 if (OptAPInt->countr_one() < EltSize)
215 return false;
216 } else
217 return false;
218
219 // Okay, we have at least one ~0 value, check to see if the rest match or are
220 // undefs. Even with the above element type twiddling, this should be OK, as
221 // the same type legalization should have applied to all the elements.
222 for (++i; i != e; ++i)
223 if (N->getOperand(i) != NotZero && !N->getOperand(i).isUndef())
224 return false;
225 return true;
226}
227
228bool ISD::isConstantSplatVectorAllZeros(const SDNode *N, bool BuildVectorOnly) {
229 // Look through a bit convert.
230 while (N->getOpcode() == ISD::BITCAST)
231 N = N->getOperand(0).getNode();
232
233 if (!BuildVectorOnly && N->getOpcode() == ISD::SPLAT_VECTOR) {
234 APInt SplatVal;
235 return isConstantSplatVector(N, SplatVal) && SplatVal.isZero();
236 }
237
238 if (N->getOpcode() != ISD::BUILD_VECTOR) return false;
239
240 bool IsAllUndef = true;
241 for (const SDValue &Op : N->op_values()) {
242 if (Op.isUndef())
243 continue;
244 IsAllUndef = false;
245 // Do not accept build_vectors that aren't all constants or which have non-0
246 // elements. We have to be a bit careful here, as the type of the constant
247 // may not be the same as the type of the vector elements due to type
248 // legalization (the elements are promoted to a legal type for the target
249 // and a vector of a type may be legal when the base element type is not).
250 // We only want to check enough bits to cover the vector elements, because
251 // we care if the resultant vector is all zeros, not whether the individual
252 // constants are.
253 if (auto OptAPInt = Op->bitcastToAPInt()) {
254 unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
255 if (OptAPInt->countr_zero() < EltSize)
256 return false;
257 } else
258 return false;
259 }
260
261 // Do not accept an all-undef vector.
262 if (IsAllUndef)
263 return false;
264 return true;
265}
266
268 return isConstantSplatVectorAllOnes(N, /*BuildVectorOnly*/ true);
269}
270
272 return isConstantSplatVectorAllZeros(N, /*BuildVectorOnly*/ true);
273}
274
276 if (N->getOpcode() != ISD::BUILD_VECTOR)
277 return false;
278
279 for (const SDValue &Op : N->op_values()) {
280 if (Op.isUndef())
281 continue;
283 return false;
284 }
285 return true;
286}
287
289 if (N->getOpcode() != ISD::BUILD_VECTOR)
290 return false;
291
292 for (const SDValue &Op : N->op_values()) {
293 if (Op.isUndef())
294 continue;
296 return false;
297 }
298 return true;
299}
300
301bool ISD::isVectorShrinkable(const SDNode *N, unsigned NewEltSize,
302 bool Signed) {
303 assert(N->getValueType(0).isVector() && "Expected a vector!");
304
305 unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
306 if (EltSize <= NewEltSize)
307 return false;
308
309 if (N->getOpcode() == ISD::ZERO_EXTEND) {
310 return (N->getOperand(0).getValueType().getScalarSizeInBits() <=
311 NewEltSize) &&
312 !Signed;
313 }
314 if (N->getOpcode() == ISD::SIGN_EXTEND) {
315 return (N->getOperand(0).getValueType().getScalarSizeInBits() <=
316 NewEltSize) &&
317 Signed;
318 }
319 if (N->getOpcode() != ISD::BUILD_VECTOR)
320 return false;
321
322 for (const SDValue &Op : N->op_values()) {
323 if (Op.isUndef())
324 continue;
326 return false;
327
328 APInt C = Op->getAsAPIntVal().trunc(EltSize);
329 if (Signed && C.trunc(NewEltSize).sext(EltSize) != C)
330 return false;
331 if (!Signed && C.trunc(NewEltSize).zext(EltSize) != C)
332 return false;
333 }
334
335 return true;
336}
337
339 // Return false if the node has no operands.
340 // This is "logically inconsistent" with the definition of "all" but
341 // is probably the desired behavior.
342 if (N->getNumOperands() == 0)
343 return false;
344 return all_of(N->op_values(), [](SDValue Op) { return Op.isUndef(); });
345}
346
348 return N->getOpcode() == ISD::FREEZE && N->getOperand(0).isUndef();
349}
350
351template <typename ConstNodeType>
353 std::function<bool(ConstNodeType *)> Match,
354 bool AllowUndefs, bool AllowTruncation) {
355 // FIXME: Add support for scalar UNDEF cases?
356 if (auto *C = dyn_cast<ConstNodeType>(Op))
357 return Match(C);
358
359 // FIXME: Add support for vector UNDEF cases?
360 if (ISD::BUILD_VECTOR != Op.getOpcode() &&
361 ISD::SPLAT_VECTOR != Op.getOpcode())
362 return false;
363
364 EVT SVT = Op.getValueType().getScalarType();
365 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
366 if (AllowUndefs && Op.getOperand(i).isUndef()) {
367 if (!Match(nullptr))
368 return false;
369 continue;
370 }
371
372 auto *Cst = dyn_cast<ConstNodeType>(Op.getOperand(i));
373 if (!Cst || (!AllowTruncation && Cst->getValueType(0) != SVT) ||
374 !Match(Cst))
375 return false;
376 }
377 return true;
378}
379// Build used template types.
381 SDValue, std::function<bool(ConstantSDNode *)>, bool, bool);
383 SDValue, std::function<bool(ConstantFPSDNode *)>, bool, bool);
384
386 SDValue LHS, SDValue RHS,
387 std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match,
388 bool AllowUndefs, bool AllowTypeMismatch) {
389 if (!AllowTypeMismatch && LHS.getValueType() != RHS.getValueType())
390 return false;
391
392 // TODO: Add support for scalar UNDEF cases?
393 if (auto *LHSCst = dyn_cast<ConstantSDNode>(LHS))
394 if (auto *RHSCst = dyn_cast<ConstantSDNode>(RHS))
395 return Match(LHSCst, RHSCst);
396
397 // TODO: Add support for vector UNDEF cases?
398 if (LHS.getOpcode() != RHS.getOpcode() ||
399 (LHS.getOpcode() != ISD::BUILD_VECTOR &&
400 LHS.getOpcode() != ISD::SPLAT_VECTOR))
401 return false;
402
403 EVT SVT = LHS.getValueType().getScalarType();
404 for (unsigned i = 0, e = LHS.getNumOperands(); i != e; ++i) {
405 SDValue LHSOp = LHS.getOperand(i);
406 SDValue RHSOp = RHS.getOperand(i);
407 bool LHSUndef = AllowUndefs && LHSOp.isUndef();
408 bool RHSUndef = AllowUndefs && RHSOp.isUndef();
409 auto *LHSCst = dyn_cast<ConstantSDNode>(LHSOp);
410 auto *RHSCst = dyn_cast<ConstantSDNode>(RHSOp);
411 if ((!LHSCst && !LHSUndef) || (!RHSCst && !RHSUndef))
412 return false;
413 if (!AllowTypeMismatch && (LHSOp.getValueType() != SVT ||
414 LHSOp.getValueType() != RHSOp.getValueType()))
415 return false;
416 if (!Match(LHSCst, RHSCst))
417 return false;
418 }
419 return true;
420}
421
423 switch (MinMaxOpc) {
424 default:
425 llvm_unreachable("unrecognized opcode");
426 case ISD::UMIN:
427 return ISD::UMAX;
428 case ISD::UMAX:
429 return ISD::UMIN;
430 case ISD::SMIN:
431 return ISD::SMAX;
432 case ISD::SMAX:
433 return ISD::SMIN;
434 }
435}
436
438 switch (MinMaxOpc) {
439 default:
440 llvm_unreachable("unrecognized min/max opcode");
441 case ISD::SMIN:
442 return ISD::UMIN;
443 case ISD::SMAX:
444 return ISD::UMAX;
445 case ISD::UMIN:
446 return ISD::SMIN;
447 case ISD::UMAX:
448 return ISD::SMAX;
449 }
450}
451
453 switch (VecReduceOpcode) {
454 default:
455 llvm_unreachable("Expected VECREDUCE opcode");
458 case ISD::VP_REDUCE_FADD:
459 case ISD::VP_REDUCE_SEQ_FADD:
460 return ISD::FADD;
463 case ISD::VP_REDUCE_FMUL:
464 case ISD::VP_REDUCE_SEQ_FMUL:
465 return ISD::FMUL;
467 case ISD::VP_REDUCE_ADD:
468 return ISD::ADD;
470 case ISD::VP_REDUCE_MUL:
471 return ISD::MUL;
473 case ISD::VP_REDUCE_AND:
474 return ISD::AND;
476 case ISD::VP_REDUCE_OR:
477 return ISD::OR;
479 case ISD::VP_REDUCE_XOR:
480 return ISD::XOR;
482 case ISD::VP_REDUCE_SMAX:
483 return ISD::SMAX;
485 case ISD::VP_REDUCE_SMIN:
486 return ISD::SMIN;
488 case ISD::VP_REDUCE_UMAX:
489 return ISD::UMAX;
491 case ISD::VP_REDUCE_UMIN:
492 return ISD::UMIN;
494 case ISD::VP_REDUCE_FMAX:
495 return ISD::FMAXNUM;
497 case ISD::VP_REDUCE_FMIN:
498 return ISD::FMINNUM;
500 case ISD::VP_REDUCE_FMAXIMUM:
501 return ISD::FMAXIMUM;
503 case ISD::VP_REDUCE_FMINIMUM:
504 return ISD::FMINIMUM;
505 }
506}
507
509 switch (MaskedOpc) {
510 case ISD::MASKED_UDIV:
511 return ISD::UDIV;
512 case ISD::MASKED_SDIV:
513 return ISD::SDIV;
514 case ISD::MASKED_UREM:
515 return ISD::UREM;
516 case ISD::MASKED_SREM:
517 return ISD::SREM;
518 default:
519 llvm_unreachable("Expected masked binop opcode");
520 }
521}
522
/// Return true if \p Opcode is any registered VP (vector-predicated) SDNode
/// opcode.  The case labels are generated from the VP registry: every
/// BEGIN_REGISTER_VP_SDNODE entry in VPIntrinsics.def expands to
/// "case ISD::<VPSD>: return true;".
bool ISD::isVPOpcode(unsigned Opcode) {
  switch (Opcode) {
  default:
    return false;
#define BEGIN_REGISTER_VP_SDNODE(VPSD, ...)                                    \
  case ISD::VPSD:                                                              \
    return true;
#include "llvm/IR/VPIntrinsics.def"
  }
}
533
/// Return true if \p Opcode is a VP SDNode carrying the BINARYOP property.
/// Macro expansion trick: BEGIN_REGISTER_VP_SDNODE emits the case label for
/// every VP node, VP_PROPERTY_BINARYOP inserts "return true;" only inside
/// entries that declare the property, and END_REGISTER_VP_SDNODE closes each
/// entry with "break;" so non-binary VP opcodes fall out to "return false".
bool ISD::isVPBinaryOp(unsigned Opcode) {
  switch (Opcode) {
  default:
    break;
#define BEGIN_REGISTER_VP_SDNODE(VPSD, ...) case ISD::VPSD:
#define VP_PROPERTY_BINARYOP return true;
#define END_REGISTER_VP_SDNODE(VPSD) break;
#include "llvm/IR/VPIntrinsics.def"
  }
  return false;
}
545
/// Return true if \p Opcode is one of the VP reduction opcodes.
/// NOTE(review): unlike the other ISD::isVP* helpers this list is maintained
/// by hand rather than generated from VPIntrinsics.def — keep it in sync with
/// the VP_REDUCE_* registrations there.
bool ISD::isVPReduction(unsigned Opcode) {
  switch (Opcode) {
  default:
    return false;
  case ISD::VP_REDUCE_ADD:
  case ISD::VP_REDUCE_MUL:
  case ISD::VP_REDUCE_AND:
  case ISD::VP_REDUCE_OR:
  case ISD::VP_REDUCE_XOR:
  case ISD::VP_REDUCE_SMAX:
  case ISD::VP_REDUCE_SMIN:
  case ISD::VP_REDUCE_UMAX:
  case ISD::VP_REDUCE_UMIN:
  case ISD::VP_REDUCE_FMAX:
  case ISD::VP_REDUCE_FMIN:
  case ISD::VP_REDUCE_FMAXIMUM:
  case ISD::VP_REDUCE_FMINIMUM:
  case ISD::VP_REDUCE_FADD:
  case ISD::VP_REDUCE_FMUL:
  case ISD::VP_REDUCE_SEQ_FADD:
  case ISD::VP_REDUCE_SEQ_FMUL:
    return true;
  }
}
570
/// The operand position of the vector mask.
/// Returns std::nullopt when \p Opcode is not a registered VP opcode;
/// otherwise the MASKPOS recorded for the node in VPIntrinsics.def.
std::optional<unsigned> ISD::getVPMaskIdx(unsigned Opcode) {
  switch (Opcode) {
  default:
    return std::nullopt;
#define BEGIN_REGISTER_VP_SDNODE(VPSD, LEGALPOS, TDNAME, MASKPOS, ...)         \
  case ISD::VPSD:                                                              \
    return MASKPOS;
#include "llvm/IR/VPIntrinsics.def"
  }
}
582
/// The operand position of the explicit vector length parameter.
/// Returns std::nullopt when \p Opcode is not a registered VP opcode;
/// otherwise the EVLPOS recorded for the node in VPIntrinsics.def.
std::optional<unsigned> ISD::getVPExplicitVectorLengthIdx(unsigned Opcode) {
  switch (Opcode) {
  default:
    return std::nullopt;
#define BEGIN_REGISTER_VP_SDNODE(VPSD, LEGALPOS, TDNAME, MASKPOS, EVLPOS)      \
  case ISD::VPSD:                                                              \
    return EVLPOS;
#include "llvm/IR/VPIntrinsics.def"
  }
}
594
/// Translate a VP opcode to its non-predicated ("functional") equivalent.
/// \param hasFPExcept currently ignored; see the FIXME below.
/// Returns std::nullopt when \p VPOpcode is not a VP opcode or when its
/// registry entry declares no VP_PROPERTY_FUNCTIONAL_SDOPC (that entry then
/// reaches END's "break;" and falls through to the trailing return).
std::optional<unsigned> ISD::getBaseOpcodeForVP(unsigned VPOpcode,
                                                bool hasFPExcept) {
  // FIXME: Return strict opcodes in case of fp exceptions.
  switch (VPOpcode) {
  default:
    return std::nullopt;
#define BEGIN_REGISTER_VP_SDNODE(VPOPC, ...) case ISD::VPOPC:
#define VP_PROPERTY_FUNCTIONAL_SDOPC(SDOPC) return ISD::SDOPC;
#define END_REGISTER_VP_SDNODE(VPOPC) break;
#include "llvm/IR/VPIntrinsics.def"
  }
  return std::nullopt;
}
608
/// Inverse of getBaseOpcodeForVP: translate a non-predicated opcode to its VP
/// counterpart.  VP_PROPERTY_FUNCTIONAL_SDOPC supplies each case label and
/// the entry's END_REGISTER_VP_SDNODE immediately follows it with
/// "return ISD::VPOPC;", so every labelled path returns the matching VP
/// opcode; unregistered opcodes take the default and return std::nullopt.
std::optional<unsigned> ISD::getVPForBaseOpcode(unsigned Opcode) {
  switch (Opcode) {
  default:
    return std::nullopt;
#define BEGIN_REGISTER_VP_SDNODE(VPOPC, ...) break;
#define VP_PROPERTY_FUNCTIONAL_SDOPC(SDOPC) case ISD::SDOPC:
#define END_REGISTER_VP_SDNODE(VPOPC) return ISD::VPOPC;
#include "llvm/IR/VPIntrinsics.def"
  }
}
619
621 switch (ExtType) {
622 case ISD::EXTLOAD:
623 return IsFP ? ISD::FP_EXTEND : ISD::ANY_EXTEND;
624 case ISD::SEXTLOAD:
625 return ISD::SIGN_EXTEND;
626 case ISD::ZEXTLOAD:
627 return ISD::ZERO_EXTEND;
628 default:
629 break;
630 }
631
632 llvm_unreachable("Invalid LoadExtType");
633}
634
636 // To perform this operation, we just need to swap the L and G bits of the
637 // operation.
638 unsigned OldL = (Operation >> 2) & 1;
639 unsigned OldG = (Operation >> 1) & 1;
640 return ISD::CondCode((Operation & ~6) | // Keep the N, U, E bits
641 (OldL << 1) | // New G bit
642 (OldG << 2)); // New L bit.
643}
644
646 unsigned Operation = Op;
647 if (isIntegerLike)
648 Operation ^= 7; // Flip L, G, E bits, but not U.
649 else
650 Operation ^= 15; // Flip all of the condition bits.
651
653 Operation &= ~8; // Don't let N and U bits get set.
654
655 return ISD::CondCode(Operation);
656}
657
661
663 bool isIntegerLike) {
664 return getSetCCInverseImpl(Op, isIntegerLike);
665}
666
667/// For an integer comparison, return 1 if the comparison is a signed operation
668/// and 2 if the result is an unsigned comparison. Return zero if the operation
669/// does not depend on the sign of the input (setne and seteq).
670static int isSignedOp(ISD::CondCode Opcode) {
671 switch (Opcode) {
672 default: llvm_unreachable("Illegal integer setcc operation!");
673 case ISD::SETEQ:
674 case ISD::SETNE: return 0;
675 case ISD::SETLT:
676 case ISD::SETLE:
677 case ISD::SETGT:
678 case ISD::SETGE: return 1;
679 case ISD::SETULT:
680 case ISD::SETULE:
681 case ISD::SETUGT:
682 case ISD::SETUGE: return 2;
683 }
684}
685
687 EVT Type) {
688 bool IsInteger = Type.isInteger();
689 if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
690 // Cannot fold a signed integer setcc with an unsigned integer setcc.
691 return ISD::SETCC_INVALID;
692
693 unsigned Op = Op1 | Op2; // Combine all of the condition bits.
694
695 // If the N and U bits get set, then the resultant comparison DOES suddenly
696 // care about orderedness, and it is true when ordered.
697 if (Op > ISD::SETTRUE2)
698 Op &= ~16; // Clear the U bit if the N bit is set.
699
700 // Canonicalize illegal integer setcc's.
701 if (IsInteger && Op == ISD::SETUNE) // e.g. SETUGT | SETULT
702 Op = ISD::SETNE;
703
704 return ISD::CondCode(Op);
705}
706
708 EVT Type) {
709 bool IsInteger = Type.isInteger();
710 if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
711 // Cannot fold a signed setcc with an unsigned setcc.
712 return ISD::SETCC_INVALID;
713
714 // Combine all of the condition bits.
715 ISD::CondCode Result = ISD::CondCode(Op1 & Op2);
716
717 // Canonicalize illegal integer setcc's.
718 if (IsInteger) {
719 switch (Result) {
720 default: break;
721 case ISD::SETUO : Result = ISD::SETFALSE; break; // SETUGT & SETULT
722 case ISD::SETOEQ: // SETEQ & SETU[LG]E
723 case ISD::SETUEQ: Result = ISD::SETEQ ; break; // SETUGE & SETULE
724 case ISD::SETOLT: Result = ISD::SETULT ; break; // SETULT & SETNE
725 case ISD::SETOGT: Result = ISD::SETUGT ; break; // SETUGT & SETNE
726 }
727 }
728
729 return Result;
730}
731
732//===----------------------------------------------------------------------===//
733// SDNode Profile Support
734//===----------------------------------------------------------------------===//
735
/// AddNodeIDOpcode - Add the node opcode to the NodeID data.
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) {
  // The opcode is hashed as a plain integer; it is the first component of
  // every node profile (see AddNodeIDNode).
  ID.AddInteger(OpC);
}
740
741/// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
742/// solely with their pointer.
744 ID.AddPointer(VTList.VTs);
745}
746
747/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
750 for (const auto &Op : Ops) {
751 ID.AddPointer(Op.getNode());
752 ID.AddInteger(Op.getResNo());
753 }
754}
755
756/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
759 for (const auto &Op : Ops) {
760 ID.AddPointer(Op.getNode());
761 ID.AddInteger(Op.getResNo());
762 }
763}
764
/// Profile a prospective node — opcode, result-type list, and operand list —
/// into \p ID.  The three components must always be added in this order so
/// the FoldingSet hash matches profiles built from existing nodes.
static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned OpC,
                          SDVTList VTList, ArrayRef<SDValue> OpList) {
  AddNodeIDOpcode(ID, OpC);
  AddNodeIDValueTypes(ID, VTList);
  AddNodeIDOperands(ID, OpList);
}
771
772/// If this is an SDNode with special info, add this info to the NodeID data.
774 switch (N->getOpcode()) {
777 case ISD::MCSymbol:
778 llvm_unreachable("Should only be used on nodes with operands");
779 default: break; // Normal nodes don't need extra info.
781 case ISD::Constant: {
783 ID.AddPointer(C->getConstantIntValue());
784 ID.AddBoolean(C->isOpaque());
785 break;
786 }
788 case ISD::ConstantFP:
789 ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
790 break;
796 ID.AddPointer(GA->getGlobal());
797 ID.AddInteger(GA->getOffset());
798 ID.AddInteger(GA->getTargetFlags());
799 break;
800 }
801 case ISD::BasicBlock:
803 break;
804 case ISD::Register:
805 ID.AddInteger(cast<RegisterSDNode>(N)->getReg().id());
806 break;
808 ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
809 break;
810 case ISD::SRCVALUE:
811 ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
812 break;
813 case ISD::FrameIndex:
815 ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
816 break;
818 ID.AddInteger(cast<PseudoProbeSDNode>(N)->getGuid());
819 ID.AddInteger(cast<PseudoProbeSDNode>(N)->getIndex());
820 ID.AddInteger(cast<PseudoProbeSDNode>(N)->getAttributes());
821 break;
822 case ISD::JumpTable:
824 ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
825 ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
826 break;
830 ID.AddInteger(CP->getAlign().value());
831 ID.AddInteger(CP->getOffset());
834 else
835 ID.AddPointer(CP->getConstVal());
836 ID.AddInteger(CP->getTargetFlags());
837 break;
838 }
839 case ISD::TargetIndex: {
841 ID.AddInteger(TI->getIndex());
842 ID.AddInteger(TI->getOffset());
843 ID.AddInteger(TI->getTargetFlags());
844 break;
845 }
846 case ISD::LOAD: {
847 const LoadSDNode *LD = cast<LoadSDNode>(N);
848 ID.AddInteger(LD->getMemoryVT().getRawBits());
849 ID.AddInteger(LD->getRawSubclassData());
850 ID.AddInteger(LD->getPointerInfo().getAddrSpace());
851 ID.AddInteger(LD->getMemOperand()->getFlags());
852 break;
853 }
854 case ISD::STORE: {
855 const StoreSDNode *ST = cast<StoreSDNode>(N);
856 ID.AddInteger(ST->getMemoryVT().getRawBits());
857 ID.AddInteger(ST->getRawSubclassData());
858 ID.AddInteger(ST->getPointerInfo().getAddrSpace());
859 ID.AddInteger(ST->getMemOperand()->getFlags());
860 break;
861 }
862 case ISD::VP_LOAD: {
863 const VPLoadSDNode *ELD = cast<VPLoadSDNode>(N);
864 ID.AddInteger(ELD->getMemoryVT().getRawBits());
865 ID.AddInteger(ELD->getRawSubclassData());
866 ID.AddInteger(ELD->getPointerInfo().getAddrSpace());
867 ID.AddInteger(ELD->getMemOperand()->getFlags());
868 break;
869 }
870 case ISD::VP_LOAD_FF: {
871 const auto *LD = cast<VPLoadFFSDNode>(N);
872 ID.AddInteger(LD->getMemoryVT().getRawBits());
873 ID.AddInteger(LD->getRawSubclassData());
874 ID.AddInteger(LD->getPointerInfo().getAddrSpace());
875 ID.AddInteger(LD->getMemOperand()->getFlags());
876 break;
877 }
878 case ISD::VP_STORE: {
879 const VPStoreSDNode *EST = cast<VPStoreSDNode>(N);
880 ID.AddInteger(EST->getMemoryVT().getRawBits());
881 ID.AddInteger(EST->getRawSubclassData());
882 ID.AddInteger(EST->getPointerInfo().getAddrSpace());
883 ID.AddInteger(EST->getMemOperand()->getFlags());
884 break;
885 }
886 case ISD::EXPERIMENTAL_VP_STRIDED_LOAD: {
888 ID.AddInteger(SLD->getMemoryVT().getRawBits());
889 ID.AddInteger(SLD->getRawSubclassData());
890 ID.AddInteger(SLD->getPointerInfo().getAddrSpace());
891 break;
892 }
893 case ISD::EXPERIMENTAL_VP_STRIDED_STORE: {
895 ID.AddInteger(SST->getMemoryVT().getRawBits());
896 ID.AddInteger(SST->getRawSubclassData());
897 ID.AddInteger(SST->getPointerInfo().getAddrSpace());
898 break;
899 }
900 case ISD::VP_GATHER: {
902 ID.AddInteger(EG->getMemoryVT().getRawBits());
903 ID.AddInteger(EG->getRawSubclassData());
904 ID.AddInteger(EG->getPointerInfo().getAddrSpace());
905 ID.AddInteger(EG->getMemOperand()->getFlags());
906 break;
907 }
908 case ISD::VP_SCATTER: {
910 ID.AddInteger(ES->getMemoryVT().getRawBits());
911 ID.AddInteger(ES->getRawSubclassData());
912 ID.AddInteger(ES->getPointerInfo().getAddrSpace());
913 ID.AddInteger(ES->getMemOperand()->getFlags());
914 break;
915 }
916 case ISD::MLOAD: {
918 ID.AddInteger(MLD->getMemoryVT().getRawBits());
919 ID.AddInteger(MLD->getRawSubclassData());
920 ID.AddInteger(MLD->getPointerInfo().getAddrSpace());
921 ID.AddInteger(MLD->getMemOperand()->getFlags());
922 break;
923 }
924 case ISD::MSTORE: {
926 ID.AddInteger(MST->getMemoryVT().getRawBits());
927 ID.AddInteger(MST->getRawSubclassData());
928 ID.AddInteger(MST->getPointerInfo().getAddrSpace());
929 ID.AddInteger(MST->getMemOperand()->getFlags());
930 break;
931 }
932 case ISD::MGATHER: {
934 ID.AddInteger(MG->getMemoryVT().getRawBits());
935 ID.AddInteger(MG->getRawSubclassData());
936 ID.AddInteger(MG->getPointerInfo().getAddrSpace());
937 ID.AddInteger(MG->getMemOperand()->getFlags());
938 break;
939 }
940 case ISD::MSCATTER: {
942 ID.AddInteger(MS->getMemoryVT().getRawBits());
943 ID.AddInteger(MS->getRawSubclassData());
944 ID.AddInteger(MS->getPointerInfo().getAddrSpace());
945 ID.AddInteger(MS->getMemOperand()->getFlags());
946 break;
947 }
950 case ISD::ATOMIC_SWAP:
962 case ISD::ATOMIC_LOAD:
963 case ISD::ATOMIC_STORE: {
964 const AtomicSDNode *AT = cast<AtomicSDNode>(N);
965 ID.AddInteger(AT->getMemoryVT().getRawBits());
966 ID.AddInteger(AT->getRawSubclassData());
967 ID.AddInteger(AT->getPointerInfo().getAddrSpace());
968 ID.AddInteger(AT->getMemOperand()->getFlags());
969 break;
970 }
971 case ISD::VECTOR_SHUFFLE: {
972 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
973 for (int M : Mask)
974 ID.AddInteger(M);
975 break;
976 }
977 case ISD::ADDRSPACECAST: {
979 ID.AddInteger(ASC->getSrcAddressSpace());
980 ID.AddInteger(ASC->getDestAddressSpace());
981 break;
982 }
984 case ISD::BlockAddress: {
986 ID.AddPointer(BA->getBlockAddress());
987 ID.AddInteger(BA->getOffset());
988 ID.AddInteger(BA->getTargetFlags());
989 break;
990 }
991 case ISD::AssertAlign:
992 ID.AddInteger(cast<AssertAlignSDNode>(N)->getAlign().value());
993 break;
994 case ISD::PREFETCH:
997 // Handled by MemIntrinsicSDNode check after the switch.
998 break;
1000 ID.AddPointer(cast<MDNodeSDNode>(N)->getMD());
1001 break;
1002 } // end switch (N->getOpcode())
1003
1004 // MemIntrinsic nodes could also have subclass data, address spaces, and flags
1005 // to check.
1006 if (auto *MN = dyn_cast<MemIntrinsicSDNode>(N)) {
1007 ID.AddInteger(MN->getRawSubclassData());
1008 ID.AddInteger(MN->getMemoryVT().getRawBits());
1009 for (const MachineMemOperand *MMO : MN->memoperands()) {
1010 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
1011 ID.AddInteger(MMO->getFlags());
1012 }
1013 }
1014}
1015
1016/// AddNodeIDNode - Generic routine for adding a nodes info to the NodeID
1017/// data.
1019 AddNodeIDOpcode(ID, N->getOpcode());
1020 // Add the return value info.
1021 AddNodeIDValueTypes(ID, N->getVTList());
1022 // Add the operand info.
1023 AddNodeIDOperands(ID, N->ops());
1024
1025 // Handle SDNode leafs with special info.
1027}
1028
1029//===----------------------------------------------------------------------===//
1030// SelectionDAG Class
1031//===----------------------------------------------------------------------===//
1032
1033/// doNotCSE - Return true if CSE should not be performed for this node.
1034static bool doNotCSE(SDNode *N) {
1035 if (N->getValueType(0) == MVT::Glue)
1036 return true; // Never CSE anything that produces a glue result.
1037
1038 switch (N->getOpcode()) {
1039 default: break;
1040 case ISD::HANDLENODE:
1041 case ISD::EH_LABEL:
1042 return true; // Never CSE these nodes.
1043 }
1044
1045 // Check that remaining values produced are not flags.
1046 for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
1047 if (N->getValueType(i) == MVT::Glue)
1048 return true; // Never CSE anything that produces a glue result.
1049
1050 return false;
1051}
1052
1053/// Construct a DemandedElts mask which demands all elements of \p V.
1054/// If \p V is not a fixed-length vector, then this will return a single bit.
1056 EVT VT = V.getValueType();
1057 // Since the number of lanes in a scalable vector is unknown at compile time,
1058 // we track one bit which is implicitly broadcast to all lanes. This means
1059 // that all lanes in a scalable vector are considered demanded.
1061 : APInt(1, 1);
1062}
1063
1064/// RemoveDeadNodes - This method deletes all unreachable nodes in the
1065/// SelectionDAG.
1067 // Create a dummy node (which is not added to allnodes), that adds a reference
1068 // to the root node, preventing it from being deleted.
1069 HandleSDNode Dummy(getRoot());
1070
1071 SmallVector<SDNode*, 128> DeadNodes;
1072
1073 // Add all obviously-dead nodes to the DeadNodes worklist.
1074 for (SDNode &Node : allnodes())
1075 if (Node.use_empty())
1076 DeadNodes.push_back(&Node);
1077
1078 RemoveDeadNodes(DeadNodes);
1079
1080 // If the root changed (e.g. it was a dead load, update the root).
1081 setRoot(Dummy.getValue());
1082}
1083
1084/// RemoveDeadNodes - This method deletes the unreachable nodes in the
1085/// given list, and any nodes that become unreachable as a result.
1087
1088 // Process the worklist, deleting the nodes and adding their uses to the
1089 // worklist.
1090 while (!DeadNodes.empty()) {
1091 SDNode *N = DeadNodes.pop_back_val();
1092 // Skip to next node if we've already managed to delete the node. This could
1093 // happen if replacing a node causes a node previously added to the node to
1094 // be deleted.
1095 if (N->getOpcode() == ISD::DELETED_NODE)
1096 continue;
1097
1098 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
1099 DUL->NodeDeleted(N, nullptr);
1100
1101 // Take the node out of the appropriate CSE map.
1102 RemoveNodeFromCSEMaps(N);
1103
1104 // Next, brutally remove the operand list. This is safe to do, as there are
1105 // no cycles in the graph.
1106 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
1107 SDUse &Use = *I++;
1108 SDNode *Operand = Use.getNode();
1109 Use.set(SDValue());
1110
1111 // Now that we removed this operand, see if there are no uses of it left.
1112 if (Operand->use_empty())
1113 DeadNodes.push_back(Operand);
1114 }
1115
1116 DeallocateNode(N);
1117 }
1118}
1119
1121 SmallVector<SDNode*, 16> DeadNodes(1, N);
1122
1123 // Create a dummy node that adds a reference to the root node, preventing
1124 // it from being deleted. (This matters if the root is an operand of the
1125 // dead node.)
1126 HandleSDNode Dummy(getRoot());
1127
1128 RemoveDeadNodes(DeadNodes);
1129}
1130
1132 // First take this out of the appropriate CSE map.
1133 RemoveNodeFromCSEMaps(N);
1134
1135 // Finally, remove uses due to operands of this node, remove from the
1136 // AllNodes list, and delete the node.
1137 DeleteNodeNotInCSEMaps(N);
1138}
1139
// Delete a dead node; the caller guarantees it has already been removed from
// (or was never in) the CSE maps.
void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
  // The entry node sits at the front of AllNodes and must never be deleted.
  assert(N->getIterator() != AllNodes.begin() &&
         "Cannot delete the entry node!");
  assert(N->use_empty() && "Cannot delete a node that is not dead!");

  // Drop all of the operands and decrement used node's use counts.
  N->DropOperands();

  DeallocateNode(N);
}
1150
1151void SDDbgInfo::add(SDDbgValue *V, bool isParameter) {
1152 assert(!(V->isVariadic() && isParameter));
1153 if (isParameter)
1154 ByvalParmDbgValues.push_back(V);
1155 else
1156 DbgValues.push_back(V);
1157 for (const SDNode *Node : V->getSDNodes())
1158 if (Node)
1159 DbgValMap[Node].push_back(V);
1160}
1161
1163 DbgValMapType::iterator I = DbgValMap.find(Node);
1164 if (I == DbgValMap.end())
1165 return;
1166 for (auto &Val: I->second)
1167 Val->setIsInvalidated();
1168 DbgValMap.erase(I);
1169}
1170
1171void SelectionDAG::DeallocateNode(SDNode *N) {
1172 // If we have operands, deallocate them.
1174
1175 NodeAllocator.Deallocate(AllNodes.remove(N));
1176
1177 // Set the opcode to DELETED_NODE to help catch bugs when node
1178 // memory is reallocated.
1179 // FIXME: There are places in SDag that have grown a dependency on the opcode
1180 // value in the released node.
1181 __asan_unpoison_memory_region(&N->NodeType, sizeof(N->NodeType));
1182 N->NodeType = ISD::DELETED_NODE;
1183
1184 // If any of the SDDbgValue nodes refer to this SDNode, invalidate
1185 // them and forget about that node.
1186 DbgInfo->erase(N);
1187
1188 // Invalidate extra info.
1189 SDEI.erase(N);
1190}
1191
1192#ifndef NDEBUG
1193/// VerifySDNode - Check the given SDNode. Aborts if it is invalid.
1194void SelectionDAG::verifyNode(SDNode *N) const {
1195 switch (N->getOpcode()) {
1196 default:
1197 if (N->isTargetOpcode())
1199 break;
1200 case ISD::BUILD_PAIR: {
1201 EVT VT = N->getValueType(0);
1202 assert(N->getNumValues() == 1 && "Too many results!");
1203 assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
1204 "Wrong return type!");
1205 assert(N->getNumOperands() == 2 && "Wrong number of operands!");
1206 assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
1207 "Mismatched operand types!");
1208 assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
1209 "Wrong operand type!");
1210 assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
1211 "Wrong return type size");
1212 break;
1213 }
1214 case ISD::BUILD_VECTOR: {
1215 assert(N->getNumValues() == 1 && "Too many results!");
1216 assert(N->getValueType(0).isVector() && "Wrong return type!");
1217 assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
1218 "Wrong number of operands!");
1219 EVT EltVT = N->getValueType(0).getVectorElementType();
1220 for (const SDUse &Op : N->ops()) {
1221 assert((Op.getValueType() == EltVT ||
1222 (EltVT.isInteger() && Op.getValueType().isInteger() &&
1223 EltVT.bitsLE(Op.getValueType()))) &&
1224 "Wrong operand type!");
1225 assert(Op.getValueType() == N->getOperand(0).getValueType() &&
1226 "Operands must all have the same type");
1227 }
1228 break;
1229 }
1230 case ISD::SADDO:
1231 case ISD::UADDO:
1232 case ISD::SSUBO:
1233 case ISD::USUBO:
1234 assert(N->getNumValues() == 2 && "Wrong number of results!");
1235 assert(N->getVTList().NumVTs == 2 && N->getNumOperands() == 2 &&
1236 "Invalid add/sub overflow op!");
1237 assert(N->getVTList().VTs[0].isInteger() &&
1238 N->getVTList().VTs[1].isInteger() &&
1239 N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
1240 N->getOperand(0).getValueType() == N->getVTList().VTs[0] &&
1241 "Binary operator types must match!");
1242 break;
1243 }
1244}
1245#endif // NDEBUG
1246
1247/// Insert a newly allocated node into the DAG.
1248///
1249/// Handles insertion into the all nodes list and CSE map, as well as
1250/// verification and other common operations when a new node is allocated.
1251void SelectionDAG::InsertNode(SDNode *N) {
1252 AllNodes.push_back(N);
1253#ifndef NDEBUG
1254 N->PersistentId = NextPersistentId++;
1255 verifyNode(N);
1256#endif
1257 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
1258 DUL->NodeInserted(N);
1259}
1260
1261/// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that
1262/// correspond to it. This is useful when we're about to delete or repurpose
1263/// the node. We don't want future request for structurally identical nodes
1264/// to return N anymore.
1265bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
1266 bool Erased = false;
1267 switch (N->getOpcode()) {
1268 case ISD::HANDLENODE: return false; // noop.
1269 case ISD::CONDCODE:
1270 assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
1271 "Cond code doesn't exist!");
1272 Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
1273 CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;
1274 break;
1276 Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
1277 break;
1279 ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
1280 Erased = TargetExternalSymbols.erase(std::pair<std::string, unsigned>(
1281 ESN->getSymbol(), ESN->getTargetFlags()));
1282 break;
1283 }
1284 case ISD::MCSymbol: {
1285 auto *MCSN = cast<MCSymbolSDNode>(N);
1286 Erased = MCSymbols.erase(MCSN->getMCSymbol());
1287 break;
1288 }
1289 case ISD::VALUETYPE: {
1290 EVT VT = cast<VTSDNode>(N)->getVT();
1291 if (VT.isExtended()) {
1292 Erased = ExtendedValueTypeNodes.erase(VT);
1293 } else {
1294 Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr;
1295 ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr;
1296 }
1297 break;
1298 }
1299 default:
1300 // Remove it from the CSE Map.
1301 assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
1302 assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
1303 Erased = CSEMap.RemoveNode(N);
1304 break;
1305 }
1306#ifndef NDEBUG
1307 // Verify that the node was actually in one of the CSE maps, unless it has a
1308 // glue result (which cannot be CSE'd) or is one of the special cases that are
1309 // not subject to CSE.
1310 if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
1311 !N->isMachineOpcode() && !doNotCSE(N)) {
1312 N->dump(this);
1313 dbgs() << "\n";
1314 llvm_unreachable("Node is not in map!");
1315 }
1316#endif
1317 return Erased;
1318}
1319
1320/// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
1321/// maps and modified in place. Add it back to the CSE maps, unless an identical
1322/// node already exists, in which case transfer all its users to the existing
1323/// node. This transfer can potentially trigger recursive merging.
1324void
1325SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
1326 // For node types that aren't CSE'd, just act as if no identical node
1327 // already exists.
1328 if (!doNotCSE(N)) {
1329 SDNode *Existing = CSEMap.GetOrInsertNode(N);
1330 if (Existing != N) {
1331 // If there was already an existing matching node, use ReplaceAllUsesWith
1332 // to replace the dead one with the existing one. This can cause
1333 // recursive merging of other unrelated nodes down the line.
1334 Existing->intersectFlagsWith(N->getFlags());
1335 if (auto *MemNode = dyn_cast<MemSDNode>(Existing))
1336 MemNode->refineRanges(cast<MemSDNode>(N)->memoperands());
1337 ReplaceAllUsesWith(N, Existing);
1338
1339 // N is now dead. Inform the listeners and delete it.
1340 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
1341 DUL->NodeDeleted(N, Existing);
1342 DeleteNodeNotInCSEMaps(N);
1343 return;
1344 }
1345 }
1346
1347 // If the node doesn't already exist, we updated it. Inform listeners.
1348 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
1349 DUL->NodeUpdated(N);
1350}
1351
1352/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
1353/// were replaced with those specified. If this node is never memoized,
1354/// return null, otherwise return a pointer to the slot it would take. If a
1355/// node already exists with these operands, the slot will be non-null.
1356SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
1357 void *&InsertPos) {
1358 if (doNotCSE(N))
1359 return nullptr;
1360
1361 SDValue Ops[] = { Op };
1362 FoldingSetNodeID ID;
1363 AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
1365 SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
1366 if (Node)
1367 Node->intersectFlagsWith(N->getFlags());
1368 return Node;
1369}
1370
1371/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
1372/// were replaced with those specified. If this node is never memoized,
1373/// return null, otherwise return a pointer to the slot it would take. If a
1374/// node already exists with these operands, the slot will be non-null.
1375SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
1376 SDValue Op1, SDValue Op2,
1377 void *&InsertPos) {
1378 if (doNotCSE(N))
1379 return nullptr;
1380
1381 SDValue Ops[] = { Op1, Op2 };
1382 FoldingSetNodeID ID;
1383 AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
1385 SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
1386 if (Node)
1387 Node->intersectFlagsWith(N->getFlags());
1388 return Node;
1389}
1390
1391/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
1392/// were replaced with those specified. If this node is never memoized,
1393/// return null, otherwise return a pointer to the slot it would take. If a
1394/// node already exists with these operands, the slot will be non-null.
1395SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
1396 void *&InsertPos) {
1397 if (doNotCSE(N))
1398 return nullptr;
1399
1400 FoldingSetNodeID ID;
1401 AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
1403 SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
1404 if (Node)
1405 Node->intersectFlagsWith(N->getFlags());
1406 return Node;
1407}
1408
1410 Type *Ty = VT == MVT::iPTR ? PointerType::get(*getContext(), 0)
1411 : VT.getTypeForEVT(*getContext());
1412
1413 return getDataLayout().getABITypeAlign(Ty);
1414}
1415
1416// EntryNode could meaningfully have debug info if we can find it...
1418 : TM(tm), OptLevel(OL), EntryNode(ISD::EntryToken, 0, DebugLoc(),
1419 getVTList(MVT::Other, MVT::Glue)),
1420 Root(getEntryNode()) {
1421 InsertNode(&EntryNode);
1422 DbgInfo = new SDDbgInfo();
1423}
1424
1426 OptimizationRemarkEmitter &NewORE, Pass *PassPtr,
1427 const TargetLibraryInfo *LibraryInfo,
1428 const LibcallLoweringInfo *LibcallsInfo,
1429 UniformityInfo *NewUA, ProfileSummaryInfo *PSIin,
1431 FunctionVarLocs const *VarLocs) {
1432 MF = &NewMF;
1433 SDAGISelPass = PassPtr;
1434 ORE = &NewORE;
1437 LibInfo = LibraryInfo;
1438 Libcalls = LibcallsInfo;
1439 Context = &MF->getFunction().getContext();
1440 UA = NewUA;
1441 PSI = PSIin;
1442 BFI = BFIin;
1443 MMI = &MMIin;
1444 FnVarLocs = VarLocs;
1445}
1446
1448 assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
1449 allnodes_clear();
1450 OperandRecycler.clear(OperandAllocator);
1451 delete DbgInfo;
1452}
1453
1455 return llvm::shouldOptimizeForSize(FLI->MBB->getBasicBlock(), PSI, BFI);
1456}
1457
1458void SelectionDAG::allnodes_clear() {
1459 assert(&*AllNodes.begin() == &EntryNode);
1460 AllNodes.remove(AllNodes.begin());
1461 while (!AllNodes.empty())
1462 DeallocateNode(&AllNodes.front());
1463#ifndef NDEBUG
1464 NextPersistentId = 0;
1465#endif
1466}
1467
1468SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
1469 void *&InsertPos) {
1470 SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
1471 if (N) {
1472 switch (N->getOpcode()) {
1473 default: break;
1474 case ISD::Constant:
1475 case ISD::ConstantFP:
1476 llvm_unreachable("Querying for Constant and ConstantFP nodes requires "
1477 "debug location. Use another overload.");
1478 }
1479 }
1480 return N;
1481}
1482
1483SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
1484 const SDLoc &DL, void *&InsertPos) {
1485 SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
1486 if (N) {
1487 switch (N->getOpcode()) {
1488 case ISD::Constant:
1489 case ISD::ConstantFP:
1490 // Erase debug location from the node if the node is used at several
1491 // different places. Do not propagate one location to all uses as it
1492 // will cause a worse single stepping debugging experience.
1493 if (N->getDebugLoc() != DL.getDebugLoc())
1494 N->setDebugLoc(DebugLoc());
1495 break;
1496 default:
1497 // When the node's point of use is located earlier in the instruction
1498 // sequence than its prior point of use, update its debug info to the
1499 // earlier location.
1500 if (DL.getIROrder() && DL.getIROrder() < N->getIROrder())
1501 N->setDebugLoc(DL.getDebugLoc());
1502 break;
1503 }
1504 }
1505 return N;
1506}
1507
1509 allnodes_clear();
1510 OperandRecycler.clear(OperandAllocator);
1511 OperandAllocator.Reset();
1512 CSEMap.clear();
1513
1514 ExtendedValueTypeNodes.clear();
1515 ExternalSymbols.clear();
1516 TargetExternalSymbols.clear();
1517 MCSymbols.clear();
1518 SDEI.clear();
1519 llvm::fill(CondCodeNodes, nullptr);
1520 llvm::fill(ValueTypeNodes, nullptr);
1521
1522 EntryNode.UseList = nullptr;
1523 InsertNode(&EntryNode);
1524 Root = getEntryNode();
1525 DbgInfo->clear();
1526}
1527
1529 return VT.bitsGT(Op.getValueType())
1530 ? getNode(ISD::FP_EXTEND, DL, VT, Op)
1531 : getNode(ISD::FP_ROUND, DL, VT, Op,
1532 getIntPtrConstant(0, DL, /*isTarget=*/true));
1533}
1534
1535std::pair<SDValue, SDValue>
1537 const SDLoc &DL, EVT VT) {
1538 assert(!VT.bitsEq(Op.getValueType()) &&
1539 "Strict no-op FP extend/round not allowed.");
1540 SDValue Res =
1541 VT.bitsGT(Op.getValueType())
1542 ? getNode(ISD::STRICT_FP_EXTEND, DL, {VT, MVT::Other}, {Chain, Op})
1543 : getNode(ISD::STRICT_FP_ROUND, DL, {VT, MVT::Other},
1544 {Chain, Op, getIntPtrConstant(0, DL, /*isTarget=*/true)});
1545
1546 return std::pair<SDValue, SDValue>(Res, SDValue(Res.getNode(), 1));
1547}
1548
1550 return VT.bitsGT(Op.getValueType()) ?
1551 getNode(ISD::ANY_EXTEND, DL, VT, Op) :
1552 getNode(ISD::TRUNCATE, DL, VT, Op);
1553}
1554
1556 return VT.bitsGT(Op.getValueType()) ?
1557 getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
1558 getNode(ISD::TRUNCATE, DL, VT, Op);
1559}
1560
1562 return VT.bitsGT(Op.getValueType()) ?
1563 getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
1564 getNode(ISD::TRUNCATE, DL, VT, Op);
1565}
1566
1568 EVT VT) {
1569 assert(!VT.isVector());
1570 auto Type = Op.getValueType();
1571 SDValue DestOp;
1572 if (Type == VT)
1573 return Op;
1574 auto Size = Op.getValueSizeInBits();
1575 DestOp = getBitcast(EVT::getIntegerVT(*Context, Size), Op);
1576 if (DestOp.getValueType() == VT)
1577 return DestOp;
1578
1579 return getAnyExtOrTrunc(DestOp, DL, VT);
1580}
1581
1583 EVT VT) {
1584 assert(!VT.isVector());
1585 auto Type = Op.getValueType();
1586 SDValue DestOp;
1587 if (Type == VT)
1588 return Op;
1589 auto Size = Op.getValueSizeInBits();
1590 DestOp = getBitcast(MVT::getIntegerVT(Size), Op);
1591 if (DestOp.getValueType() == VT)
1592 return DestOp;
1593
1594 return getSExtOrTrunc(DestOp, DL, VT);
1595}
1596
1598 EVT VT) {
1599 assert(!VT.isVector());
1600 auto Type = Op.getValueType();
1601 SDValue DestOp;
1602 if (Type == VT)
1603 return Op;
1604 auto Size = Op.getValueSizeInBits();
1605 DestOp = getBitcast(MVT::getIntegerVT(Size), Op);
1606 if (DestOp.getValueType() == VT)
1607 return DestOp;
1608
1609 return getZExtOrTrunc(DestOp, DL, VT);
1610}
1611
1613 EVT OpVT) {
1614 if (VT.bitsLE(Op.getValueType()))
1615 return getNode(ISD::TRUNCATE, SL, VT, Op);
1616
1617 TargetLowering::BooleanContent BType = TLI->getBooleanContents(OpVT);
1618 return getNode(TLI->getExtendForContent(BType), SL, VT, Op);
1619}
1620
1622 EVT OpVT = Op.getValueType();
1623 assert(VT.isInteger() && OpVT.isInteger() &&
1624 "Cannot getZeroExtendInReg FP types");
1625 assert(VT.isVector() == OpVT.isVector() &&
1626 "getZeroExtendInReg type should be vector iff the operand "
1627 "type is vector!");
1628 assert((!VT.isVector() ||
1630 "Vector element counts must match in getZeroExtendInReg");
1631 assert(VT.getScalarType().bitsLE(OpVT.getScalarType()) && "Not extending!");
1632 if (OpVT == VT)
1633 return Op;
1634 // TODO: Use computeKnownBits instead of AssertZext.
1635 if (Op.getOpcode() == ISD::AssertZext && cast<VTSDNode>(Op.getOperand(1))
1636 ->getVT()
1637 .getScalarType()
1638 .bitsLE(VT.getScalarType()))
1639 return Op;
1641 VT.getScalarSizeInBits());
1642 return getNode(ISD::AND, DL, OpVT, Op, getConstant(Imm, DL, OpVT));
1643}
1644
1646 SDValue EVL, const SDLoc &DL,
1647 EVT VT) {
1648 EVT OpVT = Op.getValueType();
1649 assert(VT.isInteger() && OpVT.isInteger() &&
1650 "Cannot getVPZeroExtendInReg FP types");
1651 assert(VT.isVector() && OpVT.isVector() &&
1652 "getVPZeroExtendInReg type and operand type should be vector!");
1654 "Vector element counts must match in getZeroExtendInReg");
1655 assert(VT.getScalarType().bitsLE(OpVT.getScalarType()) && "Not extending!");
1656 if (OpVT == VT)
1657 return Op;
1659 VT.getScalarSizeInBits());
1660 return getNode(ISD::VP_AND, DL, OpVT, Op, getConstant(Imm, DL, OpVT), Mask,
1661 EVL);
1662}
1663
1665 // Only unsigned pointer semantics are supported right now. In the future this
1666 // might delegate to TLI to check pointer signedness.
1667 return getZExtOrTrunc(Op, DL, VT);
1668}
1669
1671 // Only unsigned pointer semantics are supported right now. In the future this
1672 // might delegate to TLI to check pointer signedness.
1673 return getZeroExtendInReg(Op, DL, VT);
1674}
1675
1677 return getNode(ISD::SUB, DL, VT, getConstant(0, DL, VT), Val);
1678}
1679
1680/// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
1682 return getNode(ISD::XOR, DL, VT, Val, getAllOnesConstant(DL, VT));
1683}
1684
1686 SDValue TrueValue = getBoolConstant(true, DL, VT, VT);
1687 return getNode(ISD::XOR, DL, VT, Val, TrueValue);
1688}
1689
1691 SDValue Mask, SDValue EVL, EVT VT) {
1692 SDValue TrueValue = getBoolConstant(true, DL, VT, VT);
1693 return getNode(ISD::VP_XOR, DL, VT, Val, TrueValue, Mask, EVL);
1694}
1695
1697 SDValue Mask, SDValue EVL) {
1698 return getVPZExtOrTrunc(DL, VT, Op, Mask, EVL);
1699}
1700
1702 SDValue Mask, SDValue EVL) {
1703 if (VT.bitsGT(Op.getValueType()))
1704 return getNode(ISD::VP_ZERO_EXTEND, DL, VT, Op, Mask, EVL);
1705 if (VT.bitsLT(Op.getValueType()))
1706 return getNode(ISD::VP_TRUNCATE, DL, VT, Op, Mask, EVL);
1707 return Op;
1708}
1709
1711 EVT OpVT) {
1712 if (!V)
1713 return getConstant(0, DL, VT);
1714
1715 switch (TLI->getBooleanContents(OpVT)) {
1718 return getConstant(1, DL, VT);
1720 return getAllOnesConstant(DL, VT);
1721 }
1722 llvm_unreachable("Unexpected boolean content enum!");
1723}
1724
1726 bool isT, bool isO) {
1727 return getConstant(APInt(VT.getScalarSizeInBits(), Val, /*isSigned=*/false),
1728 DL, VT, isT, isO);
1729}
1730
1732 bool isT, bool isO) {
1733 return getConstant(*ConstantInt::get(*Context, Val), DL, VT, isT, isO);
1734}
1735
1737 EVT VT, bool isT, bool isO) {
1738 assert(VT.isInteger() && "Cannot create FP integer constant!");
1739
1740 EVT EltVT = VT.getScalarType();
1741 const ConstantInt *Elt = &Val;
1742
1743 // Vector splats are explicit within the DAG, with ConstantSDNode holding the
1744 // to-be-splatted scalar ConstantInt.
1745 if (isa<VectorType>(Elt->getType()))
1746 Elt = ConstantInt::get(*getContext(), Elt->getValue());
1747
1748 // In some cases the vector type is legal but the element type is illegal and
1749 // needs to be promoted, for example v8i8 on ARM. In this case, promote the
1750 // inserted value (the type does not need to match the vector element type).
1751 // Any extra bits introduced will be truncated away.
1752 if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
1754 EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
1755 APInt NewVal;
1756 if (TLI->isSExtCheaperThanZExt(VT.getScalarType(), EltVT))
1757 NewVal = Elt->getValue().sextOrTrunc(EltVT.getSizeInBits());
1758 else
1759 NewVal = Elt->getValue().zextOrTrunc(EltVT.getSizeInBits());
1760 Elt = ConstantInt::get(*getContext(), NewVal);
1761 }
1762 // In other cases the element type is illegal and needs to be expanded, for
1763 // example v2i64 on MIPS32. In this case, find the nearest legal type, split
1764 // the value into n parts and use a vector type with n-times the elements.
1765 // Then bitcast to the type requested.
1766 // Legalizing constants too early makes the DAGCombiner's job harder so we
1767 // only legalize if the DAG tells us we must produce legal types.
1768 else if (NewNodesMustHaveLegalTypes && VT.isVector() &&
1769 TLI->getTypeAction(*getContext(), EltVT) ==
1771 const APInt &NewVal = Elt->getValue();
1772 EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
1773 unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits();
1774
1775 // For scalable vectors, try to use a SPLAT_VECTOR_PARTS node.
1776 if (VT.isScalableVector() ||
1777 TLI->isOperationLegal(ISD::SPLAT_VECTOR, VT)) {
1778 assert(EltVT.getSizeInBits() % ViaEltSizeInBits == 0 &&
1779 "Can only handle an even split!");
1780 unsigned Parts = EltVT.getSizeInBits() / ViaEltSizeInBits;
1781
1782 SmallVector<SDValue, 2> ScalarParts;
1783 for (unsigned i = 0; i != Parts; ++i)
1784 ScalarParts.push_back(getConstant(
1785 NewVal.extractBits(ViaEltSizeInBits, i * ViaEltSizeInBits), DL,
1786 ViaEltVT, isT, isO));
1787
1788 return getNode(ISD::SPLAT_VECTOR_PARTS, DL, VT, ScalarParts);
1789 }
1790
1791 unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;
1792 EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts);
1793
1794 // Check the temporary vector is the correct size. If this fails then
1795 // getTypeToTransformTo() probably returned a type whose size (in bits)
1796 // isn't a power-of-2 factor of the requested type size.
1797 assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());
1798
1799 SmallVector<SDValue, 2> EltParts;
1800 for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i)
1801 EltParts.push_back(getConstant(
1802 NewVal.extractBits(ViaEltSizeInBits, i * ViaEltSizeInBits), DL,
1803 ViaEltVT, isT, isO));
1804
1805 // EltParts is currently in little endian order. If we actually want
1806 // big-endian order then reverse it now.
1807 if (getDataLayout().isBigEndian())
1808 std::reverse(EltParts.begin(), EltParts.end());
1809
1810 // The elements must be reversed when the element order is different
1811 // to the endianness of the elements (because the BITCAST is itself a
1812 // vector shuffle in this situation). However, we do not need any code to
1813 // perform this reversal because getConstant() is producing a vector
1814 // splat.
1815 // This situation occurs in MIPS MSA.
1816
1818 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
1819 llvm::append_range(Ops, EltParts);
1820
1821 SDValue V =
1822 getNode(ISD::BITCAST, DL, VT, getBuildVector(ViaVecVT, DL, Ops));
1823 return V;
1824 }
1825
1826 assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
1827 "APInt size does not match type size!");
1828 unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
1829 SDVTList VTs = getVTList(EltVT);
1831 AddNodeIDNode(ID, Opc, VTs, {});
1832 ID.AddPointer(Elt);
1833 ID.AddBoolean(isO);
1834 void *IP = nullptr;
1835 SDNode *N = nullptr;
1836 if ((N = FindNodeOrInsertPos(ID, DL, IP)))
1837 if (!VT.isVector())
1838 return SDValue(N, 0);
1839
1840 if (!N) {
1841 N = newSDNode<ConstantSDNode>(isT, isO, Elt, VTs);
1842 if (!isT)
1843 N->setDebugLoc(DL.getDebugLoc());
1844 CSEMap.InsertNode(N, IP);
1845 InsertNode(N);
1846 NewSDValueDbgMsg(SDValue(N, 0), "Creating constant: ", this);
1847 }
1848
1849 SDValue Result(N, 0);
1850 if (VT.isVector())
1851 Result = getSplat(VT, DL, Result);
1852 return Result;
1853}
1854
1856 bool isT, bool isO) {
1857 unsigned Size = VT.getScalarSizeInBits();
1858 return getConstant(APInt(Size, Val, /*isSigned=*/true), DL, VT, isT, isO);
1859}
1860
1862 bool IsOpaque) {
1864 IsTarget, IsOpaque);
1865}
1866
1868 bool isTarget) {
1869 return getConstant(Val, DL, TLI->getPointerTy(getDataLayout()), isTarget);
1870}
1871
1873 const SDLoc &DL) {
1874 assert(VT.isInteger() && "Shift amount is not an integer type!");
1875 EVT ShiftVT = TLI->getShiftAmountTy(VT, getDataLayout());
1876 return getConstant(Val, DL, ShiftVT);
1877}
1878
1880 const SDLoc &DL) {
1881 assert(Val.ult(VT.getScalarSizeInBits()) && "Out of range shift");
1882 return getShiftAmountConstant(Val.getZExtValue(), VT, DL);
1883}
1884
1886 bool isTarget) {
1887 return getConstant(Val, DL, TLI->getVectorIdxTy(getDataLayout()), isTarget);
1888}
1889
1891 bool isTarget) {
1892 return getConstantFP(*ConstantFP::get(*getContext(), V), DL, VT, isTarget);
1893}
1894
1896 EVT VT, bool isTarget) {
1897 assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");
1898
1899 EVT EltVT = VT.getScalarType();
1900 const ConstantFP *Elt = &V;
1901
1902 // Vector splats are explicit within the DAG, with ConstantFPSDNode holding
1903 // the to-be-splatted scalar ConstantFP.
1904 if (isa<VectorType>(Elt->getType()))
1905 Elt = ConstantFP::get(*getContext(), Elt->getValue());
1906
1907 // Do the map lookup using the actual bit pattern for the floating point
1908 // value, so that we don't have problems with 0.0 comparing equal to -0.0, and
1909 // we don't have issues with SNANs.
1910 unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
1911 SDVTList VTs = getVTList(EltVT);
1913 AddNodeIDNode(ID, Opc, VTs, {});
1914 ID.AddPointer(Elt);
1915 void *IP = nullptr;
1916 SDNode *N = nullptr;
1917 if ((N = FindNodeOrInsertPos(ID, DL, IP)))
1918 if (!VT.isVector())
1919 return SDValue(N, 0);
1920
1921 if (!N) {
1922 N = newSDNode<ConstantFPSDNode>(isTarget, Elt, VTs);
1923 CSEMap.InsertNode(N, IP);
1924 InsertNode(N);
1925 }
1926
1927 SDValue Result(N, 0);
1928 if (VT.isVector())
1929 Result = getSplat(VT, DL, Result);
1930 NewSDValueDbgMsg(Result, "Creating fp constant: ", this);
1931 return Result;
1932}
1933
1935 bool isTarget) {
1936 EVT EltVT = VT.getScalarType();
1937 if (EltVT == MVT::f32)
1938 return getConstantFP(APFloat((float)Val), DL, VT, isTarget);
1939 if (EltVT == MVT::f64)
1940 return getConstantFP(APFloat(Val), DL, VT, isTarget);
1941 if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 ||
1942 EltVT == MVT::f16 || EltVT == MVT::bf16) {
1943 bool Ignored;
1944 APFloat APF = APFloat(Val);
1946 &Ignored);
1947 return getConstantFP(APF, DL, VT, isTarget);
1948 }
1949 llvm_unreachable("Unsupported type in getConstantFP");
1950}
1951
1953 EVT VT, int64_t Offset, bool isTargetGA,
1954 unsigned TargetFlags) {
1955 assert((TargetFlags == 0 || isTargetGA) &&
1956 "Cannot set target flags on target-independent globals");
1957
1958 // Truncate (with sign-extension) the offset value to the pointer size.
1960 if (BitWidth < 64)
1962
1963 unsigned Opc;
1964 if (GV->isThreadLocal())
1966 else
1968
1969 SDVTList VTs = getVTList(VT);
1971 AddNodeIDNode(ID, Opc, VTs, {});
1972 ID.AddPointer(GV);
1973 ID.AddInteger(Offset);
1974 ID.AddInteger(TargetFlags);
1975 void *IP = nullptr;
1976 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
1977 return SDValue(E, 0);
1978
1979 auto *N = newSDNode<GlobalAddressSDNode>(
1980 Opc, DL.getIROrder(), DL.getDebugLoc(), GV, VTs, Offset, TargetFlags);
1981 CSEMap.InsertNode(N, IP);
1982 InsertNode(N);
1983 return SDValue(N, 0);
1984}
1985
1987 SDVTList VTs = getVTList(MVT::Untyped);
1990 ID.AddPointer(GV);
1991 void *IP = nullptr;
1992 if (SDNode *E = FindNodeOrInsertPos(ID, SDLoc(), IP))
1993 return SDValue(E, 0);
1994
1995 auto *N = newSDNode<DeactivationSymbolSDNode>(GV, VTs);
1996 CSEMap.InsertNode(N, IP);
1997 InsertNode(N);
1998 return SDValue(N, 0);
1999}
2000
2001SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
2002 unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
2003 SDVTList VTs = getVTList(VT);
2005 AddNodeIDNode(ID, Opc, VTs, {});
2006 ID.AddInteger(FI);
2007 void *IP = nullptr;
2008 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
2009 return SDValue(E, 0);
2010
2011 auto *N = newSDNode<FrameIndexSDNode>(FI, VTs, isTarget);
2012 CSEMap.InsertNode(N, IP);
2013 InsertNode(N);
2014 return SDValue(N, 0);
2015}
2016
2017SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
2018 unsigned TargetFlags) {
2019 assert((TargetFlags == 0 || isTarget) &&
2020 "Cannot set target flags on target-independent jump tables");
2021 unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
2022 SDVTList VTs = getVTList(VT);
2024 AddNodeIDNode(ID, Opc, VTs, {});
2025 ID.AddInteger(JTI);
2026 ID.AddInteger(TargetFlags);
2027 void *IP = nullptr;
2028 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
2029 return SDValue(E, 0);
2030
2031 auto *N = newSDNode<JumpTableSDNode>(JTI, VTs, isTarget, TargetFlags);
2032 CSEMap.InsertNode(N, IP);
2033 InsertNode(N);
2034 return SDValue(N, 0);
2035}
2036
2038 const SDLoc &DL) {
2040 return getNode(ISD::JUMP_TABLE_DEBUG_INFO, DL, MVT::Other, Chain,
2041 getTargetConstant(static_cast<uint64_t>(JTI), DL, PTy, true));
2042}
2043
2045 MaybeAlign Alignment, int Offset,
2046 bool isTarget, unsigned TargetFlags) {
2047 assert((TargetFlags == 0 || isTarget) &&
2048 "Cannot set target flags on target-independent globals");
2049 if (!Alignment)
2050 Alignment = shouldOptForSize()
2051 ? getDataLayout().getABITypeAlign(C->getType())
2052 : getDataLayout().getPrefTypeAlign(C->getType());
2053 unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
2054 SDVTList VTs = getVTList(VT);
2056 AddNodeIDNode(ID, Opc, VTs, {});
2057 ID.AddInteger(Alignment->value());
2058 ID.AddInteger(Offset);
2059 ID.AddPointer(C);
2060 ID.AddInteger(TargetFlags);
2061 void *IP = nullptr;
2062 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
2063 return SDValue(E, 0);
2064
2065 auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VTs, Offset, *Alignment,
2066 TargetFlags);
2067 CSEMap.InsertNode(N, IP);
2068 InsertNode(N);
2069 SDValue V = SDValue(N, 0);
2070 NewSDValueDbgMsg(V, "Creating new constant pool: ", this);
2071 return V;
2072}
2073
2075 MaybeAlign Alignment, int Offset,
2076 bool isTarget, unsigned TargetFlags) {
2077 assert((TargetFlags == 0 || isTarget) &&
2078 "Cannot set target flags on target-independent globals");
2079 if (!Alignment)
2080 Alignment = getDataLayout().getPrefTypeAlign(C->getType());
2081 unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
2082 SDVTList VTs = getVTList(VT);
2084 AddNodeIDNode(ID, Opc, VTs, {});
2085 ID.AddInteger(Alignment->value());
2086 ID.AddInteger(Offset);
2087 C->addSelectionDAGCSEId(ID);
2088 ID.AddInteger(TargetFlags);
2089 void *IP = nullptr;
2090 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
2091 return SDValue(E, 0);
2092
2093 auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VTs, Offset, *Alignment,
2094 TargetFlags);
2095 CSEMap.InsertNode(N, IP);
2096 InsertNode(N);
2097 return SDValue(N, 0);
2098}
2099
2102 AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), {});
2103 ID.AddPointer(MBB);
2104 void *IP = nullptr;
2105 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
2106 return SDValue(E, 0);
2107
2108 auto *N = newSDNode<BasicBlockSDNode>(MBB);
2109 CSEMap.InsertNode(N, IP);
2110 InsertNode(N);
2111 return SDValue(N, 0);
2112}
2113
2115 if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
2116 ValueTypeNodes.size())
2117 ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);
2118
2119 SDNode *&N = VT.isExtended() ?
2120 ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];
2121
2122 if (N) return SDValue(N, 0);
2123 N = newSDNode<VTSDNode>(VT);
2124 InsertNode(N);
2125 return SDValue(N, 0);
2126}
2127
2129 SDNode *&N = ExternalSymbols[Sym];
2130 if (N) return SDValue(N, 0);
2131 N = newSDNode<ExternalSymbolSDNode>(false, Sym, 0, getVTList(VT));
2132 InsertNode(N);
2133 return SDValue(N, 0);
2134}
2135
2136SDValue SelectionDAG::getExternalSymbol(RTLIB::LibcallImpl Libcall, EVT VT) {
  // Convenience overload: resolve the libcall implementation to its symbol
  // name and delegate to the const char* overload.
  // NOTE(review): the line defining SymName (doc 2137) was dropped by
  // extraction — presumably a TLI libcall-name lookup; verify upstream.
2138   return getExternalSymbol(SymName.data(), VT);
2139 }
2140
// Return the (lazily created) singleton MCSymbolSDNode for Sym.
// NOTE(review): signature line dropped by extraction (doc 2141).
2142   SDNode *&N = MCSymbols[Sym];
2143   if (N)
2144     return SDValue(N, 0);
2145   N = newSDNode<MCSymbolSDNode>(Sym, getVTList(VT));
2146   InsertNode(N);
2147   return SDValue(N, 0);
2148 }
2149
// Return the singleton target ExternalSymbolSDNode for (Sym, TargetFlags);
// the map key includes the flags so different flags yield different nodes.
// NOTE(review): signature line dropped by extraction (doc 2150).
2151                                               unsigned TargetFlags) {
2152   SDNode *&N =
2153       TargetExternalSymbols[std::pair<std::string, unsigned>(Sym, TargetFlags)];
2154   if (N) return SDValue(N, 0);
2155   N = newSDNode<ExternalSymbolSDNode>(true, Sym, TargetFlags, getVTList(VT));
2156   InsertNode(N);
2157   return SDValue(N, 0);
2158 }
2159
// Convenience overload: resolve a libcall implementation to its symbol name.
// NOTE(review): extraction dropped the signature line (doc 2160) and the
// SymName definition (doc 2162) — verify against upstream.
2161                                               EVT VT, unsigned TargetFlags) {
2163   return getTargetExternalSymbol(SymName.data(), VT, TargetFlags);
2164 }
2165
// Return the singleton CondCodeSDNode for Cond, growing the cache vector on
// demand. NOTE(review): signature line dropped by extraction (doc 2166).
2167   if ((unsigned)Cond >= CondCodeNodes.size())
2168     CondCodeNodes.resize(Cond+1);
2169
2170   if (!CondCodeNodes[Cond]) {
2171     auto *N = newSDNode<CondCodeSDNode>(Cond);
2172     CondCodeNodes[Cond] = N;
2173     InsertNode(N);
2174   }
2175
2176   return SDValue(CondCodeNodes[Cond], 0);
2177 }
2178
// Materialize MulImm * vscale as an ISD::VSCALE node; folds to a plain
// constant when the function's vscale_range attribute pins vscale to a
// single value. NOTE(review): signature line dropped by extraction (doc 2179).
2180   assert(MulImm.getBitWidth() == VT.getSizeInBits() &&
2181          "APInt size does not match type size!");
2182
     // 0 * vscale is always 0.
2183   if (MulImm == 0)
2184     return getConstant(0, DL, VT);
2185
2186   const MachineFunction &MF = getMachineFunction();
2187   const Function &F = MF.getFunction();
2188   ConstantRange CR = getVScaleRange(&F, 64);
     // If vscale is known exactly, fold the multiply at compile time.
2189   if (const APInt *C = CR.getSingleElement())
2190     return getConstant(MulImm * C->getZExtValue(), DL, VT);
2191
2192   return getNode(ISD::VSCALE, DL, VT, getConstant(MulImm, DL, VT));
2193 }
2194
2195/// \returns a value of type \p VT that represents the runtime value of \p
2196/// Quantity, i.e. scaled by vscale if it's scalable, or a fixed constant
2197/// otherwise. Quantity should be a FixedOrScalableQuantity, i.e. ElementCount
2198/// or TypeSize.
2199template <typename Ty>
// NOTE(review): the first line of the signature (doc 2200) was dropped by
// extraction — it names this static helper; verify against upstream.
2201                                          EVT VT, Ty Quantity) {
  // Scalable quantity: KnownMin * vscale; fixed quantity: plain constant.
2202   if (Quantity.isScalable())
2203     return DAG.getVScale(
2204         DL, VT, APInt(VT.getSizeInBits(), Quantity.getKnownMinValue()));
2205
2206   return DAG.getConstant(Quantity.getKnownMinValue(), DL, VT);
2207 }
2208
// ElementCount wrapper around getFixedOrScalableQuantity.
// NOTE(review): signature line dropped by extraction (doc 2209).
2210                                       ElementCount EC) {
2211   return getFixedOrScalableQuantity(*this, DL, VT, EC);
2212 }
2213
// TypeSize wrapper around getFixedOrScalableQuantity.
// NOTE(review): signature line dropped by extraction (doc 2214) — presumably
// the TypeSize overload taking (DL, VT, TS); verify against upstream.
2215   return getFixedOrScalableQuantity(*this, DL, VT, TS);
2216 }
2217
// Build a GET_ACTIVE_LANE_MASK from index 0 up to EC lanes, using the
// target's vector-index and setcc-result types.
// NOTE(review): signature line dropped by extraction (doc 2218).
2219                                            ElementCount EC) {
2220   EVT IdxVT = TLI->getVectorIdxTy(getDataLayout());
2221   EVT MaskVT = TLI->getSetCCResultType(getDataLayout(), *getContext(), DataVT);
2222   return getNode(ISD::GET_ACTIVE_LANE_MASK, DL, MaskVT,
2223                  getConstant(0, DL, IdxVT), getElementCount(DL, IdxVT, EC));
2224 }
2225
// Unit-step convenience overload: step vector <0, 1, 2, ...>.
// NOTE(review): signature line dropped by extraction (doc 2226).
2227   APInt One(ResVT.getScalarSizeInBits(), 1);
2228   return getStepVector(DL, ResVT, One);
2229 }
2230
// Build <0, StepVal, 2*StepVal, ...>: a STEP_VECTOR node for scalable result
// types, an explicit BUILD_VECTOR of constants for fixed-width ones.
// NOTE(review): signature line dropped by extraction (doc 2231).
2232                                   const APInt &StepVal) {
2233   assert(ResVT.getScalarSizeInBits() == StepVal.getBitWidth());
2234   if (ResVT.isScalableVector())
2235     return getNode(
2236         ISD::STEP_VECTOR, DL, ResVT,
2237         getTargetConstant(StepVal, DL, ResVT.getVectorElementType()));
2238
2239   SmallVector<SDValue, 16> OpsStepConstants;
2240   for (uint64_t i = 0; i < ResVT.getVectorNumElements(); i++)
2241     OpsStepConstants.push_back(
2242         getConstant(StepVal * i, DL, ResVT.getVectorElementType()));
2243   return getBuildVector(ResVT, DL, OpsStepConstants);
2244 }
2245
2246/// Swaps the values of N1 and N2. Swaps all indices in the shuffle mask M that
2247/// point at N1 to point at N2 and indices that point at N2 to point at N1.
2252
// getVectorShuffle: create a VECTOR_SHUFFLE of N1/N2 with the given mask,
// after applying a series of canonicalizations (undef folding, operand
// commuting, splat blending, identity detection) that may avoid creating a
// shuffle node at all.
// NOTE(review): extraction dropped the first signature line (doc 2253) and
// the 'FoldingSetNodeID ID;' / AddNodeIDNode lines (doc 2397/2399) — verify
// against upstream.
2254                                  SDValue N2, ArrayRef<int> Mask) {
2255   assert(VT.getVectorNumElements() == Mask.size() &&
2256          "Must have the same number of vector elements as mask elements!");
2257   assert(VT == N1.getValueType() && VT == N2.getValueType() &&
2258          "Invalid VECTOR_SHUFFLE");
2259
2260   // Canonicalize shuffle undef, undef -> undef
2261   if (N1.isUndef() && N2.isUndef())
2262     return getUNDEF(VT);
2263
2264   // Validate that all indices in Mask are within the range of the elements
2265   // input to the shuffle.
2266   int NElts = Mask.size();
2267   assert(llvm::all_of(Mask,
2268                       [&](int M) { return M < (NElts * 2) && M >= -1; }) &&
2269          "Index out of range");
2270
2271   // Copy the mask so we can do any needed cleanup.
2272   SmallVector<int, 8> MaskVec(Mask);
2273
2274   // Canonicalize shuffle v, v -> v, undef
2275   if (N1 == N2) {
2276     N2 = getUNDEF(VT);
     // Remap RHS indices onto the (identical) LHS.
2277     for (int i = 0; i != NElts; ++i)
2278       if (MaskVec[i] >= NElts) MaskVec[i] -= NElts;
2279   }
2280
2281   // Canonicalize shuffle undef, v -> v, undef.  Commute the shuffle mask.
2282   if (N1.isUndef())
2283     commuteShuffle(N1, N2, MaskVec);
2284
2285   if (TLI->hasVectorBlend()) {
2286     // If shuffling a splat, try to blend the splat instead.  We do this here so
2287     // that even when this arises during lowering we don't have to re-handle it.
2288     auto BlendSplat = [&](BuildVectorSDNode *BV, int Offset) {
2289       BitVector UndefElements;
2290       SDValue Splat = BV->getSplatValue(&UndefElements);
2291       if (!Splat)
2292         return;
2293
2294       for (int i = 0; i < NElts; ++i) {
         // Only rewrite lanes sourced from this operand.
2295         if (MaskVec[i] < Offset || MaskVec[i] >= (Offset + NElts))
2296           continue;
2297
2298         // If this input comes from undef, mark it as such.
2299         if (UndefElements[MaskVec[i] - Offset]) {
2300           MaskVec[i] = -1;
2301           continue;
2302         }
2303
2304         // If we can blend a non-undef lane, use that instead.
2305         if (!UndefElements[i])
2306           MaskVec[i] = i + Offset;
2307       }
2308     };
2309     if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
2310       BlendSplat(N1BV, 0);
2311     if (auto *N2BV = dyn_cast<BuildVectorSDNode>(N2))
2312       BlendSplat(N2BV, NElts);
2313   }
2314
2315   // Canonicalize all index into lhs, -> shuffle lhs, undef
2316   // Canonicalize all index into rhs, -> shuffle rhs, undef
2317   bool AllLHS = true, AllRHS = true;
2318   bool N2Undef = N2.isUndef();
2319   for (int i = 0; i != NElts; ++i) {
2320     if (MaskVec[i] >= NElts) {
2321       if (N2Undef)
2322         MaskVec[i] = -1;
2323       else
2324         AllLHS = false;
2325     } else if (MaskVec[i] >= 0) {
2326       AllRHS = false;
2327     }
2328   }
   // All lanes undef (both flags still set) -> whole shuffle is undef.
2329   if (AllLHS && AllRHS)
2330     return getUNDEF(VT);
2331   if (AllLHS && !N2Undef)
2332     N2 = getUNDEF(VT);
2333   if (AllRHS) {
2334     N1 = getUNDEF(VT);
2335     commuteShuffle(N1, N2, MaskVec);
2336   }
2337   // Reset our undef status after accounting for the mask.
2338   N2Undef = N2.isUndef();
2339   // Re-check whether both sides ended up undef.
2340   if (N1.isUndef() && N2Undef)
2341     return getUNDEF(VT);
2342
2343   // If Identity shuffle return that node.
2344   bool Identity = true, AllSame = true;
2345   for (int i = 0; i != NElts; ++i) {
2346     if (MaskVec[i] >= 0 && MaskVec[i] != i) Identity = false;
2347     if (MaskVec[i] != MaskVec[0]) AllSame = false;
2348   }
2349   if (Identity && NElts)
2350     return N1;
2351
2352   // Shuffling a constant splat doesn't change the result.
2353   if (N2Undef) {
2354     SDValue V = N1;
2355
2356     // Look through any bitcasts. We check that these don't change the number
2357     // (and size) of elements and just changes their types.
2358     while (V.getOpcode() == ISD::BITCAST)
2359       V = V->getOperand(0);
2360
2361     // A splat should always show up as a build vector node.
2362     if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
2363       BitVector UndefElements;
2364       SDValue Splat = BV->getSplatValue(&UndefElements);
2365       // If this is a splat of an undef, shuffling it is also undef.
2366       if (Splat && Splat.isUndef())
2367         return getUNDEF(VT);
2368
2369       bool SameNumElts =
2370           V.getValueType().getVectorNumElements() == VT.getVectorNumElements();
2371
2372       // We only have a splat which can skip shuffles if there is a splatted
2373       // value and no undef lanes rearranged by the shuffle.
2374       if (Splat && UndefElements.none()) {
2375         // Splat of <x, x, ..., x>, return <x, x, ..., x>, provided that the
2376         // number of elements match or the value splatted is a zero constant.
2377         if (SameNumElts || isNullConstant(Splat))
2378           return N1;
2379       }
2380
2381       // If the shuffle itself creates a splat, build the vector directly.
2382       if (AllSame && SameNumElts) {
2383         EVT BuildVT = BV->getValueType(0);
2384         const SDValue &Splatted = BV->getOperand(MaskVec[0]);
2385         SDValue NewBV = getSplatBuildVector(BuildVT, dl, Splatted);
2386
2387         // We may have jumped through bitcasts, so the type of the
2388         // BUILD_VECTOR may not match the type of the shuffle.
2389         if (BuildVT != VT)
2390           NewBV = getNode(ISD::BITCAST, dl, VT, NewBV);
2391         return NewBV;
2392       }
2393     }
2394   }
2395
   // No fold applied: CSE-lookup / create the actual shuffle node.
2396   SDVTList VTs = getVTList(VT);
2398   SDValue Ops[2] = { N1, N2 };
   // The mask participates in the node's CSE identity.
2400   for (int i = 0; i != NElts; ++i)
2401     ID.AddInteger(MaskVec[i]);
2402
2403   void* IP = nullptr;
2404   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
2405     return SDValue(E, 0);
2406
2407   // Allocate the mask array for the node out of the BumpPtrAllocator, since
2408   // SDNode doesn't have access to it.  This memory will be "leaked" when
2409   // the node is deallocated, but recovered when the NodeAllocator is released.
2410   int *MaskAlloc = OperandAllocator.Allocate<int>(NElts);
2411   llvm::copy(MaskVec, MaskAlloc);
2412
2413   auto *N = newSDNode<ShuffleVectorSDNode>(VTs, dl.getIROrder(),
2414                                            dl.getDebugLoc(), MaskAlloc);
2415   createOperands(N, Ops);
2416
2417   CSEMap.InsertNode(N, IP);
2418   InsertNode(N);
2419   SDValue V = SDValue(N, 0);
2420   NewSDValueDbgMsg(V, "Creating new node: ", this);
2421   return V;
2422 }
2423
// Return the given shuffle with its two operands swapped (mask commuted
// accordingly). NOTE(review): extraction dropped the signature line
// (doc 2424) and, apparently, the mask-commuting call (doc 2427) — verify
// against upstream.
2425   EVT VT = SV.getValueType(0);
2426   SmallVector<int, 8> MaskVec(SV.getMask());
2428
2429   SDValue Op0 = SV.getOperand(0);
2430   SDValue Op1 = SV.getOperand(1);
2431   return getVectorShuffle(VT, SDLoc(&SV), Op1, Op0, MaskVec);
2432 }
2433
// Return a CSE-unified RegisterSDNode for Reg, computing its divergence via
// the target lowering hook. NOTE(review): extraction dropped the signature
// and 'FoldingSetNodeID ID;' lines (doc 2434/2436).
2435   SDVTList VTs = getVTList(VT);
2437   AddNodeIDNode(ID, ISD::Register, VTs, {});
2438   ID.AddInteger(Reg.id());
2439   void *IP = nullptr;
2440   if (SDNode *E = FindNodeOrInsertPos(ID, IP))
2441     return SDValue(E, 0);
2442
2443   auto *N = newSDNode<RegisterSDNode>(Reg, VTs);
2444   N->SDNodeBits.IsDivergent = TLI->isSDNodeSourceOfDivergence(N, FLI, UA);
2445   CSEMap.InsertNode(N, IP);
2446   InsertNode(N);
2447   return SDValue(N, 0);
2448 }
2449
// Return a CSE-unified RegisterMaskSDNode (keyed by the mask pointer).
// NOTE(review): extraction dropped the signature and 'FoldingSetNodeID ID;'
// lines (doc 2450/2451).
2452   AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), {});
2453   ID.AddPointer(RegMask);
2454   void *IP = nullptr;
2455   if (SDNode *E = FindNodeOrInsertPos(ID, IP))
2456     return SDValue(E, 0);
2457
2458   auto *N = newSDNode<RegisterMaskSDNode>(RegMask);
2459   CSEMap.InsertNode(N, IP);
2460   InsertNode(N);
2461   return SDValue(N, 0);
2462 }
2463
// Convenience wrapper: an EH_LABEL node chained on Root.
// NOTE(review): signature line dropped by extraction (doc 2464).
2465                                  MCSymbol *Label) {
2466   return getLabelNode(ISD::EH_LABEL, dl, Root, Label);
2467 }
2468
2469SDValue SelectionDAG::getLabelNode(unsigned Opcode, const SDLoc &dl,
2470                                   SDValue Root, MCSymbol *Label) {
  // Create (or CSE-reuse) a label node of the given opcode, chained on Root
  // and keyed by the MCSymbol pointer.
  // NOTE(review): 'FoldingSetNodeID ID;' line (doc 2471) dropped by
  // extraction.
2472   SDValue Ops[] = { Root };
2473   AddNodeIDNode(ID, Opcode, getVTList(MVT::Other), Ops);
2474   ID.AddPointer(Label);
2475   void *IP = nullptr;
2476   if (SDNode *E = FindNodeOrInsertPos(ID, IP))
2477     return SDValue(E, 0);
2478
2479   auto *N =
2480       newSDNode<LabelSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), Label);
2481   createOperands(N, Ops);
2482
2483   CSEMap.InsertNode(N, IP);
2484   InsertNode(N);
2485   return SDValue(N, 0);
2486 }
2487
// Build (or CSE-reuse) a BlockAddress / TargetBlockAddress node keyed by the
// BlockAddress pointer, offset and target flags.
// NOTE(review): extraction dropped the signature and 'FoldingSetNodeID ID;'
// lines (doc 2488/2494).
2489                                        int64_t Offset, bool isTarget,
2490                                        unsigned TargetFlags) {
2491   unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress;
2492   SDVTList VTs = getVTList(VT);
2493
2495   AddNodeIDNode(ID, Opc, VTs, {});
2496   ID.AddPointer(BA);
2497   ID.AddInteger(Offset);
2498   ID.AddInteger(TargetFlags);
2499   void *IP = nullptr;
2500   if (SDNode *E = FindNodeOrInsertPos(ID, IP))
2501     return SDValue(E, 0);
2502
2503   auto *N = newSDNode<BlockAddressSDNode>(Opc, VTs, BA, Offset, TargetFlags);
2504   CSEMap.InsertNode(N, IP);
2505   InsertNode(N);
2506   return SDValue(N, 0);
2507 }
2508
// Return a CSE-unified SrcValueSDNode wrapping the IR Value pointer.
// NOTE(review): extraction dropped the signature and 'FoldingSetNodeID ID;'
// lines (doc 2509/2510).
2511   AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), {});
2512   ID.AddPointer(V);
2513
2514   void *IP = nullptr;
2515   if (SDNode *E = FindNodeOrInsertPos(ID, IP))
2516     return SDValue(E, 0);
2517
2518   auto *N = newSDNode<SrcValueSDNode>(V);
2519   CSEMap.InsertNode(N, IP);
2520   InsertNode(N);
2521   return SDValue(N, 0);
2522 }
2523
// Return a CSE-unified MDNodeSDNode wrapping the metadata pointer.
// NOTE(review): extraction dropped the signature and 'FoldingSetNodeID ID;'
// lines (doc 2524/2525).
2526   AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), {});
2527   ID.AddPointer(MD);
2528
2529   void *IP = nullptr;
2530   if (SDNode *E = FindNodeOrInsertPos(ID, IP))
2531     return SDValue(E, 0);
2532
2533   auto *N = newSDNode<MDNodeSDNode>(MD);
2534   CSEMap.InsertNode(N, IP);
2535   InsertNode(N);
2536   return SDValue(N, 0);
2537 }
2538
// Bitcast V to VT, or return V unchanged if it already has that type.
// NOTE(review): signature line dropped by extraction (doc 2539).
2540   if (VT == V.getValueType())
2541     return V;
2542
2543   return getNode(ISD::BITCAST, SDLoc(V), VT, V);
2544 }
2545
// Build (or CSE-reuse) an AddrSpaceCastSDNode from SrcAS to DestAS; the two
// address spaces participate in the CSE identity.
// NOTE(review): extraction dropped the signature line (doc 2546) and the
// 'FoldingSetNodeID ID;' / AddNodeIDNode lines (doc 2550/2551).
2547                                        unsigned SrcAS, unsigned DestAS) {
2548   SDVTList VTs = getVTList(VT);
2549   SDValue Ops[] = {Ptr};
2552   ID.AddInteger(SrcAS);
2553   ID.AddInteger(DestAS);
2554
2555   void *IP = nullptr;
2556   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
2557     return SDValue(E, 0);
2558
2559   auto *N = newSDNode<AddrSpaceCastSDNode>(dl.getIROrder(), dl.getDebugLoc(),
2560                                            VTs, SrcAS, DestAS);
2561   createOperands(N, Ops);
2562
2563   CSEMap.InsertNode(N, IP);
2564   InsertNode(N);
2565   return SDValue(N, 0);
2566 }
2567
// Wrap V in an ISD::FREEZE node of the same type.
// NOTE(review): signature line dropped by extraction (doc 2568).
2569   return getNode(ISD::FREEZE, SDLoc(V), V.getValueType(), V);
2570 }
2571
// Freeze V only when it is not already guaranteed free of undef/poison for
// the demanded elements. NOTE(review): signature line dropped by extraction
// (doc 2572).
2573                                            UndefPoisonKind Kind) {
2574   if (isGuaranteedNotToBeUndefOrPoison(V, DemandedElts, Kind))
2575     return V;
2576   return getFreeze(V);
2577 }
2578
2579/// getShiftAmountOperand - Return the specified value casted to
2580/// the target's desired shift amount type.
// NOTE(review): signature line dropped by extraction (doc 2581).
2582   EVT OpTy = Op.getValueType();
2583   EVT ShTy = TLI->getShiftAmountTy(LHSTy, getDataLayout());
   // Vector shift amounts are left as-is; scalars are zext/truncated to ShTy.
2584   if (OpTy == ShTy || OpTy.isVector()) return Op;
2585
2586   return getZExtOrTrunc(Op, SDLoc(Op), ShTy);
2587 }
2588
// Default expansion of a VAARG node: load the va_list pointer, round it up to
// the argument's alignment when that exceeds the minimum stack argument
// alignment, bump the pointer past the argument, store it back, then load the
// argument itself.
// NOTE(review): extraction dropped the signature line (doc 2589) and the
// TargetLowering reference declaration (doc 2591).
2590   SDLoc dl(Node);
2592   const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
2593   EVT VT = Node->getValueType(0);
2594   SDValue Tmp1 = Node->getOperand(0);
2595   SDValue Tmp2 = Node->getOperand(1);
2596   const MaybeAlign MA(Node->getConstantOperandVal(3));
2597
2598   SDValue VAListLoad = getLoad(TLI.getPointerTy(getDataLayout()), dl, Tmp1,
2599                                Tmp2, MachinePointerInfo(V));
2600   SDValue VAList = VAListLoad;
2601
2602   if (MA && *MA > TLI.getMinStackArgumentAlignment()) {
     // Align up: VAList = (VAList + MA - 1) & -MA.
2603     VAList = getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
2604                      getConstant(MA->value() - 1, dl, VAList.getValueType()));
2605
2606     VAList = getNode(
2607         ISD::AND, dl, VAList.getValueType(), VAList,
2608         getSignedConstant(-(int64_t)MA->value(), dl, VAList.getValueType()));
2609   }
2610
2611   // Increment the pointer, VAList, to the next vaarg
2612   Tmp1 = getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
2613                  getConstant(getDataLayout().getTypeAllocSize(
2614                                  VT.getTypeForEVT(*getContext())),
2615                              dl, VAList.getValueType()));
2616   // Store the incremented VAList to the legalized pointer
2617   Tmp1 =
2618       getStore(VAListLoad.getValue(1), dl, Tmp1, Tmp2, MachinePointerInfo(V));
2619   // Load the actual argument out of the pointer VAList
2620   return getLoad(VT, dl, Tmp1, VAList, MachinePointerInfo());
2621 }
2622
// Default expansion of a VACOPY node: load the source va_list pointer and
// store it into the destination slot, returning the resulting chain.
// NOTE(review): extraction dropped the signature line (doc 2623) and the
// TargetLowering reference declaration (doc 2625).
2624   SDLoc dl(Node);
2626   // This defaults to loading a pointer from the input and storing it to the
2627   // output, returning the chain.
2628   const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue();
2629   const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue();
2630   SDValue Tmp1 =
2631       getLoad(TLI.getPointerTy(getDataLayout()), dl, Node->getOperand(0),
2632               Node->getOperand(2), MachinePointerInfo(VS));
2633   return getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1),
2634                   MachinePointerInfo(VD));
2635 }
2636
// Compute the (ABI or preferred) alignment for VT, possibly reduced for
// illegal vector types that will be broken down into smaller registers, and
// capped at the stack alignment when the stack cannot be realigned.
// NOTE(review): signature line dropped by extraction (doc 2637).
2638   const DataLayout &DL = getDataLayout();
2639   Type *Ty = VT.getTypeForEVT(*getContext());
2640   Align RedAlign = UseABI ? DL.getABITypeAlign(Ty) : DL.getPrefTypeAlign(Ty);
2641
   // Legal or scalar types keep their natural alignment.
2642   if (TLI->isTypeLegal(VT) || !VT.isVector())
2643     return RedAlign;
2644
2645   const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
2646   const Align StackAlign = TFI->getStackAlign();
2647
2648   // See if we can choose a smaller ABI alignment in cases where it's an
2649   // illegal vector type that will get broken down.
2650   if (RedAlign > StackAlign) {
2651     EVT IntermediateVT;
2652     MVT RegisterVT;
2653     unsigned NumIntermediates;
2654     TLI->getVectorTypeBreakdown(*getContext(), VT, IntermediateVT,
2655                                 NumIntermediates, RegisterVT);
2656     Ty = IntermediateVT.getTypeForEVT(*getContext());
2657     Align RedAlign2 = UseABI ? DL.getABITypeAlign(Ty) : DL.getPrefTypeAlign(Ty);
2658     if (RedAlign2 < RedAlign)
2659       RedAlign = RedAlign2;
2660
2661     if (!getMachineFunction().getFrameInfo().isStackRealignable())
2662       // If the stack is not realignable, the alignment should be limited to the
2663       // StackAlignment
2664       RedAlign = std::min(RedAlign, StackAlign);
2665   }
2666
2667   return RedAlign;
2668 }
2669
// Create a stack temporary of the given size/alignment; scalable sizes get
// the target's scalable-vector stack ID.
// NOTE(review): signature line dropped by extraction (doc 2670).
2671   MachineFrameInfo &MFI = MF->getFrameInfo();
2672   const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
2673   int StackID = 0;
2674   if (Bytes.isScalable())
2675     StackID = TFI->getStackIDForScalableVectors();
2676   // The stack id gives an indication of whether the object is scalable or
2677   // not, so it's safe to pass in the minimum size here.
2678   int FrameIdx = MFI.CreateStackObject(Bytes.getKnownMinValue(), Alignment,
2679                                        false, nullptr, StackID);
2680   return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout()));
2681 }
2682
// Create a stack temporary large enough for VT, aligned to at least minAlign
// and the type's preferred alignment.
// NOTE(review): signature line dropped by extraction (doc 2683).
2684   Type *Ty = VT.getTypeForEVT(*getContext());
2685   Align StackAlign =
2686       std::max(getDataLayout().getPrefTypeAlign(Ty), Align(minAlign));
2687   return CreateStackTemporary(VT.getStoreSize(), StackAlign);
2688 }
2689
// Create a stack temporary large and aligned enough to hold either VT1 or
// VT2 (both must agree on scalability).
// NOTE(review): signature line dropped by extraction (doc 2690).
2691   TypeSize VT1Size = VT1.getStoreSize();
2692   TypeSize VT2Size = VT2.getStoreSize();
2693   assert(VT1Size.isScalable() == VT2Size.isScalable() &&
2694          "Don't know how to choose the maximum size when creating a stack "
2695          "temporary");
2696   TypeSize Bytes = VT1Size.getKnownMinValue() > VT2Size.getKnownMinValue()
2697                        ? VT1Size
2698                        : VT2Size;
2699
2700   Type *Ty1 = VT1.getTypeForEVT(*getContext());
2701   Type *Ty2 = VT2.getTypeForEVT(*getContext());
2702   const DataLayout &DL = getDataLayout();
2703   Align Align = std::max(DL.getPrefTypeAlign(Ty1), DL.getPrefTypeAlign(Ty2));
2704   return CreateStackTemporary(Bytes, Align);
2705 }
2706
// FoldSetCC: attempt to constant-fold a SETCC(N1, N2, Cond). Handles
// always-true/false predicates, integer undef rules, constant-vs-constant
// integer and FP comparisons, and NaN-based folds; returns an empty SDValue
// when nothing folds.
// NOTE(review): this block is badly mangled by extraction — the signature
// (doc 2707), several constant-lookup lines (doc 2764/2766/2769) and many
// 'case ISD::SET...' labels in the FP switch were dropped. Verify every
// fallthrough pair against upstream before trusting the switch structure.
2708                                  ISD::CondCode Cond, const SDLoc &dl,
2709                                  SDNodeFlags Flags) {
2710   EVT OpVT = N1.getValueType();
2711
   // For boolean results whose high bits are significant (ZeroOrOne /
   // ZeroOrNegativeOne), "undef" must be materialized as 0 instead.
2712   auto GetUndefBooleanConstant = [&]() {
2713     if (VT.getScalarType() == MVT::i1 ||
2714         TLI->getBooleanContents(OpVT) ==
2716       return getUNDEF(VT);
2717     // ZeroOrOne / ZeroOrNegative require specific values for the high bits,
2718     // so we cannot use getUNDEF(). Return zero instead.
2719     return getConstant(0, dl, VT);
2720   };
2721
2722   // These setcc operations always fold.
2723   switch (Cond) {
2724   default: break;
2725   case ISD::SETFALSE:
2726   case ISD::SETFALSE2: return getBoolConstant(false, dl, VT, OpVT);
2727   case ISD::SETTRUE:
2728   case ISD::SETTRUE2: return getBoolConstant(true, dl, VT, OpVT);
2729
2730   case ISD::SETOEQ:
2731   case ISD::SETOGT:
2732   case ISD::SETOGE:
2733   case ISD::SETOLT:
2734   case ISD::SETOLE:
2735   case ISD::SETONE:
2736   case ISD::SETO:
2737   case ISD::SETUO:
2738   case ISD::SETUEQ:
2739   case ISD::SETUNE:
2740     assert(!OpVT.isInteger() && "Illegal setcc for integer!");
2741     break;
2742   }
2743
2744   if (OpVT.isInteger()) {
2745     // For EQ and NE, we can always pick a value for the undef to make the
2746     // predicate pass or fail, so we can return undef.
2747     // Matches behavior in llvm::ConstantFoldCompareInstruction.
2748     // icmp eq/ne X, undef -> undef.
2749     if ((N1.isUndef() || N2.isUndef()) &&
2750         (Cond == ISD::SETEQ || Cond == ISD::SETNE))
2751       return GetUndefBooleanConstant();
2752
2753     // If both operands are undef, we can return undef for int comparison.
2754     // icmp undef, undef -> undef.
2755     if (N1.isUndef() && N2.isUndef())
2756       return GetUndefBooleanConstant();
2757
2758     // icmp X, X -> true/false
2759     // icmp X, undef -> true/false because undef could be X.
2760     if (N1.isUndef() || N2.isUndef() || N1 == N2)
2761       return getBoolConstant(ISD::isTrueWhenEqual(Cond), dl, VT, OpVT);
2762   }
2763
   // NOTE(review): the constant-node dyn_casts guarding this
   // integer-constant fold (doc 2764/2766/2769) were dropped by extraction.
2765     const APInt &C2 = N2C->getAPIntValue();
2767       const APInt &C1 = N1C->getAPIntValue();
2768
2770                              dl, VT, OpVT);
2771     }
2772   }
2773
2774   auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
2775   auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
2776
2777   if (N1CFP && N2CFP) {
     // Both FP constants: fold using APFloat's four-way compare result.
     // Unordered predicates fall through to their ordered twin after the
     // cmpUnordered check.
2778     APFloat::cmpResult R = N1CFP->getValueAPF().compare(N2CFP->getValueAPF());
2779     switch (Cond) {
2780     default: break;
2781     case ISD::SETEQ:  if (R==APFloat::cmpUnordered)
2782                         return GetUndefBooleanConstant();
2783                       [[fallthrough]];
2784     case ISD::SETOEQ: return getBoolConstant(R==APFloat::cmpEqual, dl, VT,
2785                                              OpVT);
2786     case ISD::SETNE:  if (R==APFloat::cmpUnordered)
2787                         return GetUndefBooleanConstant();
2788                       [[fallthrough]];
2790                                              R==APFloat::cmpLessThan, dl, VT,
2791                                              OpVT);
2792     case ISD::SETLT:  if (R==APFloat::cmpUnordered)
2793                         return GetUndefBooleanConstant();
2794                       [[fallthrough]];
2795     case ISD::SETOLT: return getBoolConstant(R==APFloat::cmpLessThan, dl, VT,
2796                                              OpVT);
2797     case ISD::SETGT:  if (R==APFloat::cmpUnordered)
2798                         return GetUndefBooleanConstant();
2799                       [[fallthrough]];
2801                                              VT, OpVT);
2802     case ISD::SETLE:  if (R==APFloat::cmpUnordered)
2803                         return GetUndefBooleanConstant();
2804                       [[fallthrough]];
2806                                              R==APFloat::cmpEqual, dl, VT,
2807                                              OpVT);
2808     case ISD::SETGE:  if (R==APFloat::cmpUnordered)
2809                         return GetUndefBooleanConstant();
2810                       [[fallthrough]];
2812                                          R==APFloat::cmpEqual, dl, VT, OpVT);
2813     case ISD::SETO:   return getBoolConstant(R!=APFloat::cmpUnordered, dl, VT,
2814                                              OpVT);
2815     case ISD::SETUO:  return getBoolConstant(R==APFloat::cmpUnordered, dl, VT,
2816                                              OpVT);
2818                                              R==APFloat::cmpEqual, dl, VT,
2819                                              OpVT);
2820     case ISD::SETUNE: return getBoolConstant(R!=APFloat::cmpEqual, dl, VT,
2821                                              OpVT);
2823                                              R==APFloat::cmpLessThan, dl, VT,
2824                                              OpVT);
2826                                              R==APFloat::cmpUnordered, dl, VT,
2827                                              OpVT);
2829                                              VT, OpVT);
2830     case ISD::SETUGE: return getBoolConstant(R!=APFloat::cmpLessThan, dl, VT,
2831                                              OpVT);
2832     }
2833   } else if (N1CFP && OpVT.isSimple() && !N2.isUndef()) {
2834     // Ensure that the constant occurs on the RHS.
     // NOTE(review): the line computing SwappedCond (doc 2835) was dropped
     // by extraction.
2836     if (!TLI->isCondCodeLegal(SwappedCond, OpVT.getSimpleVT()))
2837       return SDValue();
2838     return getSetCC(dl, VT, N2, N1, SwappedCond, /*Chain=*/{},
2839                     /*IsSignaling=*/false, Flags);
2840   } else if ((N2CFP && N2CFP->getValueAPF().isNaN()) ||
2841              (OpVT.isFloatingPoint() && (N1.isUndef() || N2.isUndef()))) {
2842     // If an operand is known to be a nan (or undef that could be a nan), we can
2843     // fold it.
2844     // Choosing NaN for the undef will always make unordered comparison succeed
2845     // and ordered comparison fails.
2846     // Matches behavior in llvm::ConstantFoldCompareInstruction.
2847     switch (ISD::getUnorderedFlavor(Cond)) {
2848     default:
2849       llvm_unreachable("Unknown flavor!");
2850     case 0: // Known false.
2851       return getBoolConstant(false, dl, VT, OpVT);
2852     case 1: // Known true.
2853       return getBoolConstant(true, dl, VT, OpVT);
2854     case 2: // Undefined.
2855       return GetUndefBooleanConstant();
2856     }
2857   }
2858
2859   // Could not fold it.
2860   return SDValue();
2861 }
2862
2863/// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We
2864/// use this predicate to simplify operations downstream.
// NOTE(review): extraction dropped the signature (doc 2865) and the
// MaskedValueIsZero call line (doc 2867) — verify against upstream.
2866   unsigned BitWidth = Op.getScalarValueSizeInBits();
2868 }
2869
2870// TODO: Should have argument to specify if sign bit of nan is ignorable.
// Conservatively determine whether the FP sign bit of Op is known zero,
// recursing through sign-preserving operations up to MaxRecursionDepth.
// NOTE(review): signature line dropped by extraction (doc 2871).
2872   if (Depth >= MaxRecursionDepth)
2873     return false; // Limit search depth.
2874
2875   unsigned Opc = Op.getOpcode();
2876   switch (Opc) {
2877   case ISD::FABS:
     // fabs always clears the sign bit.
2878     return true;
2879   case ISD::AssertNoFPClass: {
2880     FPClassTest NoFPClass =
2881         static_cast<FPClassTest>(Op.getConstantOperandVal(1));
2882
     // Needs both "no nan" and "no negative" to conclude a zero sign bit,
     // since a nan may carry either sign.
2883     const FPClassTest TestMask = fcNan | fcNegative;
2884     return (NoFPClass & TestMask) == TestMask;
2885   }
2886   case ISD::ARITH_FENCE:
2887     return SignBitIsZeroFP(Op.getOperand(0), Depth + 1);
2888   case ISD::FEXP:
2889   case ISD::FEXP2:
2890   case ISD::FEXP10:
     // exp(x) > 0 for all non-nan inputs.
2891     return Op->getFlags().hasNoNaNs();
2892   case ISD::FMINNUM:
2893   case ISD::FMINNUM_IEEE:
2894   case ISD::FMINIMUM:
2895   case ISD::FMINIMUMNUM:
2896     return SignBitIsZeroFP(Op.getOperand(1), Depth + 1) &&
2897            SignBitIsZeroFP(Op.getOperand(0), Depth + 1);
2898   case ISD::FMAXNUM:
2899   case ISD::FMAXNUM_IEEE:
2900   case ISD::FMAXIMUM:
2901   case ISD::FMAXIMUMNUM:
2902     // TODO: If we can ignore the sign bit of nans, only one side being known 0
2903     // is sufficient.
2904     return SignBitIsZeroFP(Op.getOperand(1), Depth + 1) &&
2905            SignBitIsZeroFP(Op.getOperand(0), Depth + 1);
2906   default:
2907     return false;
2908   }
2909
2910   llvm_unreachable("covered opcode switch");
2911 }
2912
2913/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
2914/// this predicate to simplify operations downstream. Mask is known to be zero
2915/// for bits that V cannot have.
// NOTE(review): signature line dropped by extraction (doc 2916).
2917                                      unsigned Depth) const {
2918   return Mask.isSubsetOf(computeKnownBits(V, Depth).Zero);
2919 }
2920
2921/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero in
2922/// DemandedElts. We use this predicate to simplify operations downstream.
2923/// Mask is known to be zero for bits that V cannot have.
// NOTE(review): signature line dropped by extraction (doc 2924).
2925                                      const APInt &DemandedElts,
2926                                      unsigned Depth) const {
2927   return Mask.isSubsetOf(computeKnownBits(V, DemandedElts, Depth).Zero);
2928 }
2929
2930/// MaskedVectorIsZero - Return true if 'Op' is known to be zero in
2931/// DemandedElts. We use this predicate to simplify operations downstream.
// NOTE(review): signature line dropped by extraction (doc 2932).
2933                                       unsigned Depth /* = 0 */) const {
2934   return computeKnownBits(V, DemandedElts, Depth).isZero();
2935 }
2936
2937/// MaskedValueIsAllOnes - Return true if '(Op & Mask) == Mask'.
// NOTE(review): signature line dropped by extraction (doc 2938).
2939                                         unsigned Depth) const {
2940   return Mask.isSubsetOf(computeKnownBits(V, Depth).One);
2941 }
2942
// For a fixed-width vector, return a bitmask of the demanded elements that
// are known to be all-zero, querying each demanded lane independently.
// NOTE(review): signature line dropped by extraction (doc 2943).
2944                                                   const APInt &DemandedElts,
2945                                                   unsigned Depth) const {
2946   EVT VT = Op.getValueType();
2947   assert(VT.isVector() && !VT.isScalableVector() && "Only for fixed vectors!");
2948
2949   unsigned NumElts = VT.getVectorNumElements();
2950   assert(DemandedElts.getBitWidth() == NumElts && "Unexpected demanded mask.");
2951
2952   APInt KnownZeroElements = APInt::getZero(NumElts);
2953   for (unsigned EltIdx = 0; EltIdx != NumElts; ++EltIdx) {
2954     if (!DemandedElts[EltIdx])
2955       continue; // Don't query elements that are not demanded.
2956     APInt Mask = APInt::getOneBitSet(NumElts, EltIdx);
2957     if (MaskedVectorIsZero(Op, Mask, Depth))
2958       KnownZeroElements.setBit(EltIdx);
2959   }
2960   return KnownZeroElements;
2961 }
2962
2963/// isSplatValue - Return true if the vector V has the same value
2964/// across all DemandedElts. For scalable vectors, we don't know the
2965/// number of lanes at compile time. Instead, we use a 1 bit APInt
2966/// to represent a conservative value for all lanes; that is, that
2967/// one bit value is implicitly splatted across all lanes.
// NOTE(review): extraction dropped several 'case' labels inside this
// function (doc 3011, 3088, 3104-3106 — likely the ABDS/ABDU,
// EXTRACT_SUBVECTOR and *_EXTEND_VECTOR_INREG cases) — verify against
// upstream before editing.
2968bool SelectionDAG::isSplatValue(SDValue V, const APInt &DemandedElts,
2969                                APInt &UndefElts, unsigned Depth) const {
2970   unsigned Opcode = V.getOpcode();
2971   EVT VT = V.getValueType();
2972   assert(VT.isVector() && "Vector type expected");
2973   assert((!VT.isScalableVector() || DemandedElts.getBitWidth() == 1) &&
2974          "scalable demanded bits are ignored");
2975
2976   if (!DemandedElts)
2977     return false; // No demanded elts, better to assume we don't know anything.
2978
2979   if (Depth >= MaxRecursionDepth)
2980     return false; // Limit search depth.
2981
2982   // Deal with some common cases here that work for both fixed and scalable
2983   // vector types.
2984   switch (Opcode) {
2985   case ISD::SPLAT_VECTOR:
2986     UndefElts = V.getOperand(0).isUndef()
2987                     ? APInt::getAllOnes(DemandedElts.getBitWidth())
2988                     : APInt(DemandedElts.getBitWidth(), 0);
2989     return true;
2990   case ISD::ADD:
2991   case ISD::SUB:
2992   case ISD::AND:
2993   case ISD::XOR:
2994   case ISD::OR: {
2995     APInt UndefLHS, UndefRHS;
2996     SDValue LHS = V.getOperand(0);
2997     SDValue RHS = V.getOperand(1);
2998     // Only recognize splats with the same demanded undef elements for both
2999     // operands, otherwise we might fail to handle binop-specific undef
3000     // handling.
3001     // e.g. (and undef, 0) -> 0 etc.
3002     if (isSplatValue(LHS, DemandedElts, UndefLHS, Depth + 1) &&
3003         isSplatValue(RHS, DemandedElts, UndefRHS, Depth + 1) &&
3004         (DemandedElts & UndefLHS) == (DemandedElts & UndefRHS)) {
3005       UndefElts = UndefLHS | UndefRHS;
3006       return true;
3007     }
3008     return false;
3009   }
3010   case ISD::ABS:
3012   case ISD::TRUNCATE:
3013   case ISD::SIGN_EXTEND:
3014   case ISD::ZERO_EXTEND:
     // Elementwise unary ops preserve splat-ness of their input.
3015     return isSplatValue(V.getOperand(0), DemandedElts, UndefElts, Depth + 1);
3016   default:
3017     if (Opcode >= ISD::BUILTIN_OP_END || Opcode == ISD::INTRINSIC_WO_CHAIN ||
3018         Opcode == ISD::INTRINSIC_W_CHAIN || Opcode == ISD::INTRINSIC_VOID)
3019       return TLI->isSplatValueForTargetNode(V, DemandedElts, UndefElts, *this,
3020                                             Depth);
3021     break;
3022   }
3023
3024   // We don't support other cases than those above for scalable vectors at
3025   // the moment.
3026   if (VT.isScalableVector())
3027     return false;
3028
3029   unsigned NumElts = VT.getVectorNumElements();
3030   assert(NumElts == DemandedElts.getBitWidth() && "Vector size mismatch");
3031   UndefElts = APInt::getZero(NumElts);
3032
3033   switch (Opcode) {
3034   case ISD::BUILD_VECTOR: {
     // Splat iff every demanded, non-undef operand is the same scalar.
3035     SDValue Scl;
3036     for (unsigned i = 0; i != NumElts; ++i) {
3037       SDValue Op = V.getOperand(i);
3038       if (Op.isUndef()) {
3039         UndefElts.setBit(i);
3040         continue;
3041       }
3042       if (!DemandedElts[i])
3043         continue;
3044       if (Scl && Scl != Op)
3045         return false;
3046       Scl = Op;
3047     }
3048     return true;
3049   }
3050   case ISD::VECTOR_SHUFFLE: {
3051     // Check if this is a shuffle node doing a splat or a shuffle of a splat.
3052     APInt DemandedLHS = APInt::getZero(NumElts);
3053     APInt DemandedRHS = APInt::getZero(NumElts);
3054     ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(V)->getMask();
3055     for (int i = 0; i != (int)NumElts; ++i) {
3056       int M = Mask[i];
3057       if (M < 0) {
3058         UndefElts.setBit(i);
3059         continue;
3060       }
3061       if (!DemandedElts[i])
3062         continue;
3063       if (M < (int)NumElts)
3064         DemandedLHS.setBit(M);
3065       else
3066         DemandedRHS.setBit(M - NumElts);
3067     }
3068
3069     // If we aren't demanding either op, assume there's no splat.
3070     // If we are demanding both ops, assume there's no splat.
3071     if ((DemandedLHS.isZero() && DemandedRHS.isZero()) ||
3072         (!DemandedLHS.isZero() && !DemandedRHS.isZero()))
3073       return false;
3074
3075     // See if the demanded elts of the source op is a splat or we only demand
3076     // one element, which should always be a splat.
3077     // TODO: Handle source ops splats with undefs.
3078     auto CheckSplatSrc = [&](SDValue Src, const APInt &SrcElts) {
3079       APInt SrcUndefs;
3080       return (SrcElts.popcount() == 1) ||
3081              (isSplatValue(Src, SrcElts, SrcUndefs, Depth + 1) &&
3082               (SrcElts & SrcUndefs).isZero());
3083     };
3084     if (!DemandedLHS.isZero())
3085       return CheckSplatSrc(V.getOperand(0), DemandedLHS);
3086     return CheckSplatSrc(V.getOperand(1), DemandedRHS);
3087   }
   // NOTE(review): a case label (doc 3088) was dropped by extraction here;
   // the body below matches an extract-subvector pattern.
3089     // Offset the demanded elts by the subvector index.
3090     SDValue Src = V.getOperand(0);
3091     // We don't support scalable vectors at the moment.
3092     if (Src.getValueType().isScalableVector())
3093       return false;
3094     uint64_t Idx = V.getConstantOperandVal(1);
3095     unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3096     APInt UndefSrcElts;
3097     APInt DemandedSrcElts = DemandedElts.zext(NumSrcElts).shl(Idx);
3098     if (isSplatValue(Src, DemandedSrcElts, UndefSrcElts, Depth + 1)) {
3099       UndefElts = UndefSrcElts.extractBits(NumElts, Idx);
3100       return true;
3101     }
3102     break;
3103   }
   // NOTE(review): case labels (doc 3104-3106) were dropped by extraction
   // here; the body below matches the *_EXTEND_VECTOR_INREG pattern.
3107     // Widen the demanded elts by the src element count.
3108     SDValue Src = V.getOperand(0);
3109     // We don't support scalable vectors at the moment.
3110     if (Src.getValueType().isScalableVector())
3111       return false;
3112     unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3113     APInt UndefSrcElts;
3114     APInt DemandedSrcElts = DemandedElts.zext(NumSrcElts);
3115     if (isSplatValue(Src, DemandedSrcElts, UndefSrcElts, Depth + 1)) {
3116       UndefElts = UndefSrcElts.trunc(NumElts);
3117       return true;
3118     }
3119     break;
3120   }
3121   case ISD::BITCAST: {
3122     SDValue Src = V.getOperand(0);
3123     EVT SrcVT = Src.getValueType();
3124     unsigned SrcBitWidth = SrcVT.getScalarSizeInBits();
3125     unsigned BitWidth = VT.getScalarSizeInBits();
3126
3127     // Ignore bitcasts from unsupported types.
3128     // TODO: Add fp support?
3129     if (!SrcVT.isVector() || !SrcVT.isInteger() || !VT.isInteger())
3130       break;
3131
3132     // Bitcast 'small element' vector to 'large element' vector.
3133     if ((BitWidth % SrcBitWidth) == 0) {
3134       // See if each sub element is a splat.
3135       unsigned Scale = BitWidth / SrcBitWidth;
3136       unsigned NumSrcElts = SrcVT.getVectorNumElements();
3137       APInt ScaledDemandedElts =
3138           APIntOps::ScaleBitMask(DemandedElts, NumSrcElts);
3139       for (unsigned I = 0; I != Scale; ++I) {
3140         APInt SubUndefElts;
3141         APInt SubDemandedElt = APInt::getOneBitSet(Scale, I);
3142         APInt SubDemandedElts = APInt::getSplat(NumSrcElts, SubDemandedElt);
3143         SubDemandedElts &= ScaledDemandedElts;
3144         if (!isSplatValue(Src, SubDemandedElts, SubUndefElts, Depth + 1))
3145           return false;
3146         // TODO: Add support for merging sub undef elements.
3147         if (!SubUndefElts.isZero())
3148           return false;
3149       }
3150       return true;
3151     }
3152     break;
3153   }
3154   }
3155
3156   return false;
3157 }
3158
3159/// Helper wrapper to main isSplatValue function.
3160bool SelectionDAG::isSplatValue(SDValue V, bool AllowUndefs) const {
3161 EVT VT = V.getValueType();
3162 assert(VT.isVector() && "Vector type expected");
3163
3164 APInt UndefElts;
3165 // Since the number of lanes in a scalable vector is unknown at compile time,
3166 // we track one bit which is implicitly broadcast to all lanes. This means
3167 // that all lanes in a scalable vector are considered demanded.
3168 APInt DemandedElts
3170 return isSplatValue(V, DemandedElts, UndefElts) &&
3171 (AllowUndefs || !UndefElts);
3172}
3173
3176
3177 EVT VT = V.getValueType();
3178 unsigned Opcode = V.getOpcode();
3179 switch (Opcode) {
3180 default: {
3181 APInt UndefElts;
3182 // Since the number of lanes in a scalable vector is unknown at compile time,
3183 // we track one bit which is implicitly broadcast to all lanes. This means
3184 // that all lanes in a scalable vector are considered demanded.
3185 APInt DemandedElts
3187
3188 if (isSplatValue(V, DemandedElts, UndefElts)) {
3189 if (VT.isScalableVector()) {
3190 // DemandedElts and UndefElts are ignored for scalable vectors, since
3191 // the only supported cases are SPLAT_VECTOR nodes.
3192 SplatIdx = 0;
3193 } else {
3194 // Handle case where all demanded elements are UNDEF.
3195 if (DemandedElts.isSubsetOf(UndefElts)) {
3196 SplatIdx = 0;
3197 return getUNDEF(VT);
3198 }
3199 SplatIdx = (UndefElts & DemandedElts).countr_one();
3200 }
3201 return V;
3202 }
3203 break;
3204 }
3205 case ISD::SPLAT_VECTOR:
3206 SplatIdx = 0;
3207 return V;
3208 case ISD::VECTOR_SHUFFLE: {
3209 assert(!VT.isScalableVector());
3210 // Check if this is a shuffle node doing a splat.
3211 // TODO - remove this and rely purely on SelectionDAG::isSplatValue,
3212 // getTargetVShiftNode currently struggles without the splat source.
3213 auto *SVN = cast<ShuffleVectorSDNode>(V);
3214 if (!SVN->isSplat())
3215 break;
3216 int Idx = SVN->getSplatIndex();
3217 int NumElts = V.getValueType().getVectorNumElements();
3218 SplatIdx = Idx % NumElts;
3219 return V.getOperand(Idx / NumElts);
3220 }
3221 }
3222
3223 return SDValue();
3224}
3225
3227 int SplatIdx;
3228 if (SDValue SrcVector = getSplatSourceVector(V, SplatIdx)) {
3229 EVT SVT = SrcVector.getValueType().getScalarType();
3230 EVT LegalSVT = SVT;
3231 if (LegalTypes && !TLI->isTypeLegal(SVT)) {
3232 if (!SVT.isInteger())
3233 return SDValue();
3234 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
3235 if (LegalSVT.bitsLT(SVT))
3236 return SDValue();
3237 }
3238 return getExtractVectorElt(SDLoc(V), LegalSVT, SrcVector, SplatIdx);
3239 }
3240 return SDValue();
3241}
3242
3243std::optional<ConstantRange>
3245 unsigned Depth) const {
3246 assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL ||
3247 V.getOpcode() == ISD::SRA) &&
3248 "Unknown shift node");
3249 // Shifting more than the bitwidth is not valid.
3250 unsigned BitWidth = V.getScalarValueSizeInBits();
3251
3252 if (auto *Cst = dyn_cast<ConstantSDNode>(V.getOperand(1))) {
3253 const APInt &ShAmt = Cst->getAPIntValue();
3254 if (ShAmt.uge(BitWidth))
3255 return std::nullopt;
3256 return ConstantRange(ShAmt);
3257 }
3258
3259 if (auto *BV = dyn_cast<BuildVectorSDNode>(V.getOperand(1))) {
3260 const APInt *MinAmt = nullptr, *MaxAmt = nullptr;
3261 for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
3262 if (!DemandedElts[i])
3263 continue;
3264 auto *SA = dyn_cast<ConstantSDNode>(BV->getOperand(i));
3265 if (!SA) {
3266 MinAmt = MaxAmt = nullptr;
3267 break;
3268 }
3269 const APInt &ShAmt = SA->getAPIntValue();
3270 if (ShAmt.uge(BitWidth))
3271 return std::nullopt;
3272 if (!MinAmt || MinAmt->ugt(ShAmt))
3273 MinAmt = &ShAmt;
3274 if (!MaxAmt || MaxAmt->ult(ShAmt))
3275 MaxAmt = &ShAmt;
3276 }
3277 assert(((!MinAmt && !MaxAmt) || (MinAmt && MaxAmt)) &&
3278 "Failed to find matching min/max shift amounts");
3279 if (MinAmt && MaxAmt)
3280 return ConstantRange(*MinAmt, *MaxAmt + 1);
3281 }
3282
3283 // Use computeKnownBits to find a hidden constant/knownbits (usually type
3284 // legalized). e.g. Hidden behind multiple bitcasts/build_vector/casts etc.
3285 KnownBits KnownAmt = computeKnownBits(V.getOperand(1), DemandedElts, Depth);
3286 if (KnownAmt.getMaxValue().ult(BitWidth))
3287 return ConstantRange::fromKnownBits(KnownAmt, /*IsSigned=*/false);
3288
3289 return std::nullopt;
3290}
3291
3292std::optional<unsigned>
3294 unsigned Depth) const {
3295 assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL ||
3296 V.getOpcode() == ISD::SRA) &&
3297 "Unknown shift node");
3298 if (std::optional<ConstantRange> AmtRange =
3299 getValidShiftAmountRange(V, DemandedElts, Depth))
3300 if (const APInt *ShAmt = AmtRange->getSingleElement())
3301 return ShAmt->getZExtValue();
3302 return std::nullopt;
3303}
3304
3305std::optional<unsigned>
3307 APInt DemandedElts = getDemandAllEltsMask(V);
3308 return getValidShiftAmount(V, DemandedElts, Depth);
3309}
3310
3311std::optional<unsigned>
3313 unsigned Depth) const {
3314 assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL ||
3315 V.getOpcode() == ISD::SRA) &&
3316 "Unknown shift node");
3317 if (std::optional<ConstantRange> AmtRange =
3318 getValidShiftAmountRange(V, DemandedElts, Depth))
3319 return AmtRange->getUnsignedMin().getZExtValue();
3320 return std::nullopt;
3321}
3322
3323std::optional<unsigned>
3325 APInt DemandedElts = getDemandAllEltsMask(V);
3326 return getValidMinimumShiftAmount(V, DemandedElts, Depth);
3327}
3328
3329std::optional<unsigned>
3331 unsigned Depth) const {
3332 assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL ||
3333 V.getOpcode() == ISD::SRA) &&
3334 "Unknown shift node");
3335 if (std::optional<ConstantRange> AmtRange =
3336 getValidShiftAmountRange(V, DemandedElts, Depth))
3337 return AmtRange->getUnsignedMax().getZExtValue();
3338 return std::nullopt;
3339}
3340
3341std::optional<unsigned>
3343 APInt DemandedElts = getDemandAllEltsMask(V);
3344 return getValidMaximumShiftAmount(V, DemandedElts, Depth);
3345}
3346
3347/// Determine which bits of Op are known to be either zero or one and return
3348/// them in Known. For vectors, the known bits are those that are shared by
3349/// every vector element.
3351 APInt DemandedElts = getDemandAllEltsMask(Op);
3352 return computeKnownBits(Op, DemandedElts, Depth);
3353}
3354
3355/// Determine which bits of Op are known to be either zero or one and return
3356/// them in Known. The DemandedElts argument allows us to only collect the known
3357/// bits that are shared by the requested vector elements.
3359 unsigned Depth) const {
3360 unsigned BitWidth = Op.getScalarValueSizeInBits();
3361
3362 KnownBits Known(BitWidth); // Don't know anything.
3363
3364 if (auto OptAPInt = Op->bitcastToAPInt()) {
3365 // We know all of the bits for a constant!
3366 return KnownBits::makeConstant(*std::move(OptAPInt));
3367 }
3368
3369 if (Depth >= MaxRecursionDepth)
3370 return Known; // Limit search depth.
3371
3372 KnownBits Known2;
3373 unsigned NumElts = DemandedElts.getBitWidth();
3374 assert((!Op.getValueType().isScalableVector() || NumElts == 1) &&
3375 "DemandedElts for scalable vectors must be 1 to represent all lanes");
3376 assert((!Op.getValueType().isFixedLengthVector() ||
3377 NumElts == Op.getValueType().getVectorNumElements()) &&
3378 "Unexpected vector size");
3379
3380 if (!DemandedElts)
3381 return Known; // No demanded elts, better to assume we don't know anything.
3382
3383 unsigned Opcode = Op.getOpcode();
3384 switch (Opcode) {
3385 case ISD::MERGE_VALUES:
3386 return computeKnownBits(Op.getOperand(Op.getResNo()), DemandedElts,
3387 Depth + 1);
3388 case ISD::SPLAT_VECTOR: {
3389 SDValue SrcOp = Op.getOperand(0);
3390 assert(SrcOp.getValueSizeInBits() >= BitWidth &&
3391 "Expected SPLAT_VECTOR implicit truncation");
3392 // Implicitly truncate the bits to match the official semantics of
3393 // SPLAT_VECTOR.
3394 Known = computeKnownBits(SrcOp, Depth + 1).trunc(BitWidth);
3395 break;
3396 }
3398 unsigned ScalarSize = Op.getOperand(0).getScalarValueSizeInBits();
3399 assert(ScalarSize * Op.getNumOperands() == BitWidth &&
3400 "Expected SPLAT_VECTOR_PARTS scalars to cover element width");
3401 for (auto [I, SrcOp] : enumerate(Op->ops())) {
3402 Known.insertBits(computeKnownBits(SrcOp, Depth + 1), ScalarSize * I);
3403 }
3404 break;
3405 }
3406 case ISD::STEP_VECTOR: {
3407 const APInt &Step = Op.getConstantOperandAPInt(0);
3408
3409 if (Step.isPowerOf2())
3410 Known.Zero.setLowBits(Step.logBase2());
3411
3413
3414 if (!isUIntN(BitWidth, Op.getValueType().getVectorMinNumElements()))
3415 break;
3416 const APInt MinNumElts =
3417 APInt(BitWidth, Op.getValueType().getVectorMinNumElements());
3418
3419 bool Overflow;
3420 const APInt MaxNumElts = getVScaleRange(&F, BitWidth)
3422 .umul_ov(MinNumElts, Overflow);
3423 if (Overflow)
3424 break;
3425
3426 const APInt MaxValue = (MaxNumElts - 1).umul_ov(Step, Overflow);
3427 if (Overflow)
3428 break;
3429
3430 Known.Zero.setHighBits(MaxValue.countl_zero());
3431 break;
3432 }
3433 case ISD::BUILD_VECTOR:
3434 assert(!Op.getValueType().isScalableVector());
3435 // Collect the known bits that are shared by every demanded vector element.
3436 Known.setAllConflict();
3437 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
3438 if (!DemandedElts[i])
3439 continue;
3440
3441 SDValue SrcOp = Op.getOperand(i);
3442 Known2 = computeKnownBits(SrcOp, Depth + 1);
3443
3444 // BUILD_VECTOR can implicitly truncate sources, we must handle this.
3445 if (SrcOp.getValueSizeInBits() != BitWidth) {
3446 assert(SrcOp.getValueSizeInBits() > BitWidth &&
3447 "Expected BUILD_VECTOR implicit truncation");
3448 Known2 = Known2.trunc(BitWidth);
3449 }
3450
3451 // Known bits are the values that are shared by every demanded element.
3452 Known = Known.intersectWith(Known2);
3453
3454 // If we don't know any bits, early out.
3455 if (Known.isUnknown())
3456 break;
3457 }
3458 break;
3459 case ISD::VECTOR_COMPRESS: {
3460 SDValue Vec = Op.getOperand(0);
3461 SDValue PassThru = Op.getOperand(2);
3462 Known = computeKnownBits(PassThru, DemandedElts, Depth + 1);
3463 // If we don't know any bits, early out.
3464 if (Known.isUnknown())
3465 break;
3466 Known2 = computeKnownBits(Vec, Depth + 1);
3467 Known = Known.intersectWith(Known2);
3468 break;
3469 }
3470 case ISD::VECTOR_SHUFFLE: {
3471 assert(!Op.getValueType().isScalableVector());
3472 // Collect the known bits that are shared by every vector element referenced
3473 // by the shuffle.
3474 APInt DemandedLHS, DemandedRHS;
3476 assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
3477 if (!getShuffleDemandedElts(NumElts, SVN->getMask(), DemandedElts,
3478 DemandedLHS, DemandedRHS))
3479 break;
3480
3481 // Known bits are the values that are shared by every demanded element.
3482 Known.setAllConflict();
3483 if (!!DemandedLHS) {
3484 SDValue LHS = Op.getOperand(0);
3485 Known2 = computeKnownBits(LHS, DemandedLHS, Depth + 1);
3486 Known = Known.intersectWith(Known2);
3487 }
3488 // If we don't know any bits, early out.
3489 if (Known.isUnknown())
3490 break;
3491 if (!!DemandedRHS) {
3492 SDValue RHS = Op.getOperand(1);
3493 Known2 = computeKnownBits(RHS, DemandedRHS, Depth + 1);
3494 Known = Known.intersectWith(Known2);
3495 }
3496 break;
3497 }
3498 case ISD::VSCALE: {
3500 const APInt &Multiplier = Op.getConstantOperandAPInt(0);
3501 Known = getVScaleRange(&F, BitWidth).multiply(Multiplier).toKnownBits();
3502 break;
3503 }
3504 case ISD::CONCAT_VECTORS: {
3505 if (Op.getValueType().isScalableVector())
3506 break;
3507 // Split DemandedElts and test each of the demanded subvectors.
3508 Known.setAllConflict();
3509 EVT SubVectorVT = Op.getOperand(0).getValueType();
3510 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
3511 unsigned NumSubVectors = Op.getNumOperands();
3512 for (unsigned i = 0; i != NumSubVectors; ++i) {
3513 APInt DemandedSub =
3514 DemandedElts.extractBits(NumSubVectorElts, i * NumSubVectorElts);
3515 if (!!DemandedSub) {
3516 SDValue Sub = Op.getOperand(i);
3517 Known2 = computeKnownBits(Sub, DemandedSub, Depth + 1);
3518 Known = Known.intersectWith(Known2);
3519 }
3520 // If we don't know any bits, early out.
3521 if (Known.isUnknown())
3522 break;
3523 }
3524 break;
3525 }
3526 case ISD::INSERT_SUBVECTOR: {
3527 if (Op.getValueType().isScalableVector())
3528 break;
3529 // Demand any elements from the subvector and the remainder from the src its
3530 // inserted into.
3531 SDValue Src = Op.getOperand(0);
3532 SDValue Sub = Op.getOperand(1);
3533 uint64_t Idx = Op.getConstantOperandVal(2);
3534 unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
3535 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
3536 APInt DemandedSrcElts = DemandedElts;
3537 DemandedSrcElts.clearBits(Idx, Idx + NumSubElts);
3538
3539 Known.setAllConflict();
3540 if (!!DemandedSubElts) {
3541 Known = computeKnownBits(Sub, DemandedSubElts, Depth + 1);
3542 if (Known.isUnknown())
3543 break; // early-out.
3544 }
3545 if (!!DemandedSrcElts) {
3546 Known2 = computeKnownBits(Src, DemandedSrcElts, Depth + 1);
3547 Known = Known.intersectWith(Known2);
3548 }
3549 break;
3550 }
3552 // Offset the demanded elts by the subvector index.
3553 SDValue Src = Op.getOperand(0);
3554
3555 APInt DemandedSrcElts;
3556 if (Src.getValueType().isScalableVector())
3557 DemandedSrcElts = APInt(1, 1); // <=> 'demand all elements'
3558 else {
3559 uint64_t Idx = Op.getConstantOperandVal(1);
3560 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3561 DemandedSrcElts = DemandedElts.zext(NumSrcElts).shl(Idx);
3562 }
3563 Known = computeKnownBits(Src, DemandedSrcElts, Depth + 1);
3564 break;
3565 }
3566 case ISD::SCALAR_TO_VECTOR: {
3567 if (Op.getValueType().isScalableVector())
3568 break;
3569 // We know about scalar_to_vector as much as we know about it source,
3570 // which becomes the first element of otherwise unknown vector.
3571 if (DemandedElts != 1)
3572 break;
3573
3574 SDValue N0 = Op.getOperand(0);
3575 Known = computeKnownBits(N0, Depth + 1);
3576 if (N0.getValueSizeInBits() != BitWidth)
3577 Known = Known.trunc(BitWidth);
3578
3579 break;
3580 }
3581 case ISD::BITCAST: {
3582 if (Op.getValueType().isScalableVector())
3583 break;
3584
3585 SDValue N0 = Op.getOperand(0);
3586 EVT SubVT = N0.getValueType();
3587 unsigned SubBitWidth = SubVT.getScalarSizeInBits();
3588
3589 // Ignore bitcasts from unsupported types.
3590 if (!(SubVT.isInteger() || SubVT.isFloatingPoint()))
3591 break;
3592
3593 // Fast handling of 'identity' bitcasts.
3594 if (BitWidth == SubBitWidth) {
3595 Known = computeKnownBits(N0, DemandedElts, Depth + 1);
3596 break;
3597 }
3598
3599 bool IsLE = getDataLayout().isLittleEndian();
3600
3601 // Bitcast 'small element' vector to 'large element' scalar/vector.
3602 if ((BitWidth % SubBitWidth) == 0) {
3603 assert(N0.getValueType().isVector() && "Expected bitcast from vector");
3604
3605 // Collect known bits for the (larger) output by collecting the known
3606 // bits from each set of sub elements and shift these into place.
3607 // We need to separately call computeKnownBits for each set of
3608 // sub elements as the knownbits for each is likely to be different.
3609 unsigned SubScale = BitWidth / SubBitWidth;
3610 APInt SubDemandedElts(NumElts * SubScale, 0);
3611 for (unsigned i = 0; i != NumElts; ++i)
3612 if (DemandedElts[i])
3613 SubDemandedElts.setBit(i * SubScale);
3614
3615 for (unsigned i = 0; i != SubScale; ++i) {
3616 Known2 = computeKnownBits(N0, SubDemandedElts.shl(i),
3617 Depth + 1);
3618 unsigned Shifts = IsLE ? i : SubScale - 1 - i;
3619 Known.insertBits(Known2, SubBitWidth * Shifts);
3620 }
3621 }
3622
3623 // Bitcast 'large element' scalar/vector to 'small element' vector.
3624 if ((SubBitWidth % BitWidth) == 0) {
3625 assert(Op.getValueType().isVector() && "Expected bitcast to vector");
3626
3627 // Collect known bits for the (smaller) output by collecting the known
3628 // bits from the overlapping larger input elements and extracting the
3629 // sub sections we actually care about.
3630 unsigned SubScale = SubBitWidth / BitWidth;
3631 APInt SubDemandedElts =
3632 APIntOps::ScaleBitMask(DemandedElts, NumElts / SubScale);
3633 Known2 = computeKnownBits(N0, SubDemandedElts, Depth + 1);
3634
3635 Known.setAllConflict();
3636 for (unsigned i = 0; i != NumElts; ++i)
3637 if (DemandedElts[i]) {
3638 unsigned Shifts = IsLE ? i : NumElts - 1 - i;
3639 unsigned Offset = (Shifts % SubScale) * BitWidth;
3640 Known = Known.intersectWith(Known2.extractBits(BitWidth, Offset));
3641 // If we don't know any bits, early out.
3642 if (Known.isUnknown())
3643 break;
3644 }
3645 }
3646 break;
3647 }
3648 case ISD::AND:
3649 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3650 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3651
3652 Known &= Known2;
3653 break;
3654 case ISD::OR:
3655 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3656 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3657
3658 Known |= Known2;
3659 break;
3660 case ISD::XOR:
3661 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3662 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3663
3664 Known ^= Known2;
3665 break;
3666 case ISD::MUL: {
3667 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3668 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3669 bool SelfMultiply = Op.getOperand(0) == Op.getOperand(1);
3670 // TODO: SelfMultiply can be poison, but not undef.
3671 if (SelfMultiply)
3672 SelfMultiply &= isGuaranteedNotToBeUndefOrPoison(
3673 Op.getOperand(0), DemandedElts, UndefPoisonKind::UndefOrPoison,
3674 Depth + 1);
3675 Known = KnownBits::mul(Known, Known2, SelfMultiply);
3676
3677 // If the multiplication is known not to overflow, the product of a number
3678 // with itself is non-negative. Only do this if we didn't already computed
3679 // the opposite value for the sign bit.
3680 if (Op->getFlags().hasNoSignedWrap() &&
3681 Op.getOperand(0) == Op.getOperand(1) &&
3682 !Known.isNegative())
3683 Known.makeNonNegative();
3684 break;
3685 }
3686 case ISD::MULHU: {
3687 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3688 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3689 Known = KnownBits::mulhu(Known, Known2);
3690 break;
3691 }
3692 case ISD::MULHS: {
3693 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3694 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3695 Known = KnownBits::mulhs(Known, Known2);
3696 break;
3697 }
3698 case ISD::ABDU: {
3699 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3700 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3701 Known = KnownBits::abdu(Known, Known2);
3702 break;
3703 }
3704 case ISD::ABDS: {
3705 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3706 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3707 Known = KnownBits::abds(Known, Known2);
3708 unsigned SignBits1 =
3709 ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
3710 if (SignBits1 == 1)
3711 break;
3712 unsigned SignBits0 =
3713 ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3714 Known.Zero.setHighBits(std::min(SignBits0, SignBits1) - 1);
3715 break;
3716 }
3717 case ISD::UMUL_LOHI: {
3718 assert((Op.getResNo() == 0 || Op.getResNo() == 1) && "Unknown result");
3719 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3720 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3721 bool SelfMultiply = Op.getOperand(0) == Op.getOperand(1);
3722 if (Op.getResNo() == 0)
3723 Known = KnownBits::mul(Known, Known2, SelfMultiply);
3724 else
3725 Known = KnownBits::mulhu(Known, Known2);
3726 break;
3727 }
3728 case ISD::SMUL_LOHI: {
3729 assert((Op.getResNo() == 0 || Op.getResNo() == 1) && "Unknown result");
3730 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3731 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3732 bool SelfMultiply = Op.getOperand(0) == Op.getOperand(1);
3733 if (Op.getResNo() == 0)
3734 Known = KnownBits::mul(Known, Known2, SelfMultiply);
3735 else
3736 Known = KnownBits::mulhs(Known, Known2);
3737 break;
3738 }
3739 case ISD::AVGFLOORU: {
3740 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3741 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3742 Known = KnownBits::avgFloorU(Known, Known2);
3743 break;
3744 }
3745 case ISD::AVGCEILU: {
3746 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3747 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3748 Known = KnownBits::avgCeilU(Known, Known2);
3749 break;
3750 }
3751 case ISD::AVGFLOORS: {
3752 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3753 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3754 Known = KnownBits::avgFloorS(Known, Known2);
3755 break;
3756 }
3757 case ISD::AVGCEILS: {
3758 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3759 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3760 Known = KnownBits::avgCeilS(Known, Known2);
3761 break;
3762 }
3763 case ISD::SELECT:
3764 case ISD::VSELECT:
3765 Known = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1);
3766 // If we don't know any bits, early out.
3767 if (Known.isUnknown())
3768 break;
3769 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth+1);
3770
3771 // Only known if known in both the LHS and RHS.
3772 Known = Known.intersectWith(Known2);
3773 break;
3774 case ISD::SELECT_CC:
3775 Known = computeKnownBits(Op.getOperand(3), DemandedElts, Depth+1);
3776 // If we don't know any bits, early out.
3777 if (Known.isUnknown())
3778 break;
3779 Known2 = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1);
3780
3781 // Only known if known in both the LHS and RHS.
3782 Known = Known.intersectWith(Known2);
3783 break;
3784 case ISD::SMULO:
3785 case ISD::UMULO:
3786 if (Op.getResNo() != 1)
3787 break;
3788 // The boolean result conforms to getBooleanContents.
3789 // If we know the result of a setcc has the top bits zero, use this info.
3790 // We know that we have an integer-based boolean since these operations
3791 // are only available for integer.
3792 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
3794 BitWidth > 1)
3795 Known.Zero.setBitsFrom(1);
3796 break;
3797 case ISD::SETCC:
3798 case ISD::SETCCCARRY:
3799 case ISD::STRICT_FSETCC:
3800 case ISD::STRICT_FSETCCS: {
3801 unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0;
3802 // If we know the result of a setcc has the top bits zero, use this info.
3803 if (TLI->getBooleanContents(Op.getOperand(OpNo).getValueType()) ==
3805 BitWidth > 1)
3806 Known.Zero.setBitsFrom(1);
3807 break;
3808 }
3809 case ISD::SHL: {
3810 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3811 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3812
3813 bool NUW = Op->getFlags().hasNoUnsignedWrap();
3814 bool NSW = Op->getFlags().hasNoSignedWrap();
3815
3816 bool ShAmtNonZero = Known2.isNonZero();
3817
3818 Known = KnownBits::shl(Known, Known2, NUW, NSW, ShAmtNonZero);
3819
3820 // Minimum shift low bits are known zero.
3821 if (std::optional<unsigned> ShMinAmt =
3822 getValidMinimumShiftAmount(Op, DemandedElts, Depth + 1))
3823 Known.Zero.setLowBits(*ShMinAmt);
3824 break;
3825 }
3826 case ISD::SRL:
3827 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3828 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3829 Known = KnownBits::lshr(Known, Known2, /*ShAmtNonZero=*/false,
3830 Op->getFlags().hasExact());
3831
3832 // Minimum shift high bits are known zero.
3833 if (std::optional<unsigned> ShMinAmt =
3834 getValidMinimumShiftAmount(Op, DemandedElts, Depth + 1))
3835 Known.Zero.setHighBits(*ShMinAmt);
3836 break;
3837 case ISD::SRA:
3838 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3839 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3840 Known = KnownBits::ashr(Known, Known2, /*ShAmtNonZero=*/false,
3841 Op->getFlags().hasExact());
3842 break;
3843 case ISD::ROTL:
3844 case ISD::ROTR:
3845 if (ConstantSDNode *C =
3846 isConstOrConstSplat(Op.getOperand(1), DemandedElts)) {
3847 unsigned Amt = C->getAPIntValue().urem(BitWidth);
3848
3849 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3850
3851 // Canonicalize to ROTR.
3852 if (Opcode == ISD::ROTL && Amt != 0)
3853 Amt = BitWidth - Amt;
3854
3855 Known.Zero = Known.Zero.rotr(Amt);
3856 Known.One = Known.One.rotr(Amt);
3857 }
3858 break;
3859 case ISD::FSHL:
3860 case ISD::FSHR:
3861 if (ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(2), DemandedElts)) {
3862 unsigned Amt = C->getAPIntValue().urem(BitWidth);
3863
3864 // For fshl, 0-shift returns the 1st arg.
3865 // For fshr, 0-shift returns the 2nd arg.
3866 if (Amt == 0) {
3867 Known = computeKnownBits(Op.getOperand(Opcode == ISD::FSHL ? 0 : 1),
3868 DemandedElts, Depth + 1);
3869 break;
3870 }
3871
3872 // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
3873 // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
3874 const APInt ShAmt(BitWidth, Amt);
3875 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3876 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3877 Known = Opcode == ISD::FSHL ? KnownBits::fshl(Known, Known2, ShAmt)
3878 : KnownBits::fshr(Known, Known2, ShAmt);
3879 }
3880 break;
3881 case ISD::SHL_PARTS:
3882 case ISD::SRA_PARTS:
3883 case ISD::SRL_PARTS: {
3884 assert((Op.getResNo() == 0 || Op.getResNo() == 1) && "Unknown result");
3885
3886 // Collect lo/hi source values and concatenate.
3887 unsigned LoBits = Op.getOperand(0).getScalarValueSizeInBits();
3888 unsigned HiBits = Op.getOperand(1).getScalarValueSizeInBits();
3889 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3890 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3891 Known = Known2.concat(Known);
3892
3893 // Collect shift amount.
3894 Known2 = computeKnownBits(Op.getOperand(2), DemandedElts, Depth + 1);
3895
3896 if (Opcode == ISD::SHL_PARTS)
3897 Known = KnownBits::shl(Known, Known2);
3898 else if (Opcode == ISD::SRA_PARTS)
3899 Known = KnownBits::ashr(Known, Known2);
3900 else // if (Opcode == ISD::SRL_PARTS)
3901 Known = KnownBits::lshr(Known, Known2);
3902
3903 // TODO: Minimum shift low/high bits are known zero.
3904
3905 if (Op.getResNo() == 0)
3906 Known = Known.extractBits(LoBits, 0);
3907 else
3908 Known = Known.extractBits(HiBits, LoBits);
3909 break;
3910 }
3912 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3913 EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
3914 Known = Known.sextInReg(EVT.getScalarSizeInBits());
3915 break;
3916 }
3917 case ISD::CTTZ:
3918 case ISD::CTTZ_ZERO_POISON: {
3919 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3920 // If we have a known 1, its position is our upper bound.
3921 unsigned PossibleTZ = Known2.countMaxTrailingZeros();
3922 unsigned LowBits = llvm::bit_width(PossibleTZ);
3923 Known.Zero.setBitsFrom(LowBits);
3924 break;
3925 }
3926 case ISD::CTLZ:
3927 case ISD::CTLZ_ZERO_POISON: {
3928 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3929 // If we have a known 1, its position is our upper bound.
3930 unsigned PossibleLZ = Known2.countMaxLeadingZeros();
3931 unsigned LowBits = llvm::bit_width(PossibleLZ);
3932 Known.Zero.setBitsFrom(LowBits);
3933 break;
3934 }
3935 case ISD::CTLS: {
3936 unsigned MinRedundantSignBits =
3937 ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1) - 1;
3938 ConstantRange Range(APInt(BitWidth, MinRedundantSignBits),
3940 Known = Range.toKnownBits();
3941 break;
3942 }
3943 case ISD::CTPOP: {
3944 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3945 // If we know some of the bits are zero, they can't be one.
3946 unsigned PossibleOnes = Known2.countMaxPopulation();
3947 Known.Zero.setBitsFrom(llvm::bit_width(PossibleOnes));
3948 break;
3949 }
3950 case ISD::PARITY: {
3951 // Parity returns 0 everywhere but the LSB.
3952 Known.Zero.setBitsFrom(1);
3953 break;
3954 }
3955 case ISD::CLMUL: {
3956 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3957 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3958 Known = KnownBits::clmul(Known, Known2);
3959 break;
3960 }
3961 case ISD::MGATHER:
3962 case ISD::MLOAD: {
3963 ISD::LoadExtType ETy =
3964 (Opcode == ISD::MGATHER)
3965 ? cast<MaskedGatherSDNode>(Op)->getExtensionType()
3966 : cast<MaskedLoadSDNode>(Op)->getExtensionType();
3967 if (ETy == ISD::ZEXTLOAD) {
3968 EVT MemVT = cast<MemSDNode>(Op)->getMemoryVT();
3969 KnownBits Known0(MemVT.getScalarSizeInBits());
3970 return Known0.zext(BitWidth);
3971 }
3972 break;
3973 }
3974 case ISD::LOAD: {
3976 const Constant *Cst = TLI->getTargetConstantFromLoad(LD);
3977 if (ISD::isNON_EXTLoad(LD) && Cst) {
3978 // Determine any common known bits from the loaded constant pool value.
3979 Type *CstTy = Cst->getType();
3980 if ((NumElts * BitWidth) == CstTy->getPrimitiveSizeInBits() &&
3981 !Op.getValueType().isScalableVector()) {
3982 // If its a vector splat, then we can (quickly) reuse the scalar path.
3983 // NOTE: We assume all elements match and none are UNDEF.
3984 if (CstTy->isVectorTy()) {
3985 if (const Constant *Splat = Cst->getSplatValue()) {
3986 Cst = Splat;
3987 CstTy = Cst->getType();
3988 }
3989 }
3990 // TODO - do we need to handle different bitwidths?
3991 if (CstTy->isVectorTy() && BitWidth == CstTy->getScalarSizeInBits()) {
3992 // Iterate across all vector elements finding common known bits.
3993 Known.setAllConflict();
3994 for (unsigned i = 0; i != NumElts; ++i) {
3995 if (!DemandedElts[i])
3996 continue;
3997 if (Constant *Elt = Cst->getAggregateElement(i)) {
3998 if (auto *CInt = dyn_cast<ConstantInt>(Elt)) {
3999 const APInt &Value = CInt->getValue();
4000 Known.One &= Value;
4001 Known.Zero &= ~Value;
4002 continue;
4003 }
4004 if (auto *CFP = dyn_cast<ConstantFP>(Elt)) {
4005 APInt Value = CFP->getValueAPF().bitcastToAPInt();
4006 Known.One &= Value;
4007 Known.Zero &= ~Value;
4008 continue;
4009 }
4010 }
4011 Known.One.clearAllBits();
4012 Known.Zero.clearAllBits();
4013 break;
4014 }
4015 } else if (BitWidth == CstTy->getPrimitiveSizeInBits()) {
4016 if (auto *CInt = dyn_cast<ConstantInt>(Cst)) {
4017 Known = KnownBits::makeConstant(CInt->getValue());
4018 } else if (auto *CFP = dyn_cast<ConstantFP>(Cst)) {
4019 Known =
4020 KnownBits::makeConstant(CFP->getValueAPF().bitcastToAPInt());
4021 }
4022 }
4023 }
4024 } else if (Op.getResNo() == 0) {
4025 unsigned ScalarMemorySize = LD->getMemoryVT().getScalarSizeInBits();
4026 KnownBits KnownScalarMemory(ScalarMemorySize);
4027 if (const MDNode *MD = LD->getRanges())
4028 computeKnownBitsFromRangeMetadata(*MD, KnownScalarMemory);
4029
4030 // Extend the Known bits from memory to the size of the scalar result.
4031 if (ISD::isZEXTLoad(Op.getNode()))
4032 Known = KnownScalarMemory.zext(BitWidth);
4033 else if (ISD::isSEXTLoad(Op.getNode()))
4034 Known = KnownScalarMemory.sext(BitWidth);
4035 else if (ISD::isEXTLoad(Op.getNode()))
4036 Known = KnownScalarMemory.anyext(BitWidth);
4037 else
4038 Known = KnownScalarMemory;
4039 assert(Known.getBitWidth() == BitWidth);
4040 return Known;
4041 }
4042 break;
4043 }
4045 if (Op.getValueType().isScalableVector())
4046 break;
4047 EVT InVT = Op.getOperand(0).getValueType();
4048 APInt InDemandedElts = DemandedElts.zext(InVT.getVectorNumElements());
4049 Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
4050 Known = Known.zext(BitWidth);
4051 break;
4052 }
4053 case ISD::ZERO_EXTEND: {
4054 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4055 Known = Known.zext(BitWidth);
4056 break;
4057 }
4059 if (Op.getValueType().isScalableVector())
4060 break;
4061 EVT InVT = Op.getOperand(0).getValueType();
4062 APInt InDemandedElts = DemandedElts.zext(InVT.getVectorNumElements());
4063 Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
4064 // If the sign bit is known to be zero or one, then sext will extend
4065 // it to the top bits, else it will just zext.
4066 Known = Known.sext(BitWidth);
4067 break;
4068 }
4069 case ISD::SIGN_EXTEND: {
4070 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4071 // If the sign bit is known to be zero or one, then sext will extend
4072 // it to the top bits, else it will just zext.
4073 Known = Known.sext(BitWidth);
4074 break;
4075 }
4077 if (Op.getValueType().isScalableVector())
4078 break;
4079 EVT InVT = Op.getOperand(0).getValueType();
4080 APInt InDemandedElts = DemandedElts.zext(InVT.getVectorNumElements());
4081 Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
4082 Known = Known.anyext(BitWidth);
4083 break;
4084 }
4085 case ISD::ANY_EXTEND: {
4086 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4087 Known = Known.anyext(BitWidth);
4088 break;
4089 }
4090 case ISD::TRUNCATE: {
4091 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4092 Known = Known.trunc(BitWidth);
4093 break;
4094 }
4095 case ISD::TRUNCATE_SSAT_S: {
4096 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4097 Known = Known.truncSSat(BitWidth);
4098 break;
4099 }
4100 case ISD::TRUNCATE_SSAT_U: {
4101 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4102 Known = Known.truncSSatU(BitWidth);
4103 break;
4104 }
4105 case ISD::TRUNCATE_USAT_U: {
4106 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4107 Known = Known.truncUSat(BitWidth);
4108 break;
4109 }
4110 case ISD::AssertZext: {
4111 EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
4113 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4114 Known.Zero |= (~InMask);
4115 Known.One &= (~Known.Zero);
4116 break;
4117 }
4118 case ISD::AssertAlign: {
4119 unsigned LogOfAlign = Log2(cast<AssertAlignSDNode>(Op)->getAlign());
4120 assert(LogOfAlign != 0);
4121
4122 // TODO: Should use maximum with source
4123 // If a node is guaranteed to be aligned, set low zero bits accordingly as
4124 // well as clearing one bits.
4125 Known.Zero.setLowBits(LogOfAlign);
4126 Known.One.clearLowBits(LogOfAlign);
4127 break;
4128 }
4129 case ISD::AssertNoFPClass: {
4130 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4131
4132 FPClassTest NoFPClass =
4133 static_cast<FPClassTest>(Op.getConstantOperandVal(1));
4134 const FPClassTest NegativeTestMask = fcNan | fcNegative;
4135 if ((NoFPClass & NegativeTestMask) == NegativeTestMask) {
4136 // Cannot be negative.
4137 Known.makeNonNegative();
4138 }
4139
4140 const FPClassTest PositiveTestMask = fcNan | fcPositive;
4141 if ((NoFPClass & PositiveTestMask) == PositiveTestMask) {
4142 // Cannot be positive.
4143 Known.makeNegative();
4144 }
4145
4146 break;
4147 }
4148 case ISD::FABS:
4149 // fabs clears the sign bit
4150 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4151 Known.makeNonNegative();
4152 break;
4153 case ISD::FGETSIGN:
4154 // All bits are zero except the low bit.
4155 Known.Zero.setBitsFrom(1);
4156 break;
4157 case ISD::ADD: {
4158 SDNodeFlags Flags = Op.getNode()->getFlags();
4159 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4160 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
4161 bool SelfAdd = Op.getOperand(0) == Op.getOperand(1) &&
4163 Op.getOperand(0), DemandedElts,
4165 Known = KnownBits::add(Known, Known2, Flags.hasNoSignedWrap(),
4166 Flags.hasNoUnsignedWrap(), SelfAdd);
4167 break;
4168 }
4169 case ISD::SUB: {
4170 SDNodeFlags Flags = Op.getNode()->getFlags();
4171 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4172 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
4173 Known = KnownBits::sub(Known, Known2, Flags.hasNoSignedWrap(),
4174 Flags.hasNoUnsignedWrap());
4175 break;
4176 }
4177 case ISD::USUBO:
4178 case ISD::SSUBO:
4179 case ISD::USUBO_CARRY:
4180 case ISD::SSUBO_CARRY:
4181 if (Op.getResNo() == 1) {
4182 // If we know the result of a setcc has the top bits zero, use this info.
4183 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
4185 BitWidth > 1)
4186 Known.Zero.setBitsFrom(1);
4187 break;
4188 }
4189 [[fallthrough]];
4190 case ISD::SUBC: {
4191 assert(Op.getResNo() == 0 &&
4192 "We only compute knownbits for the difference here.");
4193
4194 // With USUBO_CARRY and SSUBO_CARRY a borrow bit may be added in.
4195 KnownBits Borrow(1);
4196 if (Opcode == ISD::USUBO_CARRY || Opcode == ISD::SSUBO_CARRY) {
4197 Borrow = computeKnownBits(Op.getOperand(2), DemandedElts, Depth + 1);
4198 // Borrow has bit width 1
4199 Borrow = Borrow.trunc(1);
4200 } else {
4201 Borrow.setAllZero();
4202 }
4203
4204 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4205 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
4206 Known = KnownBits::computeForSubBorrow(Known, Known2, Borrow);
4207 break;
4208 }
4209 case ISD::UADDO:
4210 case ISD::SADDO:
4211 case ISD::UADDO_CARRY:
4212 case ISD::SADDO_CARRY:
4213 if (Op.getResNo() == 1) {
4214 // If we know the result of a setcc has the top bits zero, use this info.
4215 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
4217 BitWidth > 1)
4218 Known.Zero.setBitsFrom(1);
4219 break;
4220 }
4221 [[fallthrough]];
4222 case ISD::ADDC:
4223 case ISD::ADDE: {
4224 assert(Op.getResNo() == 0 && "We only compute knownbits for the sum here.");
4225
4226 // With ADDE and UADDO_CARRY, a carry bit may be added in.
4227 KnownBits Carry(1);
4228 if (Opcode == ISD::ADDE)
4229 // Can't track carry from glue, set carry to unknown.
4230 Carry.resetAll();
4231 else if (Opcode == ISD::UADDO_CARRY || Opcode == ISD::SADDO_CARRY) {
4232 Carry = computeKnownBits(Op.getOperand(2), DemandedElts, Depth + 1);
4233 // Carry has bit width 1
4234 Carry = Carry.trunc(1);
4235 } else {
4236 Carry.setAllZero();
4237 }
4238
4239 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4240 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
4241 Known = KnownBits::computeForAddCarry(Known, Known2, Carry);
4242 break;
4243 }
4244 case ISD::UDIV: {
4245 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4246 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
4247 Known = KnownBits::udiv(Known, Known2, Op->getFlags().hasExact());
4248 break;
4249 }
4250 case ISD::SDIV: {
4251 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4252 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
4253 Known = KnownBits::sdiv(Known, Known2, Op->getFlags().hasExact());
4254 break;
4255 }
4256 case ISD::SREM: {
4257 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4258 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
4259 Known = KnownBits::srem(Known, Known2);
4260 break;
4261 }
4262 case ISD::UREM: {
4263 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4264 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
4265 Known = KnownBits::urem(Known, Known2);
4266 break;
4267 }
4268 case ISD::EXTRACT_ELEMENT: {
4269 Known = computeKnownBits(Op.getOperand(0), Depth+1);
4270 const unsigned Index = Op.getConstantOperandVal(1);
4271 const unsigned EltBitWidth = Op.getValueSizeInBits();
4272
4273 // Remove low part of known bits mask
4274 Known.Zero = Known.Zero.getHiBits(Known.getBitWidth() - Index * EltBitWidth);
4275 Known.One = Known.One.getHiBits(Known.getBitWidth() - Index * EltBitWidth);
4276
4277 // Remove high part of known bit mask
4278 Known = Known.trunc(EltBitWidth);
4279 break;
4280 }
4282 SDValue InVec = Op.getOperand(0);
4283 SDValue EltNo = Op.getOperand(1);
4284 EVT VecVT = InVec.getValueType();
4285 // computeKnownBits not yet implemented for scalable vectors.
4286 if (VecVT.isScalableVector())
4287 break;
4288 const unsigned EltBitWidth = VecVT.getScalarSizeInBits();
4289 const unsigned NumSrcElts = VecVT.getVectorNumElements();
4290
4291 // If BitWidth > EltBitWidth the value is anyext:ed. So we do not know
4292 // anything about the extended bits.
4293 if (BitWidth > EltBitWidth)
4294 Known = Known.trunc(EltBitWidth);
4295
4296 // If we know the element index, just demand that vector element, else for
4297 // an unknown element index, ignore DemandedElts and demand them all.
4298 APInt DemandedSrcElts = APInt::getAllOnes(NumSrcElts);
4299 auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
4300 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
4301 DemandedSrcElts =
4302 APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue());
4303
4304 Known = computeKnownBits(InVec, DemandedSrcElts, Depth + 1);
4305 if (BitWidth > EltBitWidth)
4306 Known = Known.anyext(BitWidth);
4307 break;
4308 }
4310 if (Op.getValueType().isScalableVector())
4311 break;
4312
4313 // If we know the element index, split the demand between the
4314 // source vector and the inserted element, otherwise assume we need
4315 // the original demanded vector elements and the value.
4316 SDValue InVec = Op.getOperand(0);
4317 SDValue InVal = Op.getOperand(1);
4318 SDValue EltNo = Op.getOperand(2);
4319 bool DemandedVal = true;
4320 APInt DemandedVecElts = DemandedElts;
4321 auto *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
4322 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
4323 unsigned EltIdx = CEltNo->getZExtValue();
4324 DemandedVal = !!DemandedElts[EltIdx];
4325 DemandedVecElts.clearBit(EltIdx);
4326 }
4327 Known.setAllConflict();
4328 if (DemandedVal) {
4329 Known2 = computeKnownBits(InVal, Depth + 1);
4330 Known = Known.intersectWith(Known2.zextOrTrunc(BitWidth));
4331 }
4332 if (!!DemandedVecElts) {
4333 Known2 = computeKnownBits(InVec, DemandedVecElts, Depth + 1);
4334 Known = Known.intersectWith(Known2);
4335 }
4336 break;
4337 }
4338 case ISD::BITREVERSE: {
4339 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4340 Known = Known2.reverseBits();
4341 break;
4342 }
4343 case ISD::BSWAP: {
4344 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4345 Known = Known2.byteSwap();
4346 break;
4347 }
4348 case ISD::ABS:
4349 case ISD::ABS_MIN_POISON: {
4350 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4351 Known = Known2.abs();
4352 Known.Zero.setHighBits(
4353 ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1) - 1);
4354 break;
4355 }
4356 case ISD::USUBSAT: {
4357 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4358 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
4359 Known = KnownBits::usub_sat(Known, Known2);
4360 break;
4361 }
4362 case ISD::UMIN: {
4363 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4364 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
4365 Known = KnownBits::umin(Known, Known2);
4366 break;
4367 }
4368 case ISD::UMAX: {
4369 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4370 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
4371 Known = KnownBits::umax(Known, Known2);
4372 break;
4373 }
4374 case ISD::SMIN:
4375 case ISD::SMAX: {
4376 // If we have a clamp pattern, we know that the number of sign bits will be
4377 // the minimum of the clamp min/max range.
4378 bool IsMax = (Opcode == ISD::SMAX);
4379 ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr;
4380 if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts)))
4381 if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX))
4382 CstHigh =
4383 isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts);
4384 if (CstLow && CstHigh) {
4385 if (!IsMax)
4386 std::swap(CstLow, CstHigh);
4387
4388 const APInt &ValueLow = CstLow->getAPIntValue();
4389 const APInt &ValueHigh = CstHigh->getAPIntValue();
4390 if (ValueLow.sle(ValueHigh)) {
4391 unsigned LowSignBits = ValueLow.getNumSignBits();
4392 unsigned HighSignBits = ValueHigh.getNumSignBits();
4393 unsigned MinSignBits = std::min(LowSignBits, HighSignBits);
4394 if (ValueLow.isNegative() && ValueHigh.isNegative()) {
4395 Known.One.setHighBits(MinSignBits);
4396 break;
4397 }
4398 if (ValueLow.isNonNegative() && ValueHigh.isNonNegative()) {
4399 Known.Zero.setHighBits(MinSignBits);
4400 break;
4401 }
4402 }
4403 }
4404
4405 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4406 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
4407 if (IsMax)
4408 Known = KnownBits::smax(Known, Known2);
4409 else
4410 Known = KnownBits::smin(Known, Known2);
4411
4412 // For SMAX, if CstLow is non-negative we know the result will be
4413 // non-negative and thus all sign bits are 0.
4414 // TODO: There's an equivalent of this for smin with negative constant for
4415 // known ones.
4416 if (IsMax && CstLow) {
4417 const APInt &ValueLow = CstLow->getAPIntValue();
4418 if (ValueLow.isNonNegative()) {
4419 unsigned SignBits = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
4420 Known.Zero.setHighBits(std::min(SignBits, ValueLow.getNumSignBits()));
4421 }
4422 }
4423
4424 break;
4425 }
4426 case ISD::UINT_TO_FP: {
4427 Known.makeNonNegative();
4428 break;
4429 }
4430 case ISD::SINT_TO_FP: {
4431 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4432 if (Known2.isNonNegative())
4433 Known.makeNonNegative();
4434 else if (Known2.isNegative())
4435 Known.makeNegative();
4436 break;
4437 }
4438 case ISD::FP_TO_UINT_SAT: {
4439 // FP_TO_UINT_SAT produces an unsigned value that fits in the saturating VT.
4440 EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
4442 break;
4443 }
4444 case ISD::ATOMIC_LOAD: {
4445 // If we are looking at the loaded value.
4446 if (Op.getResNo() == 0) {
4447 auto *AT = cast<AtomicSDNode>(Op);
4448 unsigned ScalarMemorySize = AT->getMemoryVT().getScalarSizeInBits();
4449 KnownBits KnownScalarMemory(ScalarMemorySize);
4450 if (const MDNode *MD = AT->getRanges())
4451 computeKnownBitsFromRangeMetadata(*MD, KnownScalarMemory);
4452
4453 switch (AT->getExtensionType()) {
4454 case ISD::ZEXTLOAD:
4455 Known = KnownScalarMemory.zext(BitWidth);
4456 break;
4457 case ISD::SEXTLOAD:
4458 Known = KnownScalarMemory.sext(BitWidth);
4459 break;
4460 case ISD::EXTLOAD:
4461 switch (TLI->getExtendForAtomicOps()) {
4462 case ISD::ZERO_EXTEND:
4463 Known = KnownScalarMemory.zext(BitWidth);
4464 break;
4465 case ISD::SIGN_EXTEND:
4466 Known = KnownScalarMemory.sext(BitWidth);
4467 break;
4468 default:
4469 Known = KnownScalarMemory.anyext(BitWidth);
4470 break;
4471 }
4472 break;
4473 case ISD::NON_EXTLOAD:
4474 Known = KnownScalarMemory;
4475 break;
4476 }
4477 assert(Known.getBitWidth() == BitWidth);
4478 }
4479 break;
4480 }
4482 if (Op.getResNo() == 1) {
4483 // The boolean result conforms to getBooleanContents.
4484 // If we know the result of a setcc has the top bits zero, use this info.
4485 // We know that we have an integer-based boolean since these operations
4486 // are only available for integer.
4487 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
4489 BitWidth > 1)
4490 Known.Zero.setBitsFrom(1);
4491 break;
4492 }
4493 [[fallthrough]];
4495 case ISD::ATOMIC_SWAP:
4506 case ISD::ATOMIC_LOAD_UMAX: {
4507 // If we are looking at the loaded value.
4508 if (Op.getResNo() == 0) {
4509 auto *AT = cast<AtomicSDNode>(Op);
4510 unsigned MemBits = AT->getMemoryVT().getScalarSizeInBits();
4511
4512 if (TLI->getExtendForAtomicOps() == ISD::ZERO_EXTEND)
4513 Known.Zero.setBitsFrom(MemBits);
4514 }
4515 break;
4516 }
4517 case ISD::FrameIndex:
4519 TLI->computeKnownBitsForFrameIndex(cast<FrameIndexSDNode>(Op)->getIndex(),
4520 Known, getMachineFunction());
4521 break;
4522
4523 default:
4524 if (Opcode < ISD::BUILTIN_OP_END)
4525 break;
4526 [[fallthrough]];
4530 // TODO: Probably okay to remove after audit; here to reduce change size
4531 // in initial enablement patch for scalable vectors
4532 if (Op.getValueType().isScalableVector())
4533 break;
4534
4535 // Allow the target to implement this method for its nodes.
4536 TLI->computeKnownBitsForTargetNode(Op, Known, DemandedElts, *this, Depth);
4537 break;
4538 }
4539
4540 return Known;
4541}
4542
4543/// Convert ConstantRange OverflowResult into SelectionDAG::OverflowKind.
4556
  // (review) Body of SelectionDAG::computeOverflowForSignedAdd(N0, N1): decides
  // whether the signed addition N0 + N1 can overflow. The signature line was
  // lost in this extraction. Returns OFK_Never when overflow is provably
  // impossible, otherwise the conservative OFK_Sometime.
4559 // X + 0 never overflow
4560 if (isNullConstant(N1))
4561 return OFK_Never;
4562
4563 // If both operands each have at least two sign bits, the addition
4564 // cannot overflow.
  // Two sign bits per operand means each value fits in BitWidth-1 bits, so
  // their sum fits in BitWidth bits and cannot wrap.
4565 if (ComputeNumSignBits(N0) > 1 && ComputeNumSignBits(N1) > 1)
4566 return OFK_Never;
4567
4568 // TODO: Add ConstantRange::signedAddMayOverflow handling.
4569 return OFK_Sometime;
4570}
4571
  // (review) Body of SelectionDAG::computeOverflowForUnsignedAdd(N0, N1):
  // decides whether the unsigned addition N0 + N1 can overflow. The signature
  // line was lost in this extraction.
4574 // X + 0 never overflow
4575 if (isNullConstant(N1))
4576 return OFK_Never;
4577
4578 // mulhi + 1 never overflow
  // The high half of a full unsigned multiply (UMUL_LOHI result 1) is at most
  // 2^BitWidth - 2, so adding a value known to be <= 1 cannot wrap.
4579 KnownBits N1Known = computeKnownBits(N1);
4580 if (N0.getOpcode() == ISD::UMUL_LOHI && N0.getResNo() == 1 &&
4581 N1Known.getMaxValue().ult(2))
4582 return OFK_Never;
4583
  // Symmetric check with the operands swapped.
4584 KnownBits N0Known = computeKnownBits(N0);
4585 if (N1.getOpcode() == ISD::UMUL_LOHI && N1.getResNo() == 1 &&
4586 N0Known.getMaxValue().ult(2))
4587 return OFK_Never;
4588
4589 // Fallback to ConstantRange::unsignedAddMayOverflow handling.
4590 ConstantRange N0Range = ConstantRange::fromKnownBits(N0Known, false);
4591 ConstantRange N1Range = ConstantRange::fromKnownBits(N1Known, false);
4592 return mapOverflowResult(N0Range.unsignedAddMayOverflow(N1Range));
4593}
4594
  // (review) Body of SelectionDAG::computeOverflowForSignedSub(N0, N1): decides
  // whether the signed subtraction N0 - N1 can overflow. The signature line was
  // lost in this extraction.
4597 // X - 0 never overflow
4598 if (isNullConstant(N1))
4599 return OFK_Never;
4600
4601 // If both operands each have at least two sign bits, the subtraction
4602 // cannot overflow.
  // Two sign bits per operand means each value fits in BitWidth-1 bits, so the
  // difference fits in BitWidth bits.
4603 if (ComputeNumSignBits(N0) > 1 && ComputeNumSignBits(N1) > 1)
4604 return OFK_Never;
4605
  // Fall back to signed-range reasoning (note the `true` for signed ranges).
4606 KnownBits N0Known = computeKnownBits(N0);
4607 KnownBits N1Known = computeKnownBits(N1);
4608 ConstantRange N0Range = ConstantRange::fromKnownBits(N0Known, true);
4609 ConstantRange N1Range = ConstantRange::fromKnownBits(N1Known, true);
4610 return mapOverflowResult(N0Range.signedSubMayOverflow(N1Range));
4611}
4612
  // (review) Body of SelectionDAG::computeOverflowForUnsignedSub(N0, N1):
  // decides whether the unsigned subtraction N0 - N1 can overflow (i.e. borrow).
  // The signature line was lost in this extraction.
4615 // X - 0 never overflow
4616 if (isNullConstant(N1))
4617 return OFK_Never;
4618
  // Intersect known-bits and opcode-specific ranges for the tightest bound
  // before asking ConstantRange whether the subtraction may wrap.
4619 ConstantRange N0Range =
4620 computeConstantRangeIncludingKnownBits(N0, /*ForSigned=*/false);
4621 ConstantRange N1Range =
4622 computeConstantRangeIncludingKnownBits(N1, /*ForSigned=*/false);
4623 return mapOverflowResult(N0Range.unsignedSubMayOverflow(N1Range));
4624}
4625
  // (review) Body of SelectionDAG::computeOverflowForUnsignedMul(N0, N1):
  // decides whether the unsigned multiplication N0 * N1 can overflow. The
  // signature line was lost in this extraction.
4628 // X * 0 and X * 1 never overflow.
4629 if (isNullConstant(N1) || isOneConstant(N1))
4630 return OFK_Never;
4631
  // NOTE(review): the declarations of N0Range/N1Range (original lines
  // 4632-4633) were lost in this extraction — presumably
  // computeConstantRangeIncludingKnownBits(N0/N1, /*ForSigned=*/false);
  // verify against the upstream source.
4634 return mapOverflowResult(N0Range.unsignedMulMayOverflow(N1Range));
4635}
4636
  // (review) Body of SelectionDAG::computeOverflowForSignedMul(N0, N1): decides
  // whether the signed multiplication N0 * N1 can overflow. The signature line
  // was lost in this extraction.
4639 // X * 0 and X * 1 never overflow.
4640 if (isNullConstant(N1) || isOneConstant(N1))
4641 return OFK_Never;
4642
4643 // Get the size of the result.
4644 unsigned BitWidth = N0.getScalarValueSizeInBits();
4645
4646 // Sum of the sign bits.
4647 unsigned SignBits = ComputeNumSignBits(N0) + ComputeNumSignBits(N1);
4648
4649 // If we have enough sign bits, then there's no overflow.
  // With SignBits > BitWidth + 1, the true product needs at most
  // 2*BitWidth - SignBits + 1 < BitWidth bits, so it always fits.
4650 if (SignBits > BitWidth + 1)
4651 return OFK_Never;
4652
4653 if (SignBits == BitWidth + 1) {
4654 // The overflow occurs when the true multiplication of the
4655 // operands is the minimum negative number.
4656 KnownBits N0Known = computeKnownBits(N0);
4657 KnownBits N1Known = computeKnownBits(N1);
4658 // If one of the operands is non-negative, then there's no
4659 // overflow.
  // INT_MIN can only arise as a product of two negative operands here, so a
  // known-non-negative operand rules it out.
4660 if (N0Known.isNonNegative() || N1Known.isNonNegative())
4661 return OFK_Never;
4662 }
4663
4664 return OFK_Sometime;
4665}
4666
4668 unsigned Depth) const {
  // (review) Convenience overload: demand all elements of Op, then defer to the
  // DemandedElts overload. The leading signature line was lost in this
  // extraction.
4669 APInt DemandedElts = getDemandAllEltsMask(Op);
4670 return computeConstantRange(Op, DemandedElts, ForSigned, Depth);
4671}
4672
4674 const APInt &DemandedElts,
4675 bool ForSigned,
4676 unsigned Depth) const {
  // (review) Computes a ConstantRange for Op over the demanded elements without
  // consulting computeKnownBits. The function-name line (original 4673,
  // SelectionDAG::computeConstantRange) was lost in this extraction.
4677 EVT VT = Op.getValueType();
4678 unsigned BitWidth = VT.getScalarSizeInBits();
4679
  // Limit recursion; the full range is the conservative "know nothing" answer.
4680 if (Depth >= MaxRecursionDepth)
4681 return ConstantRange::getFull(BitWidth);
4682
  // A (splat) constant collapses to a single-element range.
4683 if (ConstantSDNode *C = isConstOrConstSplat(Op, DemandedElts))
4684 return ConstantRange(C->getAPIntValue());
4685
4686 unsigned Opcode = Op.getOpcode();
4687 switch (Opcode) {
4688 case ISD::VSCALE: {
  // NOTE(review): the declaration providing `F` (original line 4689,
  // presumably the enclosing IR Function from getMachineFunction()) was lost
  // in this extraction — verify against the upstream source.
4690 const APInt &Multiplier = Op.getConstantOperandAPInt(0);
4691 return getVScaleRange(&F, BitWidth).multiply(Multiplier);
4692 }
4693 default:
4694 break;
4695 }
4696
4697 return ConstantRange::getFull(BitWidth);
4698}
4699
4702 unsigned Depth) const {
  // (review) Convenience overload: demand all elements of Op, then defer to the
  // DemandedElts overload. The leading signature line was lost in this
  // extraction.
4703 APInt DemandedElts = getDemandAllEltsMask(Op);
4704 return computeConstantRangeIncludingKnownBits(Op, DemandedElts, ForSigned,
4705 Depth);
4706}
4707
4709 SDValue Op, const APInt &DemandedElts, bool ForSigned,
4710 unsigned Depth) const {
  // (review) Intersects the range implied by known bits with the
  // opcode-specific range from computeConstantRange, yielding the tighter of
  // the two. The function-name line (original 4708) was lost in this
  // extraction.
4711 KnownBits Known = computeKnownBits(Op, DemandedElts, Depth);
4712 ConstantRange CR1 = ConstantRange::fromKnownBits(Known, ForSigned);
4713 ConstantRange CR2 = computeConstantRange(Op, DemandedElts, ForSigned, Depth);
  // NOTE(review): the declaration of RangeType (original lines 4714-4715) was
  // lost in this extraction — presumably a ConstantRange::PreferredRangeType
  // chosen from ForSigned (Signed vs Unsigned); verify against upstream.
4716 return CR1.intersectWith(CR2, RangeType);
4717}
4718
4720 unsigned Depth) const {
  // (review) Convenience overload: demand all elements of Val, then defer to
  // the DemandedElts overload. The leading signature line was lost in this
  // extraction.
4721 APInt DemandedElts = getDemandAllEltsMask(Val);
4722 return isKnownToBeAPowerOfTwo(Val, DemandedElts, OrZero, Depth);
4723}
4724
4726 const APInt &DemandedElts,
4727 bool OrZero, unsigned Depth) const {
  // (review) Returns true if every demanded element of Val is known to be a
  // power of two (zero also allowed when OrZero is set). The function-name
  // line (original 4725) was lost in this extraction.
4728 if (Depth >= MaxRecursionDepth)
4729 return false; // Limit search depth.
4730
4731 EVT OpVT = Val.getValueType();
4732 unsigned BitWidth = OpVT.getScalarSizeInBits();
4733 [[maybe_unused]] unsigned NumElts = DemandedElts.getBitWidth();
4734 assert((!OpVT.isScalableVector() || NumElts == 1) &&
4735 "DemandedElts for scalable vectors must be 1 to represent all lanes");
4736 assert(
4737 (!OpVT.isFixedLengthVector() || NumElts == OpVT.getVectorNumElements()) &&
4738 "Unexpected vector size");
4739
  // Predicate for a single constant: after normalizing to BitWidth, it must be
  // an exact power of two (or zero, when OrZero permits it).
4740 auto IsPowerOfTwoOrZero = [BitWidth, OrZero](const ConstantSDNode *C) {
4741 APInt V = C->getAPIntValue().zextOrTrunc(BitWidth);
4742 return (OrZero && V.isZero()) || V.isPowerOf2();
4743 };
4744
4745 // Is the constant a known power of 2 or zero?
4746 if (ISD::matchUnaryPredicate(Val, IsPowerOfTwoOrZero))
4747 return true;
4748
4749 switch (Val.getOpcode()) {
4750 case ISD::BUILD_VECTOR:
4751 // Are all operands of a build vector constant powers of two or zero?
  // Non-demanded lanes are ignored; every demanded lane must be a qualifying
  // constant.
4752 if (all_of(enumerate(Val->ops()), [&](auto P) {
4753 auto *C = dyn_cast<ConstantSDNode>(P.value());
4754 return !DemandedElts[P.index()] || (C && IsPowerOfTwoOrZero(C));
4755 }))
4756 return true;
4757 break;
4758
4759 case ISD::SPLAT_VECTOR:
4760 // Is the operand of a splat vector a constant power of two?
4761 if (auto *C = dyn_cast<ConstantSDNode>(Val->getOperand(0)))
4762 if (IsPowerOfTwoOrZero(C))
4763 return true;
4764 break;
4765
  // NOTE(review): a case label (original line 4766, presumably
  // ISD::EXTRACT_VECTOR_ELT given the operand/element handling below) was
  // lost in this extraction — verify against the upstream source.
4767 SDValue InVec = Val.getOperand(0);
4768 SDValue EltNo = Val.getOperand(1);
4769 EVT VecVT = InVec.getValueType();
4770
4771 // Skip scalable vectors or implicit extensions.
4772 if (VecVT.isScalableVector() ||
4773 OpVT.getScalarSizeInBits() != VecVT.getScalarSizeInBits())
4774 break;
4775
4776 // If we know the element index, just demand that vector element, else for
4777 // an unknown element index, ignore DemandedElts and demand them all.
4778 const unsigned NumSrcElts = VecVT.getVectorNumElements();
4779 auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
4780 APInt DemandedSrcElts =
4781 ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts)
4782 ? APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue())
4783 : APInt::getAllOnes(NumSrcElts);
4784 return isKnownToBeAPowerOfTwo(InVec, DemandedSrcElts, OrZero, Depth + 1);
4785 }
4786
4787 case ISD::AND: {
4788 // Looking for `x & -x` pattern:
4789 // If x == 0:
4790 // x & -x -> 0
4791 // If x != 0:
4792 // x & -x -> non-zero pow2
4793 // so if we find the pattern return whether we know `x` is non-zero.
4794 SDValue X, Z;
4795 if (sd_match(Val, m_And(m_Value(X), m_Neg(m_Deferred(X)))) ||
4796 (sd_match(Val, m_And(m_Value(X), m_Sub(m_Value(Z), m_Deferred(X)))) &&
4797 MaskedVectorIsZero(Z, DemandedElts, Depth + 1)))
4798 return OrZero || isKnownNeverZero(X, DemandedElts, Depth);
4799 break;
4800 }
4801
4802 case ISD::SHL: {
4803 // A left-shift of a constant one will have exactly one bit set because
4804 // shifting the bit off the end is undefined.
4805 auto *C = isConstOrConstSplat(Val.getOperand(0), DemandedElts);
4806 if (C && C->getAPIntValue() == 1)
4807 return true;
  // Otherwise a shift of a power of two stays a power of two unless it
  // shifts to zero, hence the never-zero / OrZero guard.
4808 return (OrZero || isKnownNeverZero(Val, DemandedElts, Depth)) &&
4809 isKnownToBeAPowerOfTwo(Val.getOperand(0), DemandedElts, OrZero,
4810 Depth + 1);
4811 }
4812
4813 case ISD::SRL: {
4814 // A logical right-shift of a constant sign-bit will have exactly
4815 // one bit set.
4816 auto *C = isConstOrConstSplat(Val.getOperand(0), DemandedElts);
4817 if (C && C->getAPIntValue().isSignMask())
4818 return true;
4819 return (OrZero || isKnownNeverZero(Val, DemandedElts, Depth)) &&
4820 isKnownToBeAPowerOfTwo(Val.getOperand(0), DemandedElts, OrZero,
4821 Depth + 1);
4822 }
4823
4824 case ISD::TRUNCATE:
  // Truncation of a power of two is a power of two unless the set bit is
  // truncated away, hence the never-zero / OrZero guard.
4825 return (OrZero || isKnownNeverZero(Val, DemandedElts, Depth)) &&
4826 isKnownToBeAPowerOfTwo(Val.getOperand(0), DemandedElts, OrZero,
4827 Depth + 1);
4828
4829 case ISD::ROTL:
4830 case ISD::ROTR:
  // Rotates permute bits without dropping any, so popcount is preserved.
4831 return isKnownToBeAPowerOfTwo(Val.getOperand(0), DemandedElts, OrZero,
4832 Depth + 1);
4833 case ISD::BSWAP:
4834 case ISD::BITREVERSE:
  // Byte-swap / bit-reverse also permute bits; popcount is preserved.
4835 return isKnownToBeAPowerOfTwo(Val.getOperand(0), DemandedElts, OrZero,
4836 Depth + 1);
4837
4838 case ISD::SMIN:
4839 case ISD::SMAX:
4840 case ISD::UMIN:
4841 case ISD::UMAX:
  // min/max select one of the two operands, so both must qualify.
4842 return isKnownToBeAPowerOfTwo(Val.getOperand(1), DemandedElts, OrZero,
4843 Depth + 1) &&
4844 isKnownToBeAPowerOfTwo(Val.getOperand(0), DemandedElts, OrZero,
4845 Depth + 1);
4846
4847 case ISD::SELECT:
4848 case ISD::VSELECT:
  // Both the true (operand 1) and false (operand 2) values must qualify.
4849 return isKnownToBeAPowerOfTwo(Val.getOperand(2), DemandedElts, OrZero,
4850 Depth + 1) &&
4851 isKnownToBeAPowerOfTwo(Val.getOperand(1), DemandedElts, OrZero,
4852 Depth + 1);
4853
4854 case ISD::ZERO_EXTEND:
  // Zero-extension preserves the value; require a strict (non-zero) power of
  // two from the source since OrZero is not forwarded here.
4855 return isKnownToBeAPowerOfTwo(Val.getOperand(0), /*OrZero=*/false,
4856 Depth + 1);
4857
4858 case ISD::VSCALE:
4859 // vscale(power-of-two) is a power-of-two
4860 return isKnownToBeAPowerOfTwo(Val.getOperand(0), /*OrZero=*/false,
4861 Depth + 1);
4862
4863 case ISD::VECTOR_SHUFFLE: {
  // NOTE(review): the declaration of SVN (original line 4864, presumably
  // cast<ShuffleVectorSDNode>(Val)) was lost in this extraction — verify
  // against the upstream source.
4865 // Demanded elements with undef shuffle mask elements are unknown
4866 // - we cannot guarantee they are a power of two, so return false.
4867 APInt DemandedLHS, DemandedRHS;
4869 assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
4870 if (!getShuffleDemandedElts(NumElts, SVN->getMask(), DemandedElts,
4871 DemandedLHS, DemandedRHS))
4872 return false;
4873
4874 // All demanded elements from LHS must be known power of two.
4875 if (!!DemandedLHS && !isKnownToBeAPowerOfTwo(Val.getOperand(0), DemandedLHS,
4876 OrZero, Depth + 1))
4877 return false;
4878
4879 // All demanded elements from RHS must be known power of two.
4880 if (!!DemandedRHS && !isKnownToBeAPowerOfTwo(Val.getOperand(1), DemandedRHS,
4881 OrZero, Depth + 1))
4882 return false;
4883
4884 return true;
4885 }
4886 }
4887
4888 // More could be done here, though the above checks are enough
4889 // to handle some common cases.
4890 return false;
4891}
4892
  // (review) Body of an FP power-of-two query (signature line, original 4893,
  // lost in this extraction): returns true if Val is known to have a
  // power-of-two absolute value.
  // A (splat) FP constant qualifies when |value| is an exact power of two.
4894 if (ConstantFPSDNode *C1 = isConstOrConstSplatFP(Val, true))
4895 return C1->getValueAPF().getExactLog2Abs() >= 0;
4896
  // int->fp conversion of an integer power of two yields an FP power of two
  // (conversion is exact or rounds to a power of two).
4897 if (Val.getOpcode() == ISD::UINT_TO_FP || Val.getOpcode() == ISD::SINT_TO_FP)
4898 return isKnownToBeAPowerOfTwo(Val.getOperand(0), Depth + 1);
4899
4900 return false;
4901}
4902
  // (review) Tail of the convenience overload of ComputeNumSignBits (signature
  // line, original 4903, lost in this extraction): demand all elements, then
  // defer to the DemandedElts overload.
4904 APInt DemandedElts = getDemandAllEltsMask(Op);
4905 return ComputeNumSignBits(Op, DemandedElts, Depth);
4906}
4907
4908unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
4909 unsigned Depth) const {
4910 EVT VT = Op.getValueType();
4911 assert((VT.isInteger() || VT.isFloatingPoint()) && "Invalid VT!");
4912 unsigned VTBits = VT.getScalarSizeInBits();
4913 unsigned NumElts = DemandedElts.getBitWidth();
4914 unsigned Tmp, Tmp2;
4915 unsigned FirstAnswer = 1;
4916
4917 assert((!VT.isScalableVector() || NumElts == 1) &&
4918 "DemandedElts for scalable vectors must be 1 to represent all lanes");
4919
4920 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
4921 const APInt &Val = C->getAPIntValue();
4922 return Val.getNumSignBits();
4923 }
4924
4925 if (Depth >= MaxRecursionDepth)
4926 return 1; // Limit search depth.
4927
4928 if (!DemandedElts)
4929 return 1; // No demanded elts, better to assume we don't know anything.
4930
4931 unsigned Opcode = Op.getOpcode();
4932 switch (Opcode) {
4933 default: break;
4934 case ISD::AssertSext:
4935 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
4936 return VTBits-Tmp+1;
4937 case ISD::AssertZext:
4938 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
4939 return VTBits-Tmp;
4940 case ISD::FREEZE:
4941 if (isGuaranteedNotToBeUndefOrPoison(Op.getOperand(0), DemandedElts,
4943 return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
4944 break;
4945 case ISD::MERGE_VALUES:
4946 return ComputeNumSignBits(Op.getOperand(Op.getResNo()), DemandedElts,
4947 Depth + 1);
4948 case ISD::SPLAT_VECTOR: {
4949 // Check if the sign bits of source go down as far as the truncated value.
4950 unsigned NumSrcBits = Op.getOperand(0).getValueSizeInBits();
4951 unsigned NumSrcSignBits = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
4952 if (NumSrcSignBits > (NumSrcBits - VTBits))
4953 return NumSrcSignBits - (NumSrcBits - VTBits);
4954 break;
4955 }
4956 case ISD::BUILD_VECTOR:
4957 assert(!VT.isScalableVector());
4958 Tmp = VTBits;
4959 for (unsigned i = 0, e = Op.getNumOperands(); (i < e) && (Tmp > 1); ++i) {
4960 if (!DemandedElts[i])
4961 continue;
4962
4963 SDValue SrcOp = Op.getOperand(i);
4964 // BUILD_VECTOR can implicitly truncate sources, we handle this specially
4965 // for constant nodes to ensure we only look at the sign bits.
4967 APInt T = C->getAPIntValue().trunc(VTBits);
4968 Tmp2 = T.getNumSignBits();
4969 } else {
4970 Tmp2 = ComputeNumSignBits(SrcOp, Depth + 1);
4971
4972 if (SrcOp.getValueSizeInBits() != VTBits) {
4973 assert(SrcOp.getValueSizeInBits() > VTBits &&
4974 "Expected BUILD_VECTOR implicit truncation");
4975 unsigned ExtraBits = SrcOp.getValueSizeInBits() - VTBits;
4976 Tmp2 = (Tmp2 > ExtraBits ? Tmp2 - ExtraBits : 1);
4977 }
4978 }
4979 Tmp = std::min(Tmp, Tmp2);
4980 }
4981 return Tmp;
4982
4983 case ISD::VECTOR_COMPRESS: {
4984 SDValue Vec = Op.getOperand(0);
4985 SDValue PassThru = Op.getOperand(2);
4986 Tmp = ComputeNumSignBits(PassThru, DemandedElts, Depth + 1);
4987 if (Tmp == 1)
4988 return 1;
4989 Tmp2 = ComputeNumSignBits(Vec, Depth + 1);
4990 Tmp = std::min(Tmp, Tmp2);
4991 return Tmp;
4992 }
4993
4994 case ISD::VECTOR_SHUFFLE: {
4995 // Collect the minimum number of sign bits that are shared by every vector
4996 // element referenced by the shuffle.
4997 APInt DemandedLHS, DemandedRHS;
4999 assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
5000 if (!getShuffleDemandedElts(NumElts, SVN->getMask(), DemandedElts,
5001 DemandedLHS, DemandedRHS))
5002 return 1;
5003
5004 Tmp = std::numeric_limits<unsigned>::max();
5005 if (!!DemandedLHS)
5006 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1);
5007 if (!!DemandedRHS) {
5008 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1);
5009 Tmp = std::min(Tmp, Tmp2);
5010 }
5011 // If we don't know anything, early out and try computeKnownBits fall-back.
5012 if (Tmp == 1)
5013 break;
5014 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
5015 return Tmp;
5016 }
5017
5018 case ISD::BITCAST: {
5019 if (VT.isScalableVector())
5020 break;
5021 SDValue N0 = Op.getOperand(0);
5022 EVT SrcVT = N0.getValueType();
5023 unsigned SrcBits = SrcVT.getScalarSizeInBits();
5024
5025 // Ignore bitcasts from unsupported types..
5026 if (!(SrcVT.isInteger() || SrcVT.isFloatingPoint()))
5027 break;
5028
5029 // Fast handling of 'identity' bitcasts.
5030 if (VTBits == SrcBits)
5031 return ComputeNumSignBits(N0, DemandedElts, Depth + 1);
5032
5033 bool IsLE = getDataLayout().isLittleEndian();
5034
5035 // Bitcast 'large element' scalar/vector to 'small element' vector.
5036 if ((SrcBits % VTBits) == 0) {
5037 assert(VT.isVector() && "Expected bitcast to vector");
5038
5039 unsigned Scale = SrcBits / VTBits;
5040 APInt SrcDemandedElts =
5041 APIntOps::ScaleBitMask(DemandedElts, NumElts / Scale);
5042
5043 // Fast case - sign splat can be simply split across the small elements.
5044 Tmp = ComputeNumSignBits(N0, SrcDemandedElts, Depth + 1);
5045 if (Tmp == SrcBits)
5046 return VTBits;
5047
5048 // Slow case - determine how far the sign extends into each sub-element.
5049 Tmp2 = VTBits;
5050 for (unsigned i = 0; i != NumElts; ++i)
5051 if (DemandedElts[i]) {
5052 unsigned SubOffset = i % Scale;
5053 SubOffset = (IsLE ? ((Scale - 1) - SubOffset) : SubOffset);
5054 SubOffset = SubOffset * VTBits;
5055 if (Tmp <= SubOffset)
5056 return 1;
5057 Tmp2 = std::min(Tmp2, Tmp - SubOffset);
5058 }
5059 return Tmp2;
5060 }
5061 break;
5062 }
5063
5065 // FP_TO_SINT_SAT produces a signed value that fits in the saturating VT.
5066 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits();
5067 return VTBits - Tmp + 1;
5068 case ISD::SIGN_EXTEND:
5069 Tmp = VTBits - Op.getOperand(0).getScalarValueSizeInBits();
5070 return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1) + Tmp;
5072 // Max of the input and what this extends.
5073 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits();
5074 Tmp = VTBits-Tmp+1;
5075 Tmp2 = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
5076 return std::max(Tmp, Tmp2);
5078 if (VT.isScalableVector())
5079 break;
5080 SDValue Src = Op.getOperand(0);
5081 EVT SrcVT = Src.getValueType();
5082 APInt DemandedSrcElts = DemandedElts.zext(SrcVT.getVectorNumElements());
5083 Tmp = VTBits - SrcVT.getScalarSizeInBits();
5084 return ComputeNumSignBits(Src, DemandedSrcElts, Depth+1) + Tmp;
5085 }
5086 case ISD::SRA:
5087 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
5088 // SRA X, C -> adds C sign bits.
5089 if (std::optional<unsigned> ShAmt =
5090 getValidMinimumShiftAmount(Op, DemandedElts, Depth + 1))
5091 Tmp = std::min(Tmp + *ShAmt, VTBits);
5092 return Tmp;
5093 case ISD::SHL:
5094 if (std::optional<ConstantRange> ShAmtRange =
5095 getValidShiftAmountRange(Op, DemandedElts, Depth + 1)) {
5096 unsigned MaxShAmt = ShAmtRange->getUnsignedMax().getZExtValue();
5097 unsigned MinShAmt = ShAmtRange->getUnsignedMin().getZExtValue();
5098 // Try to look through ZERO/SIGN/ANY_EXTEND. If all extended bits are
5099 // shifted out, then we can compute the number of sign bits for the
5100 // operand being extended. A future improvement could be to pass along the
5101 // "shifted left by" information in the recursive calls to
5102 // ComputeKnownSignBits. Allowing us to handle this more generically.
5103 if (ISD::isExtOpcode(Op.getOperand(0).getOpcode())) {
5104 SDValue Ext = Op.getOperand(0);
5105 EVT ExtVT = Ext.getValueType();
5106 SDValue Extendee = Ext.getOperand(0);
5107 EVT ExtendeeVT = Extendee.getValueType();
5108 unsigned SizeDifference =
5109 ExtVT.getScalarSizeInBits() - ExtendeeVT.getScalarSizeInBits();
5110 if (SizeDifference <= MinShAmt) {
5111 Tmp = SizeDifference +
5112 ComputeNumSignBits(Extendee, DemandedElts, Depth + 1);
5113 if (MaxShAmt < Tmp)
5114 return Tmp - MaxShAmt;
5115 }
5116 }
5117 // shl destroys sign bits, ensure it doesn't shift out all sign bits.
5118 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
5119 if (MaxShAmt < Tmp)
5120 return Tmp - MaxShAmt;
5121 }
5122 break;
5123 case ISD::AND:
5124 case ISD::OR:
5125 case ISD::XOR: // NOT is handled here.
5126 // Logical binary ops preserve the number of sign bits at the worst.
5127 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
5128 if (Tmp != 1) {
5129 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1);
5130 FirstAnswer = std::min(Tmp, Tmp2);
5131 // We computed what we know about the sign bits as our first
5132 // answer. Now proceed to the generic code that uses
5133 // computeKnownBits, and pick whichever answer is better.
5134 }
5135 break;
5136
5137 case ISD::SELECT:
5138 case ISD::VSELECT:
5139 Tmp = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1);
5140 if (Tmp == 1) return 1; // Early out.
5141 Tmp2 = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1);
5142 return std::min(Tmp, Tmp2);
5143 case ISD::SELECT_CC:
5144 Tmp = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1);
5145 if (Tmp == 1) return 1; // Early out.
5146 Tmp2 = ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth+1);
5147 return std::min(Tmp, Tmp2);
5148
5149 case ISD::SMIN:
5150 case ISD::SMAX: {
5151 // If we have a clamp pattern, we know that the number of sign bits will be
5152 // the minimum of the clamp min/max range.
5153 bool IsMax = (Opcode == ISD::SMAX);
5154 ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr;
5155 if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts)))
5156 if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX))
5157 CstHigh =
5158 isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts);
5159 if (CstLow && CstHigh) {
5160 if (!IsMax)
5161 std::swap(CstLow, CstHigh);
5162 if (CstLow->getAPIntValue().sle(CstHigh->getAPIntValue())) {
5163 Tmp = CstLow->getAPIntValue().getNumSignBits();
5164 Tmp2 = CstHigh->getAPIntValue().getNumSignBits();
5165 return std::min(Tmp, Tmp2);
5166 }
5167 }
5168
5169 // Fallback - just get the minimum number of sign bits of the operands.
5170 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
5171 if (Tmp == 1)
5172 return 1; // Early out.
5173 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
5174 return std::min(Tmp, Tmp2);
5175 }
5176 case ISD::UMIN:
5177 case ISD::UMAX:
5178 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
5179 if (Tmp == 1)
5180 return 1; // Early out.
5181 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
5182 return std::min(Tmp, Tmp2);
5183 case ISD::SSUBO_CARRY:
5184 case ISD::USUBO_CARRY:
5185 // sub_carry(x,x,c) -> 0/-1 (sext carry)
5186 if (Op.getResNo() == 0 && Op.getOperand(0) == Op.getOperand(1))
5187 return VTBits;
5188 [[fallthrough]];
5189 case ISD::SADDO:
5190 case ISD::UADDO:
5191 case ISD::SADDO_CARRY:
5192 case ISD::UADDO_CARRY:
5193 case ISD::SSUBO:
5194 case ISD::USUBO:
5195 case ISD::SMULO:
5196 case ISD::UMULO:
5197 if (Op.getResNo() != 1)
5198 break;
5199 // The boolean result conforms to getBooleanContents. Fall through.
5200 // If setcc returns 0/-1, all bits are sign bits.
5201 // We know that we have an integer-based boolean since these operations
5202 // are only available for integer.
5203 if (TLI->getBooleanContents(VT.isVector(), false) ==
5205 return VTBits;
5206 break;
5207 case ISD::SETCC:
5208 case ISD::SETCCCARRY:
5209 case ISD::STRICT_FSETCC:
5210 case ISD::STRICT_FSETCCS: {
5211 unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0;
5212 // If setcc returns 0/-1, all bits are sign bits.
5213 if (TLI->getBooleanContents(Op.getOperand(OpNo).getValueType()) ==
5215 return VTBits;
5216 break;
5217 }
5218 case ISD::ROTL:
5219 case ISD::ROTR:
5220 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
5221
5222 // If we're rotating an 0/-1 value, then it stays an 0/-1 value.
5223 if (Tmp == VTBits)
5224 return VTBits;
5225
5226 if (ConstantSDNode *C =
5227 isConstOrConstSplat(Op.getOperand(1), DemandedElts)) {
5228 unsigned RotAmt = C->getAPIntValue().urem(VTBits);
5229
5230 // Handle rotate right by N like a rotate left by 32-N.
5231 if (Opcode == ISD::ROTR)
5232 RotAmt = (VTBits - RotAmt) % VTBits;
5233
5234 // If we aren't rotating out all of the known-in sign bits, return the
5235 // number that are left. This handles rotl(sext(x), 1) for example.
5236 if (Tmp > (RotAmt + 1)) return (Tmp - RotAmt);
5237 }
5238 break;
5239 case ISD::ADD:
5240 case ISD::ADDC:
5241 // TODO: Move Operand 1 check before Operand 0 check
5242 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
5243 if (Tmp == 1) return 1; // Early out.
5244
5245 // Special case decrementing a value (ADD X, -1):
5246 if (ConstantSDNode *CRHS =
5247 isConstOrConstSplat(Op.getOperand(1), DemandedElts))
5248 if (CRHS->isAllOnes()) {
5249 KnownBits Known =
5250 computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
5251
5252 // If the input is known to be 0 or 1, the output is 0/-1, which is all
5253 // sign bits set.
5254 if ((Known.Zero | 1).isAllOnes())
5255 return VTBits;
5256
5257 // If we are subtracting one from a positive number, there is no carry
5258 // out of the result.
5259 if (Known.isNonNegative())
5260 return Tmp;
5261 }
5262
5263 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
5264 if (Tmp2 == 1) return 1; // Early out.
5265
5266 // Add can have at most one carry bit. Thus we know that the output
5267 // is, at worst, one more bit than the inputs.
5268 return std::min(Tmp, Tmp2) - 1;
5269 case ISD::SUB:
5270 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
5271 if (Tmp2 == 1) return 1; // Early out.
5272
5273 // Handle NEG.
5274 if (ConstantSDNode *CLHS =
5275 isConstOrConstSplat(Op.getOperand(0), DemandedElts))
5276 if (CLHS->isZero()) {
5277 KnownBits Known =
5278 computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
5279 // If the input is known to be 0 or 1, the output is 0/-1, which is all
5280 // sign bits set.
5281 if ((Known.Zero | 1).isAllOnes())
5282 return VTBits;
5283
5284 // If the input is known to be positive (the sign bit is known clear),
5285 // the output of the NEG has the same number of sign bits as the input.
5286 if (Known.isNonNegative())
5287 return Tmp2;
5288
5289 // Otherwise, we treat this like a SUB.
5290 }
5291
5292 // Sub can have at most one carry bit. Thus we know that the output
5293 // is, at worst, one more bit than the inputs.
5294 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
5295 if (Tmp == 1) return 1; // Early out.
5296 return std::min(Tmp, Tmp2) - 1;
5297 case ISD::MUL: {
5298 // The output of the Mul can be at most twice the valid bits in the inputs.
5299 unsigned SignBitsOp0 = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
5300 if (SignBitsOp0 == 1)
5301 break;
5302 unsigned SignBitsOp1 = ComputeNumSignBits(Op.getOperand(1), Depth + 1);
5303 if (SignBitsOp1 == 1)
5304 break;
5305 unsigned OutValidBits =
5306 (VTBits - SignBitsOp0 + 1) + (VTBits - SignBitsOp1 + 1);
5307 return OutValidBits > VTBits ? 1 : VTBits - OutValidBits + 1;
5308 }
5309 case ISD::AVGCEILS:
5310 case ISD::AVGFLOORS:
5311 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
5312 if (Tmp == 1)
5313 return 1; // Early out.
5314 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
5315 return std::min(Tmp, Tmp2);
5316 case ISD::SREM:
5317 // The sign bit is the LHS's sign bit, except when the result of the
5318 // remainder is zero. The magnitude of the result should be less than or
5319 // equal to the magnitude of the LHS. Therefore, the result should have
5320 // at least as many sign bits as the left hand side.
5321 return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
5322 case ISD::TRUNCATE: {
5323 // Check if the sign bits of source go down as far as the truncated value.
5324 unsigned NumSrcBits = Op.getOperand(0).getScalarValueSizeInBits();
5325 unsigned NumSrcSignBits = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
5326 if (NumSrcSignBits > (NumSrcBits - VTBits))
5327 return NumSrcSignBits - (NumSrcBits - VTBits);
5328 break;
5329 }
5330 case ISD::EXTRACT_ELEMENT: {
5331 if (VT.isScalableVector())
5332 break;
5333 const int KnownSign = ComputeNumSignBits(Op.getOperand(0), Depth+1);
5334 const int BitWidth = Op.getValueSizeInBits();
5335 const int Items = Op.getOperand(0).getValueSizeInBits() / BitWidth;
5336
5337 // Get reverse index (starting from 1), Op1 value indexes elements from
5338 // little end. Sign starts at big end.
5339 const int rIndex = Items - 1 - Op.getConstantOperandVal(1);
5340
5341 // If the sign portion ends in our element the subtraction gives correct
5342 // result. Otherwise it gives either negative or > bitwidth result
5343 return std::clamp(KnownSign - rIndex * BitWidth, 1, BitWidth);
5344 }
5346 if (VT.isScalableVector())
5347 break;
5348 // If we know the element index, split the demand between the
5349 // source vector and the inserted element, otherwise assume we need
5350 // the original demanded vector elements and the value.
5351 SDValue InVec = Op.getOperand(0);
5352 SDValue InVal = Op.getOperand(1);
5353 SDValue EltNo = Op.getOperand(2);
5354 bool DemandedVal = true;
5355 APInt DemandedVecElts = DemandedElts;
5356 auto *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
5357 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
5358 unsigned EltIdx = CEltNo->getZExtValue();
5359 DemandedVal = !!DemandedElts[EltIdx];
5360 DemandedVecElts.clearBit(EltIdx);
5361 }
5362 Tmp = std::numeric_limits<unsigned>::max();
5363 if (DemandedVal) {
5364 // TODO - handle implicit truncation of inserted elements.
5365 if (InVal.getScalarValueSizeInBits() != VTBits)
5366 break;
5367 Tmp2 = ComputeNumSignBits(InVal, Depth + 1);
5368 Tmp = std::min(Tmp, Tmp2);
5369 }
5370 if (!!DemandedVecElts) {
5371 Tmp2 = ComputeNumSignBits(InVec, DemandedVecElts, Depth + 1);
5372 Tmp = std::min(Tmp, Tmp2);
5373 }
5374 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
5375 return Tmp;
5376 }
5378 SDValue InVec = Op.getOperand(0);
5379 SDValue EltNo = Op.getOperand(1);
5380 EVT VecVT = InVec.getValueType();
5381 // ComputeNumSignBits not yet implemented for scalable vectors.
5382 if (VecVT.isScalableVector())
5383 break;
5384 const unsigned BitWidth = Op.getValueSizeInBits();
5385 const unsigned EltBitWidth = Op.getOperand(0).getScalarValueSizeInBits();
5386 const unsigned NumSrcElts = VecVT.getVectorNumElements();
5387
5388 // If BitWidth > EltBitWidth the value is anyext:ed, and we do not know
5389 // anything about sign bits. But if the sizes match we can derive knowledge
5390 // about sign bits from the vector operand.
5391 if (BitWidth != EltBitWidth)
5392 break;
5393
5394 // If we know the element index, just demand that vector element, else for
5395 // an unknown element index, ignore DemandedElts and demand them all.
5396 APInt DemandedSrcElts = APInt::getAllOnes(NumSrcElts);
5397 auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
5398 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
5399 DemandedSrcElts =
5400 APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue());
5401
5402 return ComputeNumSignBits(InVec, DemandedSrcElts, Depth + 1);
5403 }
5405 // Offset the demanded elts by the subvector index.
5406 SDValue Src = Op.getOperand(0);
5407
5408 APInt DemandedSrcElts;
5409 if (Src.getValueType().isScalableVector())
5410 DemandedSrcElts = APInt(1, 1);
5411 else {
5412 uint64_t Idx = Op.getConstantOperandVal(1);
5413 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
5414 DemandedSrcElts = DemandedElts.zext(NumSrcElts).shl(Idx);
5415 }
5416 return ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1);
5417 }
5418 case ISD::CONCAT_VECTORS: {
5419 if (VT.isScalableVector())
5420 break;
5421 // Determine the minimum number of sign bits across all demanded
5422 // elts of the input vectors. Early out if the result is already 1.
5423 Tmp = std::numeric_limits<unsigned>::max();
5424 EVT SubVectorVT = Op.getOperand(0).getValueType();
5425 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
5426 unsigned NumSubVectors = Op.getNumOperands();
5427 for (unsigned i = 0; (i < NumSubVectors) && (Tmp > 1); ++i) {
5428 APInt DemandedSub =
5429 DemandedElts.extractBits(NumSubVectorElts, i * NumSubVectorElts);
5430 if (!DemandedSub)
5431 continue;
5432 Tmp2 = ComputeNumSignBits(Op.getOperand(i), DemandedSub, Depth + 1);
5433 Tmp = std::min(Tmp, Tmp2);
5434 }
5435 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
5436 return Tmp;
5437 }
5438 case ISD::INSERT_SUBVECTOR: {
5439 if (VT.isScalableVector())
5440 break;
5441 // Demand any elements from the subvector and the remainder from the src its
5442 // inserted into.
5443 SDValue Src = Op.getOperand(0);
5444 SDValue Sub = Op.getOperand(1);
5445 uint64_t Idx = Op.getConstantOperandVal(2);
5446 unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
5447 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
5448 APInt DemandedSrcElts = DemandedElts;
5449 DemandedSrcElts.clearBits(Idx, Idx + NumSubElts);
5450
5451 Tmp = std::numeric_limits<unsigned>::max();
5452 if (!!DemandedSubElts) {
5453 Tmp = ComputeNumSignBits(Sub, DemandedSubElts, Depth + 1);
5454 if (Tmp == 1)
5455 return 1; // early-out
5456 }
5457 if (!!DemandedSrcElts) {
5458 Tmp2 = ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1);
5459 Tmp = std::min(Tmp, Tmp2);
5460 }
5461 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
5462 return Tmp;
5463 }
5464 case ISD::LOAD: {
5465 // If we are looking at the loaded value of the SDNode.
5466 if (Op.getResNo() != 0)
5467 break;
5468
5470 if (const MDNode *Ranges = LD->getRanges()) {
5471 if (DemandedElts != 1)
5472 break;
5473
5475 if (VTBits > CR.getBitWidth()) {
5476 switch (LD->getExtensionType()) {
5477 case ISD::SEXTLOAD:
5478 CR = CR.signExtend(VTBits);
5479 break;
5480 case ISD::ZEXTLOAD:
5481 CR = CR.zeroExtend(VTBits);
5482 break;
5483 default:
5484 break;
5485 }
5486 }
5487
5488 if (VTBits != CR.getBitWidth())
5489 break;
5490 return std::min(CR.getSignedMin().getNumSignBits(),
5492 }
5493
5494 unsigned ExtType = LD->getExtensionType();
5495 switch (ExtType) {
5496 default:
5497 break;
5498 case ISD::SEXTLOAD: // e.g. i16->i32 = '17' bits known.
5499 Tmp = LD->getMemoryVT().getScalarSizeInBits();
5500 return VTBits - Tmp + 1;
5501 case ISD::ZEXTLOAD: // e.g. i16->i32 = '16' bits known.
5502 Tmp = LD->getMemoryVT().getScalarSizeInBits();
5503 return VTBits - Tmp;
5504 case ISD::NON_EXTLOAD:
5505 if (const Constant *Cst = TLI->getTargetConstantFromLoad(LD)) {
5506 // We only need to handle vectors - computeKnownBits should handle
5507 // scalar cases.
5508 Type *CstTy = Cst->getType();
5509 if (CstTy->isVectorTy() && !VT.isScalableVector() &&
5510 (NumElts * VTBits) == CstTy->getPrimitiveSizeInBits() &&
5511 VTBits == CstTy->getScalarSizeInBits()) {
5512 Tmp = VTBits;
5513 for (unsigned i = 0; i != NumElts; ++i) {
5514 if (!DemandedElts[i])
5515 continue;
5516 if (Constant *Elt = Cst->getAggregateElement(i)) {
5517 if (auto *CInt = dyn_cast<ConstantInt>(Elt)) {
5518 const APInt &Value = CInt->getValue();
5519 Tmp = std::min(Tmp, Value.getNumSignBits());
5520 continue;
5521 }
5522 if (auto *CFP = dyn_cast<ConstantFP>(Elt)) {
5523 APInt Value = CFP->getValueAPF().bitcastToAPInt();
5524 Tmp = std::min(Tmp, Value.getNumSignBits());
5525 continue;
5526 }
5527 }
5528 // Unknown type. Conservatively assume no bits match sign bit.
5529 return 1;
5530 }
5531 return Tmp;
5532 }
5533 }
5534 break;
5535 }
5536
5537 break;
5538 }
5541 case ISD::ATOMIC_SWAP:
5553 case ISD::ATOMIC_LOAD: {
5554 auto *AT = cast<AtomicSDNode>(Op);
5555 // If we are looking at the loaded value.
5556 if (Op.getResNo() == 0) {
5557 Tmp = AT->getMemoryVT().getScalarSizeInBits();
5558 if (Tmp == VTBits)
5559 return 1; // early-out
5560
5561 // For atomic_load, prefer to use the extension type.
5562 if (Op->getOpcode() == ISD::ATOMIC_LOAD) {
5563 switch (AT->getExtensionType()) {
5564 default:
5565 break;
5566 case ISD::SEXTLOAD:
5567 return VTBits - Tmp + 1;
5568 case ISD::ZEXTLOAD:
5569 return VTBits - Tmp;
5570 }
5571 }
5572
5573 if (TLI->getExtendForAtomicOps() == ISD::SIGN_EXTEND)
5574 return VTBits - Tmp + 1;
5575 if (TLI->getExtendForAtomicOps() == ISD::ZERO_EXTEND)
5576 return VTBits - Tmp;
5577 }
5578 break;
5579 }
5580 }
5581
5582 // Allow the target to implement this method for its nodes.
5583 if (Opcode >= ISD::BUILTIN_OP_END ||
5584 Opcode == ISD::INTRINSIC_WO_CHAIN ||
5585 Opcode == ISD::INTRINSIC_W_CHAIN ||
5586 Opcode == ISD::INTRINSIC_VOID) {
5587 // TODO: This can probably be removed once target code is audited. This
5588 // is here purely to reduce patch size and review complexity.
5589 if (!VT.isScalableVector()) {
5590 unsigned NumBits =
5591 TLI->ComputeNumSignBitsForTargetNode(Op, DemandedElts, *this, Depth);
5592 if (NumBits > 1)
5593 FirstAnswer = std::max(FirstAnswer, NumBits);
5594 }
5595 }
5596
5597 // Finally, if we can prove that the top bits of the result are 0's or 1's,
5598 // use this information.
5599 KnownBits Known = computeKnownBits(Op, DemandedElts, Depth);
5600 return std::max(FirstAnswer, Known.countMinSignBits());
5601}
5602
5604 unsigned Depth) const {
// NOTE(review): the opening line of this definition (original line 5603) was
// dropped by the docs extraction; from the body it appears to be
// SelectionDAG::ComputeMaxSignificantBits(SDValue Op, unsigned Depth) --
// confirm against the upstream source.
// Max significant bits = scalar width minus the bits known to be redundant
// copies of the sign bit, plus one for the sign bit itself.
5605 unsigned SignBits = ComputeNumSignBits(Op, Depth);
5606 return Op.getScalarValueSizeInBits() - SignBits + 1;
5607}
5608
5610 const APInt &DemandedElts,
5611 unsigned Depth) const {
// NOTE(review): the first signature line (original line 5609) was dropped by
// the docs extraction; this is the DemandedElts overload of
// ComputeMaxSignificantBits -- confirm against the upstream source.
// Same computation as the mask-free overload, restricted to DemandedElts.
5612 unsigned SignBits = ComputeNumSignBits(Op, DemandedElts, Depth);
5613 return Op.getScalarValueSizeInBits() - SignBits + 1;
5614}
5615
5617 UndefPoisonKind Kind,
5618 unsigned Depth) const {
// NOTE(review): the first signature line (original line 5616) was dropped by
// the docs extraction; this is the convenience overload of
// isGuaranteedNotToBeUndefOrPoison that takes no element mask -- confirm
// against the upstream source.
5619 // Early out for FREEZE.
5620 if (Op.getOpcode() == ISD::FREEZE)
5621 return true;
5622
// Demand every element, then defer to the DemandedElts overload below.
5623 APInt DemandedElts = getDemandAllEltsMask(Op);
5624 return isGuaranteedNotToBeUndefOrPoison(Op, DemandedElts, Kind, Depth);
5625}
5626
5628 const APInt &DemandedElts,
5629 UndefPoisonKind Kind,
5630 unsigned Depth) const {
// NOTE(review): the first signature line (original line 5627) was dropped by
// the docs extraction; this is the main DemandedElts overload of
// isGuaranteedNotToBeUndefOrPoison -- confirm against the upstream source.
// Returns true only when Op is provably free of undef and/or poison (as
// selected by Kind) in every demanded element. Conservative: false means
// "cannot prove", not "is undef/poison".
5631 unsigned Opcode = Op.getOpcode();
5632
5633 // Early out for FREEZE.
5634 if (Opcode == ISD::FREEZE)
5635 return true;
5636
5637 if (Depth >= MaxRecursionDepth)
5638 return false; // Limit search depth.
5639
// Integer/FP constants are concrete values, never undef/poison.
5640 if (isIntOrFPConstant(Op))
5641 return true;
5642
5643 switch (Opcode) {
5644 case ISD::CONDCODE:
5645 case ISD::VALUETYPE:
5646 case ISD::FrameIndex:
// NOTE(review): a case label (original line 5647) is missing here due to the
// docs extraction -- likely another frame-index-like leaf node; confirm.
5648 case ISD::CopyFromReg:
5649 return true;
5650
5651 case ISD::POISON:
5652 return !includesPoison(Kind);
5653
5654 case ISD::UNDEF:
5655 return !includesUndef(Kind);
5656
5657 case ISD::BUILD_VECTOR:
5658 // NOTE: BUILD_VECTOR has implicit truncation of wider scalar elements -
5659 // this shouldn't affect the result.
5660 for (unsigned i = 0, e = Op.getNumOperands(); i < e; ++i) {
5661 if (!DemandedElts[i])
5662 continue;
5663 if (!isGuaranteedNotToBeUndefOrPoison(Op.getOperand(i), Kind, Depth + 1))
5664 return false;
5665 }
5666 return true;
5667
// NOTE(review): the case label opening this braced block (original line 5668)
// was lost in extraction; the getConstantOperandVal(1) index plus the
// zext/shl demanded-elts remapping matches ISD::EXTRACT_SUBVECTOR -- confirm.
5669 SDValue Src = Op.getOperand(0);
5670 if (Src.getValueType().isScalableVector())
5671 break;
5672 uint64_t Idx = Op.getConstantOperandVal(1);
5673 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
// Offset the demanded elements by the subvector's start index in the source.
5674 APInt DemandedSrcElts = DemandedElts.zext(NumSrcElts).shl(Idx);
5675 return isGuaranteedNotToBeUndefOrPoison(Src, DemandedSrcElts, Kind,
5676 Depth + 1);
5677 }
5678
5679 case ISD::INSERT_SUBVECTOR: {
5680 if (Op.getValueType().isScalableVector())
5681 break;
// Split the demand between the inserted subvector and the remaining source
// elements, then require both contributing values to be clean.
5682 SDValue Src = Op.getOperand(0);
5683 SDValue Sub = Op.getOperand(1);
5684 uint64_t Idx = Op.getConstantOperandVal(2);
5685 unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
5686 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
5687 APInt DemandedSrcElts = DemandedElts;
5688 DemandedSrcElts.clearBits(Idx, Idx + NumSubElts);
5689
5690 if (!!DemandedSubElts && !isGuaranteedNotToBeUndefOrPoison(
5691 Sub, DemandedSubElts, Kind, Depth + 1))
5692 return false;
5693 if (!!DemandedSrcElts && !isGuaranteedNotToBeUndefOrPoison(
5694 Src, DemandedSrcElts, Kind, Depth + 1))
5695 return false;
5696 return true;
5697 }
5698
// NOTE(review): the case label opening this braced block (original line 5699)
// was lost in extraction; the vector-source-plus-constant-lane-index pattern
// matches ISD::EXTRACT_VECTOR_ELT -- confirm.
5700 SDValue Src = Op.getOperand(0);
5701 auto *IndexC = dyn_cast<ConstantSDNode>(Op.getOperand(1));
5702 EVT SrcVT = Src.getValueType();
// Only a known, in-bounds index lets us narrow the demand to one element.
5703 if (SrcVT.isFixedLengthVector() && IndexC &&
5704 IndexC->getAPIntValue().ult(SrcVT.getVectorNumElements())) {
5705 APInt DemandedSrcElts = APInt::getOneBitSet(SrcVT.getVectorNumElements(),
5706 IndexC->getZExtValue());
5707 return isGuaranteedNotToBeUndefOrPoison(Src, DemandedSrcElts, Kind,
5708 Depth + 1);
5709 }
5710 break;
5711 }
5712
// NOTE(review): the case label opening this braced block (original line 5713)
// was lost in extraction; the InVec/InVal/EltNo operand triple matches
// ISD::INSERT_VECTOR_ELT -- confirm.
5714 SDValue InVec = Op.getOperand(0);
5715 SDValue InVal = Op.getOperand(1);
5716 SDValue EltNo = Op.getOperand(2);
5717 EVT VT = InVec.getValueType();
5718 auto *IndexC = dyn_cast<ConstantSDNode>(EltNo);
5719 if (IndexC && VT.isFixedLengthVector() &&
5720 IndexC->getAPIntValue().ult(VT.getVectorNumElements())) {
// If the written lane is demanded, the inserted value must be clean.
5721 if (DemandedElts[IndexC->getZExtValue()] &&
5722 !isGuaranteedNotToBeUndefOrPoison(InVal, Kind, Depth + 1))
5723 return false;
// The remaining demanded lanes come from the original vector.
5724 APInt InVecDemandedElts = DemandedElts;
5725 InVecDemandedElts.clearBit(IndexC->getZExtValue());
5726 if (!!InVecDemandedElts &&
// NOTE(review): a line (original 5727) was lost in extraction here --
// presumably the recursive "!isGuaranteedNotToBeUndefOrPoison(" call whose
// arguments follow; confirm against the upstream source.
5728 peekThroughInsertVectorElt(InVec, InVecDemandedElts),
5729 InVecDemandedElts, Kind, Depth + 1))
5730 return false;
5731 return true;
5732 }
5733 break;
5734 }
5735
// NOTE(review): the missing case label here (original line 5736) appears to
// be ISD::SCALAR_TO_VECTOR, given the "upper (known undef) elements" /
// "element zero" checks below -- confirm.
5737 // Check upper (known undef) elements.
5738 if (DemandedElts.ugt(1) && includesUndef(Kind))
5739 return false;
5740 // Check element zero.
5741 if (DemandedElts[0] &&
5742 !isGuaranteedNotToBeUndefOrPoison(Op.getOperand(0), Kind, Depth + 1))
5743 return false;
5744 return true;
5745
5746 case ISD::SPLAT_VECTOR:
// Every element mirrors the scalar operand, so check only that.
5747 return isGuaranteedNotToBeUndefOrPoison(Op.getOperand(0), Kind, Depth + 1);
5748
5749 case ISD::VECTOR_SHUFFLE: {
// Map the demanded result lanes back onto the two shuffle inputs; undef mask
// entries are disallowed (AllowUndefElts=false) since they yield undef lanes.
5750 APInt DemandedLHS, DemandedRHS;
5751 auto *SVN = cast<ShuffleVectorSDNode>(Op);
5752 if (!getShuffleDemandedElts(DemandedElts.getBitWidth(), SVN->getMask(),
5753 DemandedElts, DemandedLHS, DemandedRHS,
5754 /*AllowUndefElts=*/false))
5755 return false;
5756 if (!DemandedLHS.isZero() &&
5757 !isGuaranteedNotToBeUndefOrPoison(Op.getOperand(0), DemandedLHS, Kind,
5758 Depth + 1))
5759 return false;
5760 if (!DemandedRHS.isZero() &&
5761 !isGuaranteedNotToBeUndefOrPoison(Op.getOperand(1), DemandedRHS, Kind,
5762 Depth + 1))
5763 return false;
5764 return true;
5765 }
5766
5767 case ISD::SHL:
5768 case ISD::SRL:
5769 case ISD::SRA:
5770 // Shift amount operand is checked by canCreateUndefOrPoison. So it is
5771 // enough to check operand 0 if Op can't create undef/poison.
5772 return !canCreateUndefOrPoison(Op, DemandedElts, Kind,
5773 /*ConsiderFlags*/ true, Depth) &&
5774 isGuaranteedNotToBeUndefOrPoison(Op.getOperand(0), DemandedElts,
5775 Kind, Depth + 1);
5776
5777 case ISD::BSWAP:
5778 case ISD::CTPOP:
5779 case ISD::BITREVERSE:
5780 case ISD::AND:
5781 case ISD::OR:
5782 case ISD::XOR:
5783 case ISD::ADD:
5784 case ISD::SUB:
5785 case ISD::MUL:
5786 case ISD::SADDSAT:
5787 case ISD::UADDSAT:
5788 case ISD::SSUBSAT:
5789 case ISD::USUBSAT:
5790 case ISD::SSHLSAT:
5791 case ISD::USHLSAT:
5792 case ISD::SMIN:
5793 case ISD::SMAX:
5794 case ISD::UMIN:
5795 case ISD::UMAX:
5796 case ISD::ZERO_EXTEND:
5797 case ISD::SIGN_EXTEND:
5798 case ISD::ANY_EXTEND:
5799 case ISD::TRUNCATE:
5800 case ISD::VSELECT: {
5801 // If Op can't create undef/poison and none of its operands are undef/poison
5802 // then Op is never undef/poison. A difference from the more common check
5803 // below, outside the switch, is that we handle elementwise operations for
5804 // which the DemandedElts mask is valid for all operands here.
5805 return !canCreateUndefOrPoison(Op, DemandedElts, Kind,
5806 /*ConsiderFlags*/ true, Depth) &&
5807 all_of(Op->ops(), [&](SDValue V) {
5808 return isGuaranteedNotToBeUndefOrPoison(V, DemandedElts, Kind,
5809 Depth + 1);
5810 });
5811 }
5812
5813 // TODO: Search for noundef attributes from library functions.
5814
5815 // TODO: Pointers dereferenced by ISD::LOAD/STORE ops are noundef.
5816
5817 default:
5818 // Allow the target to implement this method for its nodes.
5819 if (Opcode >= ISD::BUILTIN_OP_END || Opcode == ISD::INTRINSIC_WO_CHAIN ||
5820 Opcode == ISD::INTRINSIC_W_CHAIN || Opcode == ISD::INTRINSIC_VOID)
5821 return TLI->isGuaranteedNotToBeUndefOrPoisonForTargetNode(
5822 Op, DemandedElts, *this, Kind, Depth);
5823 break;
5824 }
5825
5826 // If Op can't create undef/poison and none of its operands are undef/poison
5827 // then Op is never undef/poison.
5828 // NOTE: TargetNodes can handle this in themselves in
5829 // isGuaranteedNotToBeUndefOrPoisonForTargetNode or let
5830 // TargetLowering::isGuaranteedNotToBeUndefOrPoisonForTargetNode handle it.
5831 return !canCreateUndefOrPoison(Op, Kind, /*ConsiderFlags*/ true, Depth) &&
5832 all_of(Op->ops(), [&](SDValue V) {
5833 return isGuaranteedNotToBeUndefOrPoison(V, Kind, Depth + 1);
5834 });
5835}
5836
5838 bool ConsiderFlags,
5839 unsigned Depth) const {
// NOTE(review): the first signature line (original line 5837) was dropped by
// the docs extraction; this is the convenience overload of
// canCreateUndefOrPoison that takes no element mask -- confirm against the
// upstream source.
// Demand every element, then defer to the DemandedElts overload below.
5840 APInt DemandedElts = getDemandAllEltsMask(Op);
5841 return canCreateUndefOrPoison(Op, DemandedElts, Kind, ConsiderFlags, Depth);
5842}
5843
5845 UndefPoisonKind Kind,
5846 bool ConsiderFlags,
5847 unsigned Depth) const {
5848 if (ConsiderFlags && includesPoison(Kind) && Op->hasPoisonGeneratingFlags())
5849 return true;
5850
5851 unsigned Opcode = Op.getOpcode();
5852 switch (Opcode) {
5853 case ISD::AssertSext:
5854 case ISD::AssertZext:
5855 case ISD::AssertAlign:
5857 // Assertion nodes can create poison if the assertion fails.
5858 return includesPoison(Kind);
5859
5860 case ISD::FREEZE:
5864 case ISD::SADDSAT:
5865 case ISD::UADDSAT:
5866 case ISD::SSUBSAT:
5867 case ISD::USUBSAT:
5868 case ISD::MULHU:
5869 case ISD::MULHS:
5870 case ISD::AVGFLOORS:
5871 case ISD::AVGFLOORU:
5872 case ISD::AVGCEILS:
5873 case ISD::AVGCEILU:
5874 case ISD::ABDU:
5875 case ISD::ABDS:
5876 case ISD::SMIN:
5877 case ISD::SMAX:
5878 case ISD::SCMP:
5879 case ISD::UMIN:
5880 case ISD::UMAX:
5881 case ISD::UCMP:
5882 case ISD::AND:
5883 case ISD::XOR:
5884 case ISD::ROTL:
5885 case ISD::ROTR:
5886 case ISD::FSHL:
5887 case ISD::FSHR:
5888 case ISD::BSWAP:
5889 case ISD::CTTZ:
5890 case ISD::CTLZ:
5891 case ISD::CTLS:
5892 case ISD::CTPOP:
5893 case ISD::BITREVERSE:
5894 case ISD::PARITY:
5895 case ISD::SIGN_EXTEND:
5896 case ISD::TRUNCATE:
5900 case ISD::BITCAST:
5901 case ISD::BUILD_VECTOR:
5902 case ISD::BUILD_PAIR:
5903 case ISD::SPLAT_VECTOR:
5904 case ISD::FABS:
5905 case ISD::FCEIL:
5906 case ISD::FFLOOR:
5907 case ISD::FTRUNC:
5908 case ISD::FRINT:
5909 case ISD::FNEARBYINT:
5910 case ISD::FROUND:
5911 case ISD::FROUNDEVEN:
5912 return false;
5913
5914 case ISD::ABS:
5915 // ISD::ABS defines abs(INT_MIN) -> INT_MIN and never generates poison.
5916 // Different to Intrinsic::abs.
5917 return false;
5919 // ABS_MIN_POISON may produce poison if the input is INT_MIN.
5920 return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1) <= 1;
5921
5922 case ISD::ADDC:
5923 case ISD::SUBC:
5924 case ISD::ADDE:
5925 case ISD::SUBE:
5926 case ISD::SADDO:
5927 case ISD::SSUBO:
5928 case ISD::SMULO:
5929 case ISD::SADDO_CARRY:
5930 case ISD::SSUBO_CARRY:
5931 case ISD::UADDO:
5932 case ISD::USUBO:
5933 case ISD::UMULO:
5934 case ISD::UADDO_CARRY:
5935 case ISD::USUBO_CARRY:
5936 // No poison on result or overflow flags.
5937 return false;
5938
5939 case ISD::SELECT_CC:
5940 case ISD::SETCC: {
5941 // Integer setcc cannot create undef or poison.
5942 if (Op.getOperand(0).getValueType().isInteger())
5943 return false;
5944
5945 // FP compares are more complicated. They can create poison for nan/infinity
5946 // based on options and flags. The options and flags also cause special
5947 // nonan condition codes to be used. Those condition codes may be preserved
5948 // even if the nonan flag is dropped somewhere.
5949 unsigned CCOp = Opcode == ISD::SETCC ? 2 : 4;
5950 ISD::CondCode CCCode = cast<CondCodeSDNode>(Op.getOperand(CCOp))->get();
5951 return (unsigned)CCCode & 0x10U;
5952 }
5953
5954 case ISD::OR:
5955 case ISD::ZERO_EXTEND:
5956 case ISD::SELECT:
5957 case ISD::VSELECT:
5958 case ISD::ADD:
5959 case ISD::SUB:
5960 case ISD::MUL:
5961 case ISD::FNEG:
5962 case ISD::FADD:
5963 case ISD::FSUB:
5964 case ISD::FMUL:
5965 case ISD::FDIV:
5966 case ISD::FREM:
5967 case ISD::FCOPYSIGN:
5968 case ISD::FMA:
5969 case ISD::FMAD:
5970 case ISD::FMULADD:
5971 case ISD::FP_EXTEND:
5972 case ISD::FMINNUM:
5973 case ISD::FMAXNUM:
5974 case ISD::FMINNUM_IEEE:
5975 case ISD::FMAXNUM_IEEE:
5976 case ISD::FMINIMUM:
5977 case ISD::FMAXIMUM:
5978 case ISD::FMINIMUMNUM:
5979 case ISD::FMAXIMUMNUM:
5985 // No poison except from flags (which is handled above)
5986 return false;
5987
5988 case ISD::SHL:
5989 case ISD::SRL:
5990 case ISD::SRA:
5991 // If the max shift amount isn't in range, then the shift can
5992 // create poison.
5993 return includesPoison(Kind) &&
5994 !getValidMaximumShiftAmount(Op, DemandedElts, Depth + 1);
5995
5998 // If the amount is zero then the result will be poison.
5999 // TODO: Add isKnownNeverZero DemandedElts handling.
6000 return includesPoison(Kind) &&
6001 !isKnownNeverZero(Op.getOperand(0), Depth + 1);
6002
6004 // Check if we demand any upper (undef) elements.
6005 return includesUndef(Kind) && DemandedElts.ugt(1);
6006
6009 // Ensure that the element index is in bounds.
6010 if (includesPoison(Kind)) {
6011 EVT VecVT = Op.getOperand(0).getValueType();
6012 SDValue Idx = Op.getOperand(Opcode == ISD::INSERT_VECTOR_ELT ? 2 : 1);
6013 KnownBits KnownIdx = computeKnownBits(Idx, Depth + 1);
6014 return KnownIdx.getMaxValue().uge(VecVT.getVectorMinNumElements());
6015 }
6016 return false;
6017 }
6018
6019 case ISD::VECTOR_SHUFFLE: {
6020 // Check for any demanded shuffle element that is undef.
6021 auto *SVN = cast<ShuffleVectorSDNode>(Op);
6022 for (auto [Idx, Elt] : enumerate(SVN->getMask()))
6023 if (Elt < 0 && DemandedElts[Idx])
6024 return true;
6025 return false;
6026 }
6027
6029 return false;
6030
6031 default:
6032 // Allow the target to implement this method for its nodes.
6033 if (Opcode >= ISD::BUILTIN_OP_END || Opcode == ISD::INTRINSIC_WO_CHAIN ||
6034 Opcode == ISD::INTRINSIC_W_CHAIN || Opcode == ISD::INTRINSIC_VOID)
6035 return TLI->canCreateUndefOrPoisonForTargetNode(
6036 Op, DemandedElts, *this, Kind, ConsiderFlags, Depth);
6037 break;
6038 }
6039
6040 // Be conservative and return true.
6041 return true;
6042}
6043
6044bool SelectionDAG::isADDLike(SDValue Op, bool NoWrap) const {
6045 unsigned Opcode = Op.getOpcode();
6046 if (Opcode == ISD::OR)
6047 return Op->getFlags().hasDisjoint() ||
6048 haveNoCommonBitsSet(Op.getOperand(0), Op.getOperand(1));
6049 if (Opcode == ISD::XOR)
6050 return !NoWrap && isMinSignedConstant(Op.getOperand(1));
6051 return false;
6052}
6053
6055 return Op.getNumOperands() == 2 && isa<ConstantSDNode>(Op.getOperand(1)) &&
6056 (Op.isAnyAdd() || isADDLike(Op));
6057}
6058
6060 FPClassTest InterestedClasses,
6061 unsigned Depth) const {
6062 APInt DemandedElts = getDemandAllEltsMask(Op);
6063 return computeKnownFPClass(Op, DemandedElts, InterestedClasses, Depth);
6064}
6065
6067 const APInt &DemandedElts,
6068 FPClassTest InterestedClasses,
6069 unsigned Depth) const {
6070 KnownFPClass Known;
6071
6072 if (const auto *CFP = dyn_cast<ConstantFPSDNode>(Op))
6073 return KnownFPClass(CFP->getValueAPF());
6074
6075 if (Depth >= MaxRecursionDepth)
6076 return Known;
6077
6078 if (Op.getOpcode() == ISD::UNDEF)
6079 return Known;
6080
6081 EVT VT = Op.getValueType();
6082 assert(VT.isFloatingPoint() && "Computing KnownFPClass on non-FP op!");
6083 assert((!VT.isFixedLengthVector() ||
6084 DemandedElts.getBitWidth() == VT.getVectorNumElements()) &&
6085 "Unexpected vector size");
6086
6087 if (!DemandedElts)
6088 return Known;
6089
6090 unsigned Opcode = Op.getOpcode();
6091 switch (Opcode) {
6092 case ISD::POISON: {
6093 Known.KnownFPClasses = fcNone;
6094 Known.SignBit = false;
6095 break;
6096 }
6097 case ISD::FNEG: {
6098 Known = computeKnownFPClass(Op.getOperand(0), DemandedElts,
6099 InterestedClasses, Depth + 1);
6100 Known.fneg();
6101 break;
6102 }
6103 case ISD::BUILD_VECTOR: {
6104 assert(!VT.isScalableVector());
6105 bool First = true;
6106 for (unsigned I = 0, E = Op.getNumOperands(); I != E; ++I) {
6107 if (!DemandedElts[I])
6108 continue;
6109
6110 if (First) {
6111 Known =
6112 computeKnownFPClass(Op.getOperand(I), InterestedClasses, Depth + 1);
6113 First = false;
6114 } else {
6115 Known |=
6116 computeKnownFPClass(Op.getOperand(I), InterestedClasses, Depth + 1);
6117 }
6118
6119 if (Known.isUnknown())
6120 break;
6121 }
6122 break;
6123 }
6125 SDValue Src = Op.getOperand(0);
6126 auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
6127 EVT SrcVT = Src.getValueType();
6128 if (SrcVT.isFixedLengthVector() && CIdx) {
6129 if (CIdx->getAPIntValue().ult(SrcVT.getVectorNumElements())) {
6130 APInt DemandedSrcElts = APInt::getOneBitSet(
6131 SrcVT.getVectorNumElements(), CIdx->getZExtValue());
6132 Known = computeKnownFPClass(Src, DemandedSrcElts, InterestedClasses,
6133 Depth + 1);
6134 } else {
6135 // Out of bounds index is poison.
6136 Known.KnownFPClasses = fcNone;
6137 }
6138 } else {
6139 Known = computeKnownFPClass(Src, InterestedClasses, Depth + 1);
6140 }
6141 break;
6142 }
6143 case ISD::SPLAT_VECTOR: {
6144 Known = computeKnownFPClass(Op.getOperand(0), InterestedClasses, Depth + 1);
6145 break;
6146 }
6147 case ISD::BITCAST: {
6148 // FIXME: It should not be necessary to check for an elementwise bitcast.
6149 // If a bitcast is not elementwise between vector / scalar types,
6150 // computeKnownBits already splices the known bits of the source elements
6151 // appropriately so as to line up with the bits of the result's demanded
6152 // elements.
6153 EVT SrcVT = Op.getOperand(0).getValueType();
6154 if (VT.isScalableVector() || SrcVT.isScalableVector())
6155 break;
6156 unsigned VTNumElts = VT.isVector() ? VT.getVectorNumElements() : 1;
6157 unsigned SrcVTNumElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1;
6158 if (VTNumElts != SrcVTNumElts)
6159 break;
6160
6161 KnownBits Bits = computeKnownBits(Op, DemandedElts, Depth + 1);
6162 Known = KnownFPClass::bitcast(VT.getFltSemantics(), Bits);
6163 break;
6164 }
6165 case ISD::FABS: {
6166 Known = computeKnownFPClass(Op.getOperand(0), DemandedElts,
6167 InterestedClasses, Depth + 1);
6168 Known.fabs();
6169 break;
6170 }
6171 case ISD::FCOPYSIGN: {
6172 Known = computeKnownFPClass(Op.getOperand(0), DemandedElts,
6173 InterestedClasses, Depth + 1);
6174 KnownFPClass KnownSign = computeKnownFPClass(Op.getOperand(1), DemandedElts,
6175 InterestedClasses, Depth + 1);
6176 Known.copysign(KnownSign);
6177 break;
6178 }
6179 case ISD::AssertNoFPClass: {
6180 Known = computeKnownFPClass(Op.getOperand(0), DemandedElts,
6181 InterestedClasses, Depth + 1);
6182 FPClassTest AssertedClasses =
6183 static_cast<FPClassTest>(Op->getConstantOperandVal(1));
6184 Known.KnownFPClasses &= ~AssertedClasses;
6185 break;
6186 }
6188 SDValue Src = Op.getOperand(0);
6189 EVT SrcVT = Src.getValueType();
6190 if (SrcVT.isFixedLengthVector()) {
6191 unsigned Idx = Op.getConstantOperandVal(1);
6192 unsigned NumSrcElts = SrcVT.getVectorNumElements();
6193
6194 APInt DemandedSrcElts = DemandedElts.zextOrTrunc(NumSrcElts).shl(Idx);
6195 Known = computeKnownFPClass(Src, DemandedSrcElts, InterestedClasses,
6196 Depth + 1);
6197 } else {
6198 Known = computeKnownFPClass(Src, InterestedClasses, Depth + 1);
6199 }
6200 break;
6201 }
6202 case ISD::INSERT_SUBVECTOR: {
6203 SDValue BaseVector = Op.getOperand(0);
6204 SDValue SubVector = Op.getOperand(1);
6205 EVT BaseVT = BaseVector.getValueType();
6206 if (BaseVT.isFixedLengthVector()) {
6207 unsigned Idx = Op.getConstantOperandVal(2);
6208 unsigned NumBaseElts = BaseVT.getVectorNumElements();
6209 unsigned NumSubElts = SubVector.getValueType().getVectorNumElements();
6210
6211 APInt DemandedMask =
6212 APInt::getBitsSet(NumBaseElts, Idx, Idx + NumSubElts);
6213 APInt DemandedSrcElts = DemandedElts & ~DemandedMask;
6214 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
6215
6216 if (!DemandedSrcElts.isZero())
6217 Known = computeKnownFPClass(BaseVector, DemandedSrcElts,
6218 InterestedClasses, Depth + 1);
6219 if (!DemandedSubElts.isZero()) {
6221 SubVector, DemandedSubElts, InterestedClasses, Depth + 1);
6222 Known = DemandedSrcElts.isZero() ? SubKnown : (Known | SubKnown);
6223 }
6224 } else {
6225 Known = computeKnownFPClass(SubVector, InterestedClasses, Depth + 1);
6226 if (!Known.isUnknown())
6227 Known |= computeKnownFPClass(BaseVector, InterestedClasses, Depth + 1);
6228 }
6229 break;
6230 }
6231 case ISD::SELECT:
6232 case ISD::VSELECT: {
6233 // TODO: Add adjustKnownFPClassForSelectArm clamp recognition as in
6234 // IR-level ValueTracking.
6235 KnownFPClass KnownFalseClass = computeKnownFPClass(
6236 Op.getOperand(2), DemandedElts, InterestedClasses, Depth + 1);
6237 if (KnownFalseClass.isUnknown())
6238 break;
6239 KnownFPClass KnownTrueClass = computeKnownFPClass(
6240 Op.getOperand(1), DemandedElts, InterestedClasses, Depth + 1);
6241 Known = KnownTrueClass.intersectWith(KnownFalseClass);
6242 break;
6243 }
6244 default:
6245 if (Opcode >= ISD::BUILTIN_OP_END || Opcode == ISD::INTRINSIC_WO_CHAIN ||
6246 Opcode == ISD::INTRINSIC_W_CHAIN || Opcode == ISD::INTRINSIC_VOID) {
6247 TLI->computeKnownFPClassForTargetNode(Op, Known, DemandedElts, *this,
6248 Depth);
6249 }
6250 break;
6251 }
6252
6253 return Known;
6254}
6255
6257 unsigned Depth) const {
6258 APInt DemandedElts = getDemandAllEltsMask(Op);
6259 return isKnownNeverNaN(Op, DemandedElts, SNaN, Depth);
6260}
6261
6263 bool SNaN, unsigned Depth) const {
6264 assert(!DemandedElts.isZero() && "No demanded elements");
6265
6266 // If we're told that NaNs won't happen, assume they won't.
6267 if (Op->getFlags().hasNoNaNs())
6268 return true;
6269
6270 if (Depth >= MaxRecursionDepth)
6271 return false; // Limit search depth.
6272
6273 unsigned Opcode = Op.getOpcode();
6274 switch (Opcode) {
6275 case ISD::FADD:
6276 case ISD::FSUB:
6277 case ISD::FMUL:
6278 case ISD::FDIV:
6279 case ISD::FREM:
6280 case ISD::FSIN:
6281 case ISD::FCOS:
6282 case ISD::FTAN:
6283 case ISD::FASIN:
6284 case ISD::FACOS:
6285 case ISD::FATAN:
6286 case ISD::FATAN2:
6287 case ISD::FSINH:
6288 case ISD::FCOSH:
6289 case ISD::FTANH:
6290 case ISD::FMA:
6291 case ISD::FMULADD:
6292 case ISD::FMAD: {
6293 if (SNaN)
6294 return true;
6295 // TODO: Need isKnownNeverInfinity
6296 return false;
6297 }
6298 case ISD::FCANONICALIZE:
6299 case ISD::FEXP:
6300 case ISD::FEXP2:
6301 case ISD::FEXP10:
6302 case ISD::FTRUNC:
6303 case ISD::FFLOOR:
6304 case ISD::FCEIL:
6305 case ISD::FROUND:
6306 case ISD::FROUNDEVEN:
6307 case ISD::LROUND:
6308 case ISD::LLROUND:
6309 case ISD::FRINT:
6310 case ISD::LRINT:
6311 case ISD::LLRINT:
6312 case ISD::FNEARBYINT:
6313 case ISD::FLDEXP: {
6314 if (SNaN)
6315 return true;
6316 return isKnownNeverNaN(Op.getOperand(0), DemandedElts, SNaN, Depth + 1);
6317 }
6318 case ISD::FABS:
6319 case ISD::FNEG:
6320 case ISD::FCOPYSIGN: {
6321 return isKnownNeverNaN(Op.getOperand(0), DemandedElts, SNaN, Depth + 1);
6322 }
6323 case ISD::SELECT:
6324 return isKnownNeverNaN(Op.getOperand(1), DemandedElts, SNaN, Depth + 1) &&
6325 isKnownNeverNaN(Op.getOperand(2), DemandedElts, SNaN, Depth + 1);
6326 case ISD::FP_EXTEND:
6327 case ISD::FP_ROUND: {
6328 if (SNaN)
6329 return true;
6330 return isKnownNeverNaN(Op.getOperand(0), DemandedElts, SNaN, Depth + 1);
6331 }
6332 case ISD::SINT_TO_FP:
6333 case ISD::UINT_TO_FP:
6334 return true;
6335 case ISD::FSQRT: // Need is known positive
6336 case ISD::FLOG:
6337 case ISD::FLOG2:
6338 case ISD::FLOG10:
6339 case ISD::FPOWI:
6340 case ISD::FPOW: {
6341 if (SNaN)
6342 return true;
6343 // TODO: Refine on operand
6344 return false;
6345 }
6346 case ISD::FMINNUM:
6347 case ISD::FMAXNUM:
6348 case ISD::FMINIMUMNUM:
6349 case ISD::FMAXIMUMNUM: {
6350 // Only one needs to be known not-nan, since it will be returned if the
6351 // other ends up being one.
6352 return isKnownNeverNaN(Op.getOperand(0), DemandedElts, SNaN, Depth + 1) ||
6353 isKnownNeverNaN(Op.getOperand(1), DemandedElts, SNaN, Depth + 1);
6354 }
6355 case ISD::FMINNUM_IEEE:
6356 case ISD::FMAXNUM_IEEE: {
6357 if (SNaN)
6358 return true;
6359 // This can return a NaN if either operand is an sNaN, or if both operands
6360 // are NaN.
6361 return (isKnownNeverNaN(Op.getOperand(0), DemandedElts, false, Depth + 1) &&
6362 isKnownNeverSNaN(Op.getOperand(1), DemandedElts, Depth + 1)) ||
6363 (isKnownNeverNaN(Op.getOperand(1), DemandedElts, false, Depth + 1) &&
6364 isKnownNeverSNaN(Op.getOperand(0), DemandedElts, Depth + 1));
6365 }
6366 case ISD::FMINIMUM:
6367 case ISD::FMAXIMUM: {
 6368 // TODO: Does this quiet or return the original NaN as-is?
6369 return isKnownNeverNaN(Op.getOperand(0), DemandedElts, SNaN, Depth + 1) &&
6370 isKnownNeverNaN(Op.getOperand(1), DemandedElts, SNaN, Depth + 1);
6371 }
6373 SDValue Src = Op.getOperand(0);
6374 auto *Idx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
6375 EVT SrcVT = Src.getValueType();
6376 if (SrcVT.isFixedLengthVector() && Idx &&
6377 Idx->getAPIntValue().ult(SrcVT.getVectorNumElements())) {
6378 APInt DemandedSrcElts = APInt::getOneBitSet(SrcVT.getVectorNumElements(),
6379 Idx->getZExtValue());
6380 return isKnownNeverNaN(Src, DemandedSrcElts, SNaN, Depth + 1);
6381 }
6382 return isKnownNeverNaN(Src, SNaN, Depth + 1);
6383 }
6385 SDValue Src = Op.getOperand(0);
6386 if (Src.getValueType().isFixedLengthVector()) {
6387 unsigned Idx = Op.getConstantOperandVal(1);
6388 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
6389 APInt DemandedSrcElts = DemandedElts.zext(NumSrcElts).shl(Idx);
6390 return isKnownNeverNaN(Src, DemandedSrcElts, SNaN, Depth + 1);
6391 }
6392 return isKnownNeverNaN(Src, SNaN, Depth + 1);
6393 }
6394 case ISD::INSERT_SUBVECTOR: {
6395 SDValue BaseVector = Op.getOperand(0);
6396 SDValue SubVector = Op.getOperand(1);
6397 EVT BaseVectorVT = BaseVector.getValueType();
6398 if (BaseVectorVT.isFixedLengthVector()) {
6399 unsigned Idx = Op.getConstantOperandVal(2);
6400 unsigned NumBaseElts = BaseVectorVT.getVectorNumElements();
6401 unsigned NumSubElts = SubVector.getValueType().getVectorNumElements();
6402
6403 // Clear/Extract the bits at the position where the subvector will be
6404 // inserted.
6405 APInt DemandedMask =
6406 APInt::getBitsSet(NumBaseElts, Idx, Idx + NumSubElts);
6407 APInt DemandedSrcElts = DemandedElts & ~DemandedMask;
6408 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
6409
6410 bool NeverNaN = true;
6411 if (!DemandedSrcElts.isZero())
6412 NeverNaN &=
6413 isKnownNeverNaN(BaseVector, DemandedSrcElts, SNaN, Depth + 1);
6414 if (NeverNaN && !DemandedSubElts.isZero())
6415 NeverNaN &=
6416 isKnownNeverNaN(SubVector, DemandedSubElts, SNaN, Depth + 1);
6417 return NeverNaN;
6418 }
6419 return isKnownNeverNaN(BaseVector, SNaN, Depth + 1) &&
6420 isKnownNeverNaN(SubVector, SNaN, Depth + 1);
6421 }
6422 case ISD::BUILD_VECTOR: {
6423 unsigned NumElts = Op.getNumOperands();
6424 for (unsigned I = 0; I != NumElts; ++I)
6425 if (DemandedElts[I] &&
6426 !isKnownNeverNaN(Op.getOperand(I), SNaN, Depth + 1))
6427 return false;
6428 return true;
6429 }
6430 case ISD::SPLAT_VECTOR:
6431 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
6432 case ISD::AssertNoFPClass: {
6433 FPClassTest NoFPClass =
6434 static_cast<FPClassTest>(Op.getConstantOperandVal(1));
6435 if ((NoFPClass & fcNan) == fcNan)
6436 return true;
6437 if (SNaN && (NoFPClass & fcSNan) == fcSNan)
6438 return true;
6439 return isKnownNeverNaN(Op.getOperand(0), DemandedElts, SNaN, Depth + 1);
6440 }
6441 default:
6442 if (Opcode >= ISD::BUILTIN_OP_END || Opcode == ISD::INTRINSIC_WO_CHAIN ||
6443 Opcode == ISD::INTRINSIC_W_CHAIN || Opcode == ISD::INTRINSIC_VOID) {
6444 return TLI->isKnownNeverNaNForTargetNode(Op, DemandedElts, *this, SNaN,
6445 Depth);
6446 }
6447 break;
6448 }
6449
6450 FPClassTest NanMask = SNaN ? fcSNan : fcNan;
6451 KnownFPClass Known = computeKnownFPClass(Op, DemandedElts, NanMask, Depth);
6452 return Known.isKnownNever(NanMask);
6453}
6454
6456 APInt DemandedElts = getDemandAllEltsMask(Op);
6457 return isKnownNeverLogicalZero(Op, DemandedElts, Depth);
6458}
6459
6461 const APInt &DemandedElts,
6462 unsigned Depth) const {
6463 assert(!DemandedElts.isZero() && "No demanded elements");
6464 EVT VT = Op.getValueType();
6465 KnownFPClass Known =
6466 computeKnownFPClass(Op, DemandedElts, fcZero | fcSubnormal, Depth);
6467 return Known.isKnownNeverLogicalZero(getDenormalMode(VT));
6468}
6469
6471 APInt DemandedElts = getDemandAllEltsMask(Op);
6472 return isKnownNeverZero(Op, DemandedElts, Depth);
6473}
6474
6476 unsigned Depth) const {
6477 if (Depth >= MaxRecursionDepth)
6478 return false; // Limit search depth.
6479
6480 EVT OpVT = Op.getValueType();
6481 unsigned BitWidth = OpVT.getScalarSizeInBits();
6482
6483 assert(!Op.getValueType().isFloatingPoint() &&
6484 "Floating point types unsupported - use isKnownNeverLogicalZero");
6485
6486 // If the value is a constant, we can obviously see if it is a zero or not.
6487 auto IsNeverZero = [BitWidth](const ConstantSDNode *C) {
6488 APInt V = C->getAPIntValue().zextOrTrunc(BitWidth);
6489 return !V.isZero();
6490 };
6491
6492 if (ISD::matchUnaryPredicate(Op, IsNeverZero))
6493 return true;
6494
6495 // TODO: Recognize more cases here. Most of the cases are also incomplete to
6496 // some degree.
6497 switch (Op.getOpcode()) {
6498 default:
6499 break;
6500
6501 case ISD::BUILD_VECTOR:
6502 // Are all operands of a build vector constant non-zero?
6503 if (all_of(enumerate(Op->ops()), [&](auto P) {
6504 auto *C = dyn_cast<ConstantSDNode>(P.value());
6505 return !DemandedElts[P.index()] || (C && IsNeverZero(C));
6506 }))
6507 return true;
6508 break;
6509
6510 case ISD::SPLAT_VECTOR:
6511 // Is the operand of a splat vector a constant non-zero?
6512 if (auto *C = dyn_cast<ConstantSDNode>(Op->getOperand(0)))
6513 if (IsNeverZero(C))
6514 return true;
6515 break;
6516
6518 SDValue InVec = Op.getOperand(0);
6519 SDValue EltNo = Op.getOperand(1);
6520 EVT VecVT = InVec.getValueType();
6521
6522 // Skip scalable vectors or implicit extensions.
6523 if (VecVT.isScalableVector() ||
6524 OpVT.getScalarSizeInBits() != VecVT.getScalarSizeInBits())
6525 break;
6526
6527 // If we know the element index, just demand that vector element, else for
6528 // an unknown element index, ignore DemandedElts and demand them all.
6529 const unsigned NumSrcElts = VecVT.getVectorNumElements();
6530 APInt DemandedSrcElts = APInt::getAllOnes(NumSrcElts);
6531 auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
6532 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
6533 DemandedSrcElts =
6534 APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue());
6535
6536 return isKnownNeverZero(InVec, DemandedSrcElts, Depth + 1);
6537 }
6538
6539 case ISD::OR:
6540 return isKnownNeverZero(Op.getOperand(1), DemandedElts, Depth + 1) ||
6541 isKnownNeverZero(Op.getOperand(0), DemandedElts, Depth + 1);
6542
6543 case ISD::VSELECT:
6544 case ISD::SELECT:
6545 return isKnownNeverZero(Op.getOperand(1), DemandedElts, Depth + 1) &&
6546 isKnownNeverZero(Op.getOperand(2), DemandedElts, Depth + 1);
6547
6548 case ISD::SHL: {
6549 if (Op->getFlags().hasNoSignedWrap() || Op->getFlags().hasNoUnsignedWrap())
6550 return isKnownNeverZero(Op.getOperand(0), DemandedElts, Depth + 1);
6551 KnownBits ValKnown =
6552 computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
6553 // 1 << X is never zero.
6554 if (ValKnown.One[0])
6555 return true;
6556 // If max shift cnt of known ones is non-zero, result is non-zero.
6557 APInt MaxCnt = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1)
6558 .getMaxValue();
6559 if (MaxCnt.ult(ValKnown.getBitWidth()) &&
6560 !ValKnown.One.shl(MaxCnt).isZero())
6561 return true;
6562 break;
6563 }
6564
6565 case ISD::VECTOR_SHUFFLE: {
6566 if (Op.getValueType().isScalableVector())
6567 return false;
6568
6569 unsigned NumElts = DemandedElts.getBitWidth();
6570
6571 // All demanded elements from LHS and RHS must be known non-zero.
6572 // Demanded elements with undef shuffle mask elements are unknown.
6573
6574 APInt DemandedLHS, DemandedRHS;
6575 auto *SVN = cast<ShuffleVectorSDNode>(Op);
6576 assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
6577 if (!getShuffleDemandedElts(NumElts, SVN->getMask(), DemandedElts,
6578 DemandedLHS, DemandedRHS))
6579 return false;
6580
6581 return (!DemandedLHS ||
6582 isKnownNeverZero(Op.getOperand(0), DemandedLHS, Depth + 1)) &&
6583 (!DemandedRHS ||
6584 isKnownNeverZero(Op.getOperand(1), DemandedRHS, Depth + 1));
6585 }
6586
6587 case ISD::UADDSAT:
6588 case ISD::UMAX:
6589 return isKnownNeverZero(Op.getOperand(1), DemandedElts, Depth + 1) ||
6590 isKnownNeverZero(Op.getOperand(0), DemandedElts, Depth + 1);
6591
6592 case ISD::UMIN:
6593 return isKnownNeverZero(Op.getOperand(1), DemandedElts, Depth + 1) &&
6594 isKnownNeverZero(Op.getOperand(0), DemandedElts, Depth + 1);
6595
6596 // For smin/smax: If either operand is known negative/positive
6597 // respectively we don't need the other to be known at all.
6598 case ISD::SMAX: {
6599 KnownBits Op1 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
6600 if (Op1.isStrictlyPositive())
6601 return true;
6602
6603 KnownBits Op0 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
6604 if (Op0.isStrictlyPositive())
6605 return true;
6606
6607 if (Op1.isNonZero() && Op0.isNonZero())
6608 return true;
6609
6610 return isKnownNeverZero(Op.getOperand(1), DemandedElts, Depth + 1) &&
6611 isKnownNeverZero(Op.getOperand(0), DemandedElts, Depth + 1);
6612 }
6613 case ISD::SMIN: {
6614 KnownBits Op1 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
6615 if (Op1.isNegative())
6616 return true;
6617
6618 KnownBits Op0 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
6619 if (Op0.isNegative())
6620 return true;
6621
6622 if (Op1.isNonZero() && Op0.isNonZero())
6623 return true;
6624
6625 return isKnownNeverZero(Op.getOperand(1), DemandedElts, Depth + 1) &&
6626 isKnownNeverZero(Op.getOperand(0), DemandedElts, Depth + 1);
6627 }
6628
6629 case ISD::ROTL:
6630 case ISD::ROTR:
6631 case ISD::BITREVERSE:
6632 case ISD::BSWAP:
6633 case ISD::CTPOP:
6634 case ISD::ABS:
6636 return isKnownNeverZero(Op.getOperand(0), DemandedElts, Depth + 1);
6637
6638 case ISD::SRA:
6639 case ISD::SRL: {
6640 if (Op->getFlags().hasExact())
6641 return isKnownNeverZero(Op.getOperand(0), DemandedElts, Depth + 1);
6642 KnownBits ValKnown =
6643 computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
6644 if (ValKnown.isNegative())
6645 return true;
6646 // If max shift cnt of known ones is non-zero, result is non-zero.
6647 APInt MaxCnt = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1)
6648 .getMaxValue();
6649 if (MaxCnt.ult(ValKnown.getBitWidth()) &&
6650 !ValKnown.One.lshr(MaxCnt).isZero())
6651 return true;
6652 break;
6653 }
6654 case ISD::UDIV:
6655 case ISD::SDIV:
6656 // div exact can only produce a zero if the dividend is zero.
6657 // TODO: For udiv this is also true if Op1 u<= Op0
6658 if (Op->getFlags().hasExact())
6659 return isKnownNeverZero(Op.getOperand(0), Depth + 1);
6660 break;
6661
6662 case ISD::ADD:
6663 if (Op->getFlags().hasNoUnsignedWrap())
6664 if (isKnownNeverZero(Op.getOperand(1), DemandedElts, Depth + 1) ||
6665 isKnownNeverZero(Op.getOperand(0), DemandedElts, Depth + 1))
6666 return true;
6667 // TODO: There are a lot more cases we can prove for add.
6668 break;
6669
6670 case ISD::SUB: {
6671 if (isNullConstant(Op.getOperand(0)))
6672 return isKnownNeverZero(Op.getOperand(1), DemandedElts, Depth + 1);
6673
6674 std::optional<bool> ne = KnownBits::ne(
6675 computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1),
6676 computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1));
6677 return ne && *ne;
6678 }
6679
6680 case ISD::MUL:
6681 if (Op->getFlags().hasNoSignedWrap() || Op->getFlags().hasNoUnsignedWrap())
6682 if (isKnownNeverZero(Op.getOperand(1), Depth + 1) &&
6683 isKnownNeverZero(Op.getOperand(0), Depth + 1))
6684 return true;
6685 break;
6686
6687 case ISD::ZERO_EXTEND:
6688 case ISD::SIGN_EXTEND:
6689 return isKnownNeverZero(Op.getOperand(0), DemandedElts, Depth + 1);
6690 case ISD::VSCALE: {
6692 const APInt &Multiplier = Op.getConstantOperandAPInt(0);
6693 ConstantRange CR =
6694 getVScaleRange(&F, Op.getScalarValueSizeInBits()).multiply(Multiplier);
6695 if (!CR.contains(APInt(CR.getBitWidth(), 0)))
6696 return true;
6697 break;
6698 }
6699 }
6700
6702}
6703
6705 if (ConstantFPSDNode *C1 = isConstOrConstSplatFP(Op, true))
6706 return !C1->isNegative();
6707
6708 switch (Op.getOpcode()) {
6709 case ISD::FABS:
6710 case ISD::FEXP:
6711 case ISD::FEXP2:
6712 case ISD::FEXP10:
6713 return true;
6714 default:
6715 return false;
6716 }
6717
6718 llvm_unreachable("covered opcode switch");
6719}
6720
6722 assert(Use.getValueType().isFloatingPoint());
6723 const SDNode *User = Use.getUser();
6724 if (User->getFlags().hasNoSignedZeros())
6725 return true;
6726
6727 unsigned OperandNo = Use.getOperandNo();
6728 // Check if this use is insensitive to the sign of zero
6729 switch (User->getOpcode()) {
6730 case ISD::SETCC:
6731 // Comparisons: IEEE-754 specifies +0.0 == -0.0.
6732 case ISD::FABS:
6733 // fabs always produces +0.0.
6734 return true;
6735 case ISD::FCOPYSIGN:
6736 // copysign overwrites the sign bit of the first operand.
6737 return OperandNo == 0;
6738 case ISD::FADD:
6739 case ISD::FSUB: {
6740 // Arithmetic with non-zero constants fixes the uncertainty around the
6741 // sign bit.
6742 SDValue Other = User->getOperand(1 - OperandNo);
6744 }
6745 case ISD::FP_TO_SINT:
6746 case ISD::FP_TO_UINT:
6747 // fp-to-int conversions normalize signed zeros.
6748 return true;
6749 default:
6750 return false;
6751 }
6752}
6753
6755 if (Op->getFlags().hasNoSignedZeros())
6756 return true;
6757 // FIXME: Limit the amount of checked uses to not introduce a compile-time
6758 // regression. Ideally, this should be implemented as a demanded-bits
6759 // optimization that stems from the users.
6760 if (Op->use_size() > 2)
6761 return false;
6762 return all_of(Op->uses(),
6763 [&](const SDUse &Use) { return canIgnoreSignBitOfZero(Use); });
6764}
6765
6767 // Check the obvious case.
6768 if (A == B) return true;
6769
6770 // For negative and positive zero.
6773 if (CA->isZero() && CB->isZero()) return true;
6774
6775 // Otherwise they may not be equal.
6776 return false;
6777}
6778
6779// Only bits set in Mask must be negated, other bits may be arbitrary.
6781 if (isBitwiseNot(V, AllowUndefs))
6782 return V.getOperand(0);
6783
6784 // Handle any_extend (not (truncate X)) pattern, where Mask only sets
6785 // bits in the non-extended part.
6786 ConstantSDNode *MaskC = isConstOrConstSplat(Mask);
6787 if (!MaskC || V.getOpcode() != ISD::ANY_EXTEND)
6788 return SDValue();
6789 SDValue ExtArg = V.getOperand(0);
6790 if (ExtArg.getScalarValueSizeInBits() >=
6791 MaskC->getAPIntValue().getActiveBits() &&
6792 isBitwiseNot(ExtArg, AllowUndefs) &&
6793 ExtArg.getOperand(0).getOpcode() == ISD::TRUNCATE &&
6794 ExtArg.getOperand(0).getOperand(0).getValueType() == V.getValueType())
6795 return ExtArg.getOperand(0).getOperand(0);
6796 return SDValue();
6797}
6798
6800 // Match masked merge pattern (X & ~M) op (Y & M)
6801 // Including degenerate case (X & ~M) op M
6802 auto MatchNoCommonBitsPattern = [&](SDValue Not, SDValue Mask,
6803 SDValue Other) {
6804 if (SDValue NotOperand =
6805 getBitwiseNotOperand(Not, Mask, /* AllowUndefs */ true)) {
6806 if (NotOperand->getOpcode() == ISD::ZERO_EXTEND ||
6807 NotOperand->getOpcode() == ISD::TRUNCATE)
6808 NotOperand = NotOperand->getOperand(0);
6809
6810 if (Other == NotOperand)
6811 return true;
6812 if (Other->getOpcode() == ISD::AND)
6813 return NotOperand == Other->getOperand(0) ||
6814 NotOperand == Other->getOperand(1);
6815 }
6816 return false;
6817 };
6818
6819 if (A->getOpcode() == ISD::ZERO_EXTEND || A->getOpcode() == ISD::TRUNCATE)
6820 A = A->getOperand(0);
6821
6822 if (B->getOpcode() == ISD::ZERO_EXTEND || B->getOpcode() == ISD::TRUNCATE)
6823 B = B->getOperand(0);
6824
6825 if (A->getOpcode() == ISD::AND)
6826 return MatchNoCommonBitsPattern(A->getOperand(0), A->getOperand(1), B) ||
6827 MatchNoCommonBitsPattern(A->getOperand(1), A->getOperand(0), B);
6828 return false;
6829}
6830
6831// FIXME: unify with llvm::haveNoCommonBitsSet.
6833 assert(A.getValueType() == B.getValueType() &&
6834 "Values must have the same type");
6837 return true;
6840}
6841
6842static SDValue FoldSTEP_VECTOR(const SDLoc &DL, EVT VT, SDValue Step,
6843 SelectionDAG &DAG) {
6844 if (cast<ConstantSDNode>(Step)->isZero())
6845 return DAG.getConstant(0, DL, VT);
6846
6847 return SDValue();
6848}
6849
6852 SelectionDAG &DAG) {
6853 int NumOps = Ops.size();
6854 assert(NumOps != 0 && "Can't build an empty vector!");
6855 assert(!VT.isScalableVector() &&
6856 "BUILD_VECTOR cannot be used with scalable types");
6857 assert(VT.getVectorNumElements() == (unsigned)NumOps &&
6858 "Incorrect element count in BUILD_VECTOR!");
6859
6860 // BUILD_VECTOR of UNDEFs is UNDEF.
6861 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
6862 return DAG.getUNDEF(VT);
6863
6864 // BUILD_VECTOR of seq extract/insert from the same vector + type is Identity.
6865 SDValue IdentitySrc;
6866 bool IsIdentity = true;
6867 for (int i = 0; i != NumOps; ++i) {
6868 if (Ops[i].getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
6869 Ops[i].getOperand(0).getValueType() != VT ||
6870 (IdentitySrc && Ops[i].getOperand(0) != IdentitySrc) ||
6871 !isa<ConstantSDNode>(Ops[i].getOperand(1)) ||
6872 Ops[i].getConstantOperandAPInt(1) != i) {
6873 IsIdentity = false;
6874 break;
6875 }
6876 IdentitySrc = Ops[i].getOperand(0);
6877 }
6878 if (IsIdentity)
6879 return IdentitySrc;
6880
6881 return SDValue();
6882}
6883
6884/// Try to simplify vector concatenation to an input value, undef, or build
6885/// vector.
6888 SelectionDAG &DAG) {
6889 assert(!Ops.empty() && "Can't concatenate an empty list of vectors!");
6891 [Ops](SDValue Op) {
6892 return Ops[0].getValueType() == Op.getValueType();
6893 }) &&
6894 "Concatenation of vectors with inconsistent value types!");
6895 assert((Ops[0].getValueType().getVectorElementCount() * Ops.size()) ==
6896 VT.getVectorElementCount() &&
6897 "Incorrect element count in vector concatenation!");
6898
6899 if (Ops.size() == 1)
6900 return Ops[0];
6901
6902 // Concat of UNDEFs is UNDEF.
6903 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
6904 return DAG.getUNDEF(VT);
6905
6906 // Scan the operands and look for extract operations from a single source
6907 // that correspond to insertion at the same location via this concatenation:
6908 // concat (extract X, 0*subvec_elts), (extract X, 1*subvec_elts), ...
6909 SDValue IdentitySrc;
6910 bool IsIdentity = true;
6911 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
6912 SDValue Op = Ops[i];
6913 unsigned IdentityIndex = i * Op.getValueType().getVectorMinNumElements();
6914 if (Op.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
6915 Op.getOperand(0).getValueType() != VT ||
6916 (IdentitySrc && Op.getOperand(0) != IdentitySrc) ||
6917 Op.getConstantOperandVal(1) != IdentityIndex) {
6918 IsIdentity = false;
6919 break;
6920 }
6921 assert((!IdentitySrc || IdentitySrc == Op.getOperand(0)) &&
6922 "Unexpected identity source vector for concat of extracts");
6923 IdentitySrc = Op.getOperand(0);
6924 }
6925 if (IsIdentity) {
6926 assert(IdentitySrc && "Failed to set source vector of extracts");
6927 return IdentitySrc;
6928 }
6929
6930 // The code below this point is only designed to work for fixed width
6931 // vectors, so we bail out for now.
6932 if (VT.isScalableVector())
6933 return SDValue();
6934
6935 // A CONCAT_VECTOR of scalar sources, such as UNDEF, BUILD_VECTOR and
6936 // single-element INSERT_VECTOR_ELT operands can be simplified to one big
6937 // BUILD_VECTOR.
6938 // FIXME: Add support for SCALAR_TO_VECTOR as well.
6939 EVT SVT = VT.getScalarType();
6941 for (SDValue Op : Ops) {
6942 EVT OpVT = Op.getValueType();
6943 if (Op.isUndef())
6944 Elts.append(OpVT.getVectorNumElements(), DAG.getUNDEF(SVT));
6945 else if (Op.getOpcode() == ISD::BUILD_VECTOR)
6946 Elts.append(Op->op_begin(), Op->op_end());
6947 else if (Op.getOpcode() == ISD::INSERT_VECTOR_ELT &&
6948 OpVT.getVectorNumElements() == 1 &&
6949 isNullConstant(Op.getOperand(2)))
6950 Elts.push_back(Op.getOperand(1));
6951 else
6952 return SDValue();
6953 }
6954
6955 // BUILD_VECTOR requires all inputs to be of the same type, find the
6956 // maximum type and extend them all.
6957 for (SDValue Op : Elts)
6958 SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT);
6959
6960 if (SVT.bitsGT(VT.getScalarType())) {
6961 for (SDValue &Op : Elts) {
6962 if (Op.isUndef())
6963 Op = DAG.getUNDEF(SVT);
6964 else
6965 Op = DAG.getTargetLoweringInfo().isZExtFree(Op.getValueType(), SVT)
6966 ? DAG.getZExtOrTrunc(Op, DL, SVT)
6967 : DAG.getSExtOrTrunc(Op, DL, SVT);
6968 }
6969 }
6970
6971 SDValue V = DAG.getBuildVector(VT, DL, Elts);
6972 NewSDValueDbgMsg(V, "New node fold concat vectors: ", &DAG);
6973 return V;
6974}
6975
6976/// Gets or creates the specified node.
6977SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT) {
6978 SDVTList VTs = getVTList(VT);
6980 AddNodeIDNode(ID, Opcode, VTs, {});
6981 void *IP = nullptr;
6982 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
6983 return SDValue(E, 0);
6984
6985 auto *N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
6986 CSEMap.InsertNode(N, IP);
6987
6988 InsertNode(N);
6989 SDValue V = SDValue(N, 0);
6990 NewSDValueDbgMsg(V, "Creating new node: ", this);
6991 return V;
6992}
6993
6994SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
6995 SDValue N1) {
6996 SDNodeFlags Flags;
6997 if (Inserter)
6998 Flags = Inserter->getFlags();
6999 return getNode(Opcode, DL, VT, N1, Flags);
7000}
7001
7002SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
7003 SDValue N1, const SDNodeFlags Flags) {
7004 assert(N1.getOpcode() != ISD::DELETED_NODE && "Operand is DELETED_NODE!");
7005
7006 // Constant fold unary operations with a vector integer or float operand.
7007 switch (Opcode) {
7008 default:
7009 // FIXME: Entirely reasonable to perform folding of other unary
7010 // operations here as the need arises.
7011 break;
7012 case ISD::FNEG:
7013 case ISD::FABS:
7014 case ISD::FCEIL:
7015 case ISD::FTRUNC:
7016 case ISD::FFLOOR:
7017 case ISD::FP_EXTEND:
7018 case ISD::FP_TO_SINT:
7019 case ISD::FP_TO_UINT:
7020 case ISD::FP_TO_FP16:
7021 case ISD::FP_TO_BF16:
7022 case ISD::TRUNCATE:
7023 case ISD::ANY_EXTEND:
7024 case ISD::ZERO_EXTEND:
7025 case ISD::SIGN_EXTEND:
7026 case ISD::UINT_TO_FP:
7027 case ISD::SINT_TO_FP:
7028 case ISD::FP16_TO_FP:
7029 case ISD::BF16_TO_FP:
7030 case ISD::BITCAST:
7031 case ISD::ABS:
7033 case ISD::BITREVERSE:
7034 case ISD::BSWAP:
7035 case ISD::CTLZ:
7037 case ISD::CTTZ:
7039 case ISD::CTPOP:
7040 case ISD::CTLS:
7041 case ISD::STEP_VECTOR: {
7042 SDValue Ops = {N1};
7043 if (SDValue Fold = FoldConstantArithmetic(Opcode, DL, VT, Ops))
7044 return Fold;
7045 }
7046 }
7047
7048 unsigned OpOpcode = N1.getNode()->getOpcode();
7049 switch (Opcode) {
7050 case ISD::STEP_VECTOR:
7051 assert(VT.isScalableVector() &&
7052 "STEP_VECTOR can only be used with scalable types");
7053 assert(OpOpcode == ISD::TargetConstant &&
7054 VT.getVectorElementType() == N1.getValueType() &&
7055 "Unexpected step operand");
7056 break;
7057 case ISD::FREEZE:
7058 assert(VT == N1.getValueType() && "Unexpected VT!");
7060 return N1;
7061 break;
7062 case ISD::TokenFactor:
7063 case ISD::MERGE_VALUES:
7065 return N1; // Factor, merge or concat of one node? No need.
7066 case ISD::BUILD_VECTOR: {
7067 // Attempt to simplify BUILD_VECTOR.
7068 SDValue Ops[] = {N1};
7069 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
7070 return V;
7071 break;
7072 }
7073 case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node");
7074 case ISD::FP_EXTEND:
7076 "Invalid FP cast!");
7077 if (N1.getValueType() == VT) return N1; // noop conversion.
7078 assert((!VT.isVector() || VT.getVectorElementCount() ==
7080 "Vector element count mismatch!");
7081 assert(N1.getValueType().bitsLT(VT) && "Invalid fpext node, dst < src!");
7082 if (N1.isUndef())
7083 return getUNDEF(VT);
7084 break;
7085 case ISD::FP_TO_SINT:
7086 case ISD::FP_TO_UINT:
7087 if (N1.isUndef())
7088 return getUNDEF(VT);
7089 break;
7090 case ISD::SINT_TO_FP:
7091 case ISD::UINT_TO_FP:
7092 // [us]itofp(undef) = 0, because the result value is bounded.
7093 if (N1.isUndef())
7094 return getConstantFP(0.0, DL, VT);
7095 break;
7096 case ISD::SIGN_EXTEND:
7097 assert(VT.isInteger() && N1.getValueType().isInteger() &&
7098 "Invalid SIGN_EXTEND!");
7099 assert(VT.isVector() == N1.getValueType().isVector() &&
7100 "SIGN_EXTEND result type type should be vector iff the operand "
7101 "type is vector!");
7102 if (N1.getValueType() == VT) return N1; // noop extension
7103 assert((!VT.isVector() || VT.getVectorElementCount() ==
7105 "Vector element count mismatch!");
7106 assert(N1.getValueType().bitsLT(VT) && "Invalid sext node, dst < src!");
7107 if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND) {
7108 SDNodeFlags Flags;
7109 if (OpOpcode == ISD::ZERO_EXTEND)
7110 Flags.setNonNeg(N1->getFlags().hasNonNeg());
7111 SDValue NewVal = getNode(OpOpcode, DL, VT, N1.getOperand(0), Flags);
7112 transferDbgValues(N1, NewVal);
7113 return NewVal;
7114 }
7115
7116 if (OpOpcode == ISD::POISON)
7117 return getPOISON(VT);
7118
7119 if (N1.isUndef())
7120 // sext(undef) = 0, because the top bits will all be the same.
7121 return getConstant(0, DL, VT);
7122
7123 // Skip unnecessary sext_inreg pattern:
7124 // (sext (trunc x)) -> x iff the upper bits are all signbits.
7125 if (OpOpcode == ISD::TRUNCATE) {
7126 SDValue OpOp = N1.getOperand(0);
7127 if (OpOp.getValueType() == VT) {
7128 unsigned NumSignExtBits =
7130 if (ComputeNumSignBits(OpOp) > NumSignExtBits) {
7131 transferDbgValues(N1, OpOp);
7132 return OpOp;
7133 }
7134 }
7135 }
7136 break;
7137 case ISD::ZERO_EXTEND:
7138 assert(VT.isInteger() && N1.getValueType().isInteger() &&
7139 "Invalid ZERO_EXTEND!");
7140 assert(VT.isVector() == N1.getValueType().isVector() &&
7141 "ZERO_EXTEND result type type should be vector iff the operand "
7142 "type is vector!");
7143 if (N1.getValueType() == VT) return N1; // noop extension
7144 assert((!VT.isVector() || VT.getVectorElementCount() ==
7146 "Vector element count mismatch!");
7147 assert(N1.getValueType().bitsLT(VT) && "Invalid zext node, dst < src!");
7148 if (OpOpcode == ISD::ZERO_EXTEND) { // (zext (zext x)) -> (zext x)
7149 SDNodeFlags Flags;
7150 Flags.setNonNeg(N1->getFlags().hasNonNeg());
7151 SDValue NewVal =
7152 getNode(ISD::ZERO_EXTEND, DL, VT, N1.getOperand(0), Flags);
7153 transferDbgValues(N1, NewVal);
7154 return NewVal;
7155 }
7156
7157 if (OpOpcode == ISD::POISON)
7158 return getPOISON(VT);
7159
7160 if (N1.isUndef())
7161 // zext(undef) = 0, because the top bits will be zero.
7162 return getConstant(0, DL, VT);
7163
7164 // Skip unnecessary zext_inreg pattern:
7165 // (zext (trunc x)) -> x iff the upper bits are known zero.
7166 // TODO: Remove (zext (trunc (and x, c))) exception which some targets
7167 // use to recognise zext_inreg patterns.
7168 if (OpOpcode == ISD::TRUNCATE) {
7169 SDValue OpOp = N1.getOperand(0);
7170 if (OpOp.getValueType() == VT) {
7171 if (OpOp.getOpcode() != ISD::AND) {
7174 if (MaskedValueIsZero(OpOp, HiBits)) {
7175 transferDbgValues(N1, OpOp);
7176 return OpOp;
7177 }
7178 }
7179 }
7180 }
7181 break;
7182 case ISD::ANY_EXTEND:
7183 assert(VT.isInteger() && N1.getValueType().isInteger() &&
7184 "Invalid ANY_EXTEND!");
7185 assert(VT.isVector() == N1.getValueType().isVector() &&
7186 "ANY_EXTEND result type type should be vector iff the operand "
7187 "type is vector!");
7188 if (N1.getValueType() == VT) return N1; // noop extension
7189 assert((!VT.isVector() || VT.getVectorElementCount() ==
7191 "Vector element count mismatch!");
7192 assert(N1.getValueType().bitsLT(VT) && "Invalid anyext node, dst < src!");
7193
7194 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
7195 OpOpcode == ISD::ANY_EXTEND) {
7196 SDNodeFlags Flags;
7197 if (OpOpcode == ISD::ZERO_EXTEND)
7198 Flags.setNonNeg(N1->getFlags().hasNonNeg());
7199 // (ext (zext x)) -> (zext x) and (ext (sext x)) -> (sext x)
7200 return getNode(OpOpcode, DL, VT, N1.getOperand(0), Flags);
7201 }
7202 if (N1.isUndef())
7203 return getUNDEF(VT);
7204
7205 // (ext (trunc x)) -> x
7206 if (OpOpcode == ISD::TRUNCATE) {
7207 SDValue OpOp = N1.getOperand(0);
7208 if (OpOp.getValueType() == VT) {
7209 transferDbgValues(N1, OpOp);
7210 return OpOp;
7211 }
7212 }
7213 break;
7214 case ISD::TRUNCATE:
7215 assert(VT.isInteger() && N1.getValueType().isInteger() &&
7216 "Invalid TRUNCATE!");
7217 assert(VT.isVector() == N1.getValueType().isVector() &&
7218 "TRUNCATE result type type should be vector iff the operand "
7219 "type is vector!");
7220 if (N1.getValueType() == VT) return N1; // noop truncate
7221 assert((!VT.isVector() || VT.getVectorElementCount() ==
7223 "Vector element count mismatch!");
7224 assert(N1.getValueType().bitsGT(VT) && "Invalid truncate node, src < dst!");
7225 if (OpOpcode == ISD::TRUNCATE)
7226 return getNode(ISD::TRUNCATE, DL, VT, N1.getOperand(0));
7227 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
7228 OpOpcode == ISD::ANY_EXTEND) {
7229 // If the source is smaller than the dest, we still need an extend.
7231 VT.getScalarType())) {
7232 SDNodeFlags Flags;
7233 if (OpOpcode == ISD::ZERO_EXTEND)
7234 Flags.setNonNeg(N1->getFlags().hasNonNeg());
7235 return getNode(OpOpcode, DL, VT, N1.getOperand(0), Flags);
7236 }
7237 if (N1.getOperand(0).getValueType().bitsGT(VT))
7238 return getNode(ISD::TRUNCATE, DL, VT, N1.getOperand(0));
7239 return N1.getOperand(0);
7240 }
7241 if (N1.isUndef())
7242 return getUNDEF(VT);
7243 if (OpOpcode == ISD::VSCALE && !NewNodesMustHaveLegalTypes)
7244 return getVScale(DL, VT,
7246 break;
7250 assert(VT.isVector() && "This DAG node is restricted to vector types.");
7251 assert(N1.getValueType().bitsLE(VT) &&
7252 "The input must be the same size or smaller than the result.");
7255 "The destination vector type must have fewer lanes than the input.");
7256 break;
7257 case ISD::ABS:
7258 assert(VT.isInteger() && VT == N1.getValueType() && "Invalid ABS!");
7259 if (N1.isUndef())
7260 return getConstant(0, DL, VT);
7261 break;
7263 assert(VT.isInteger() && VT == N1.getValueType() &&
7264 "Invalid ABS_MIN_POISON!");
7265 if (N1.isUndef())
7266 return getConstant(0, DL, VT);
7267 break;
7268 case ISD::BSWAP:
7269 assert(VT.isInteger() && VT == N1.getValueType() && "Invalid BSWAP!");
7270 assert((VT.getScalarSizeInBits() % 16 == 0) &&
7271 "BSWAP types must be a multiple of 16 bits!");
7272 if (N1.isUndef())
7273 return getUNDEF(VT);
7274 // bswap(bswap(X)) -> X.
7275 if (OpOpcode == ISD::BSWAP)
7276 return N1.getOperand(0);
7277 break;
7278 case ISD::BITREVERSE:
7279 assert(VT.isInteger() && VT == N1.getValueType() && "Invalid BITREVERSE!");
7280 if (N1.isUndef())
7281 return getUNDEF(VT);
7282 break;
7283 case ISD::BITCAST:
7285 "Cannot BITCAST between types of different sizes!");
7286 if (VT == N1.getValueType()) return N1; // noop conversion.
7287 if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x)
7288 return getNode(ISD::BITCAST, DL, VT, N1.getOperand(0));
7289 if (N1.isUndef())
7290 return getUNDEF(VT);
7291 break;
7293 assert(VT.isVector() && !N1.getValueType().isVector() &&
7294 (VT.getVectorElementType() == N1.getValueType() ||
7296 N1.getValueType().isInteger() &&
7298 "Illegal SCALAR_TO_VECTOR node!");
7299 if (N1.isUndef())
7300 return getUNDEF(VT);
7301 // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined.
7302 if (OpOpcode == ISD::EXTRACT_VECTOR_ELT &&
7304 N1.getConstantOperandVal(1) == 0 &&
7305 N1.getOperand(0).getValueType() == VT)
7306 return N1.getOperand(0);
7307 break;
7308 case ISD::FNEG:
7309 // Negation of an unknown bag of bits is still completely undefined.
7310 if (N1.isUndef())
7311 return getUNDEF(VT);
7312
7313 if (OpOpcode == ISD::FNEG) // --X -> X
7314 return N1.getOperand(0);
7315 break;
7316 case ISD::FABS:
7317 if (OpOpcode == ISD::FNEG) // abs(-X) -> abs(X)
7318 return getNode(ISD::FABS, DL, VT, N1.getOperand(0));
7319 break;
7320 case ISD::VSCALE:
7321 assert(VT == N1.getValueType() && "Unexpected VT!");
7322 break;
7323 case ISD::CTPOP:
7324 if (N1.getValueType().getScalarType() == MVT::i1)
7325 return N1;
7326 break;
7327 case ISD::CTLZ:
7328 case ISD::CTTZ:
7329 if (N1.getValueType().getScalarType() == MVT::i1)
7330 return getNOT(DL, N1, N1.getValueType());
7331 break;
7332 case ISD::CTLS:
7333 if (N1.getValueType().getScalarType() == MVT::i1)
7334 return getConstant(0, DL, VT);
7335 break;
7336 case ISD::VECREDUCE_ADD:
7337 if (N1.getValueType().getScalarType() == MVT::i1)
7338 return getNode(ISD::VECREDUCE_XOR, DL, VT, N1);
7339 break;
7342 if (N1.getValueType().getScalarType() == MVT::i1)
7343 return getNode(ISD::VECREDUCE_OR, DL, VT, N1);
7344 break;
7347 if (N1.getValueType().getScalarType() == MVT::i1)
7348 return getNode(ISD::VECREDUCE_AND, DL, VT, N1);
7349 break;
7350 case ISD::SPLAT_VECTOR:
7351 assert(VT.isVector() && "Wrong return type!");
7352 // FIXME: Hexagon uses i32 scalar for a floating point zero vector so allow
7353 // that for now.
7355 (VT.isFloatingPoint() && N1.getValueType() == MVT::i32) ||
7357 N1.getValueType().isInteger() &&
7359 "Wrong operand type!");
7360 break;
7361 }
7362
7363 SDNode *N;
7364 SDVTList VTs = getVTList(VT);
7365 SDValue Ops[] = {N1};
7366 if (VT != MVT::Glue) { // Don't CSE glue producing nodes
7368 AddNodeIDNode(ID, Opcode, VTs, Ops);
7369 void *IP = nullptr;
7370 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
7371 E->intersectFlagsWith(Flags);
7372 return SDValue(E, 0);
7373 }
7374
7375 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
7376 N->setFlags(Flags);
7377 createOperands(N, Ops);
7378 CSEMap.InsertNode(N, IP);
7379 } else {
7380 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
7381 createOperands(N, Ops);
7382 }
7383
7384 InsertNode(N);
7385 SDValue V = SDValue(N, 0);
7386 NewSDValueDbgMsg(V, "Creating new node: ", this);
7387 return V;
7388}
7389
7390static std::optional<APInt> FoldValue(unsigned Opcode, const APInt &C1,
7391 const APInt &C2) {
7392 switch (Opcode) {
7393 case ISD::ADD: return C1 + C2;
7394 case ISD::SUB: return C1 - C2;
7395 case ISD::MUL: return C1 * C2;
7396 case ISD::AND: return C1 & C2;
7397 case ISD::OR: return C1 | C2;
7398 case ISD::XOR: return C1 ^ C2;
7399 case ISD::SHL: return C1 << C2;
7400 case ISD::SRL: return C1.lshr(C2);
7401 case ISD::SRA: return C1.ashr(C2);
7402 case ISD::ROTL: return C1.rotl(C2);
7403 case ISD::ROTR: return C1.rotr(C2);
7404 case ISD::SMIN: return C1.sle(C2) ? C1 : C2;
7405 case ISD::SMAX: return C1.sge(C2) ? C1 : C2;
7406 case ISD::UMIN: return C1.ule(C2) ? C1 : C2;
7407 case ISD::UMAX: return C1.uge(C2) ? C1 : C2;
7408 case ISD::SADDSAT: return C1.sadd_sat(C2);
7409 case ISD::UADDSAT: return C1.uadd_sat(C2);
7410 case ISD::SSUBSAT: return C1.ssub_sat(C2);
7411 case ISD::USUBSAT: return C1.usub_sat(C2);
7412 case ISD::SSHLSAT: return C1.sshl_sat(C2);
7413 case ISD::USHLSAT: return C1.ushl_sat(C2);
7414 case ISD::UDIV:
7415 if (!C2.getBoolValue())
7416 break;
7417 return C1.udiv(C2);
7418 case ISD::UREM:
7419 if (!C2.getBoolValue())
7420 break;
7421 return C1.urem(C2);
7422 case ISD::SDIV:
7423 if (!C2.getBoolValue())
7424 break;
7425 return C1.sdiv(C2);
7426 case ISD::SREM:
7427 if (!C2.getBoolValue())
7428 break;
7429 return C1.srem(C2);
7430 case ISD::AVGFLOORS:
7431 return APIntOps::avgFloorS(C1, C2);
7432 case ISD::AVGFLOORU:
7433 return APIntOps::avgFloorU(C1, C2);
7434 case ISD::AVGCEILS:
7435 return APIntOps::avgCeilS(C1, C2);
7436 case ISD::AVGCEILU:
7437 return APIntOps::avgCeilU(C1, C2);
7438 case ISD::ABDS:
7439 return APIntOps::abds(C1, C2);
7440 case ISD::ABDU:
7441 return APIntOps::abdu(C1, C2);
7442 case ISD::MULHS:
7443 return APIntOps::mulhs(C1, C2);
7444 case ISD::MULHU:
7445 return APIntOps::mulhu(C1, C2);
7446 case ISD::CLMUL:
7447 return APIntOps::clmul(C1, C2);
7448 case ISD::CLMULR:
7449 return APIntOps::clmulr(C1, C2);
7450 case ISD::CLMULH:
7451 return APIntOps::clmulh(C1, C2);
7452 }
7453 return std::nullopt;
7454}
7455// Handle constant folding with UNDEF.
7456// TODO: Handle more cases.
7457static std::optional<APInt> FoldValueWithUndef(unsigned Opcode, const APInt &C1,
7458 bool IsUndef1, const APInt &C2,
7459 bool IsUndef2) {
7460 if (!(IsUndef1 || IsUndef2))
7461 return FoldValue(Opcode, C1, C2);
7462
7463 // Fold and(x, undef) -> 0
7464 // Fold mul(x, undef) -> 0
7465 if (Opcode == ISD::AND || Opcode == ISD::MUL)
7466 return APInt::getZero(C1.getBitWidth());
7467
7468 return std::nullopt;
7469}
7470
7472 const GlobalAddressSDNode *GA,
7473 const SDNode *N2) {
7474 if (GA->getOpcode() != ISD::GlobalAddress)
7475 return SDValue();
7476 if (!TLI->isOffsetFoldingLegal(GA))
7477 return SDValue();
7478 auto *C2 = dyn_cast<ConstantSDNode>(N2);
7479 if (!C2)
7480 return SDValue();
7481 int64_t Offset = C2->getSExtValue();
7482 switch (Opcode) {
7483 case ISD::ADD:
7484 case ISD::PTRADD:
7485 break;
7486 case ISD::SUB: Offset = -uint64_t(Offset); break;
7487 default: return SDValue();
7488 }
7489 return getGlobalAddress(GA->getGlobal(), SDLoc(C2), VT,
7490 GA->getOffset() + uint64_t(Offset));
7491}
7492
7494 switch (Opcode) {
7495 case ISD::SDIV:
7496 case ISD::UDIV:
7497 case ISD::SREM:
7498 case ISD::UREM: {
7499 // If a divisor is zero/undef or any element of a divisor vector is
7500 // zero/undef, the whole op is undef.
7501 assert(Ops.size() == 2 && "Div/rem should have 2 operands");
7502 SDValue Divisor = Ops[1];
7503 if (Divisor.isUndef() || isNullConstant(Divisor))
7504 return true;
7505
7506 return ISD::isBuildVectorOfConstantSDNodes(Divisor.getNode()) &&
7507 llvm::any_of(Divisor->op_values(),
7508 [](SDValue V) { return V.isUndef() ||
7509 isNullConstant(V); });
7510 // TODO: Handle signed overflow.
7511 }
7512 // TODO: Handle oversized shifts.
7513 default:
7514 return false;
7515 }
7516}
7517
7520 SDNodeFlags Flags) {
7521 // If the opcode is a target-specific ISD node, there's nothing we can
7522 // do here and the operand rules may not line up with the below, so
7523 // bail early.
7524 // We can't create a scalar CONCAT_VECTORS so skip it. It will break
7525 // for concats involving SPLAT_VECTOR. Concats of BUILD_VECTORS are handled by
7526 // foldCONCAT_VECTORS in getNode before this is called.
7527 if (Opcode >= ISD::BUILTIN_OP_END || Opcode == ISD::CONCAT_VECTORS)
7528 return SDValue();
7529
7530 unsigned NumOps = Ops.size();
7531 if (NumOps == 0)
7532 return SDValue();
7533
7534 if (isUndef(Opcode, Ops))
7535 return getUNDEF(VT);
7536
7537 // Handle unary special cases.
7538 if (NumOps == 1) {
7539 SDValue N1 = Ops[0];
7540
7541 // Constant fold unary operations with an integer constant operand. Even
7542 // opaque constant will be folded, because the folding of unary operations
7543 // doesn't create new constants with different values. Nevertheless, the
7544 // opaque flag is preserved during folding to prevent future folding with
7545 // other constants.
7546 if (auto *C = dyn_cast<ConstantSDNode>(N1)) {
7547 const APInt &Val = C->getAPIntValue();
7548 switch (Opcode) {
7549 case ISD::SIGN_EXTEND:
7550 return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT,
7551 C->isTargetOpcode(), C->isOpaque());
7552 case ISD::TRUNCATE:
7553 if (C->isOpaque())
7554 break;
7555 [[fallthrough]];
7556 case ISD::ZERO_EXTEND:
7557 return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT,
7558 C->isTargetOpcode(), C->isOpaque());
7559 case ISD::ANY_EXTEND:
7560 // Some targets like RISCV prefer to sign extend some types.
7561 if (TLI->isSExtCheaperThanZExt(N1.getValueType(), VT))
7562 return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT,
7563 C->isTargetOpcode(), C->isOpaque());
7564 return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT,
7565 C->isTargetOpcode(), C->isOpaque());
7566 case ISD::ABS:
7567 return getConstant(Val.abs(), DL, VT, C->isTargetOpcode(),
7568 C->isOpaque());
7570 if (Val.isMinSignedValue())
7571 return getPOISON(VT);
7572 return getConstant(Val.abs(), DL, VT, C->isTargetOpcode(),
7573 C->isOpaque());
7574 case ISD::BITREVERSE:
7575 return getConstant(Val.reverseBits(), DL, VT, C->isTargetOpcode(),
7576 C->isOpaque());
7577 case ISD::BSWAP:
7578 return getConstant(Val.byteSwap(), DL, VT, C->isTargetOpcode(),
7579 C->isOpaque());
7580 case ISD::CTPOP:
7581 return getConstant(Val.popcount(), DL, VT, C->isTargetOpcode(),
7582 C->isOpaque());
7583 case ISD::CTLZ:
7585 return getConstant(Val.countl_zero(), DL, VT, C->isTargetOpcode(),
7586 C->isOpaque());
7587 case ISD::CTTZ:
7589 return getConstant(Val.countr_zero(), DL, VT, C->isTargetOpcode(),
7590 C->isOpaque());
7591 case ISD::CTLS:
7592 // CTLS returns the number of extra sign bits so subtract one.
7593 return getConstant(Val.getNumSignBits() - 1, DL, VT,
7594 C->isTargetOpcode(), C->isOpaque());
7595 case ISD::UINT_TO_FP:
7596 case ISD::SINT_TO_FP: {
7598 (void)FPV.convertFromAPInt(Val, Opcode == ISD::SINT_TO_FP,
7600 return getConstantFP(FPV, DL, VT);
7601 }
7602 case ISD::FP16_TO_FP:
7603 case ISD::BF16_TO_FP: {
7604 bool Ignored;
7605 APFloat FPV(Opcode == ISD::FP16_TO_FP ? APFloat::IEEEhalf()
7606 : APFloat::BFloat(),
7607 (Val.getBitWidth() == 16) ? Val : Val.trunc(16));
7608
7609 // This can return overflow, underflow, or inexact; we don't care.
7610 // FIXME need to be more flexible about rounding mode.
7612 &Ignored);
7613 return getConstantFP(FPV, DL, VT);
7614 }
7615 case ISD::STEP_VECTOR:
7616 if (SDValue V = FoldSTEP_VECTOR(DL, VT, N1, *this))
7617 return V;
7618 break;
7619 case ISD::BITCAST:
7620 if (VT == MVT::f16 && C->getValueType(0) == MVT::i16)
7621 return getConstantFP(APFloat(APFloat::IEEEhalf(), Val), DL, VT);
7622 if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)
7623 return getConstantFP(APFloat(APFloat::IEEEsingle(), Val), DL, VT);
7624 if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)
7625 return getConstantFP(APFloat(APFloat::IEEEdouble(), Val), DL, VT);
7626 if (VT == MVT::f128 && C->getValueType(0) == MVT::i128)
7627 return getConstantFP(APFloat(APFloat::IEEEquad(), Val), DL, VT);
7628 break;
7629 }
7630 }
7631
7632 // Constant fold unary operations with a floating point constant operand.
7633 if (auto *C = dyn_cast<ConstantFPSDNode>(N1)) {
7634 APFloat V = C->getValueAPF(); // make copy
7635 switch (Opcode) {
7636 case ISD::FNEG:
7637 V.changeSign();
7638 return getConstantFP(V, DL, VT);
7639 case ISD::FABS:
7640 V.clearSign();
7641 return getConstantFP(V, DL, VT);
7642 case ISD::FCEIL: {
7643 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive);
7645 return getConstantFP(V, DL, VT);
7646 return SDValue();
7647 }
7648 case ISD::FTRUNC: {
7649 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero);
7651 return getConstantFP(V, DL, VT);
7652 return SDValue();
7653 }
7654 case ISD::FFLOOR: {
7655 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative);
7657 return getConstantFP(V, DL, VT);
7658 return SDValue();
7659 }
7660 case ISD::FP_EXTEND: {
7661 bool ignored;
7662 // This can return overflow, underflow, or inexact; we don't care.
7663 // FIXME need to be more flexible about rounding mode.
7664 (void)V.convert(VT.getFltSemantics(), APFloat::rmNearestTiesToEven,
7665 &ignored);
7666 return getConstantFP(V, DL, VT);
7667 }
7668 case ISD::FP_TO_SINT:
7669 case ISD::FP_TO_UINT: {
7670 bool ignored;
7671 APSInt IntVal(VT.getSizeInBits(), Opcode == ISD::FP_TO_UINT);
7672 // FIXME need to be more flexible about rounding mode.
7674 V.convertToInteger(IntVal, APFloat::rmTowardZero, &ignored);
7675 if (s == APFloat::opInvalidOp) // inexact is OK, in fact usual
7676 break;
7677 return getConstant(IntVal, DL, VT);
7678 }
7679 case ISD::FP_TO_FP16:
7680 case ISD::FP_TO_BF16: {
7681 bool Ignored;
7682 // This can return overflow, underflow, or inexact; we don't care.
7683 // FIXME need to be more flexible about rounding mode.
7684 (void)V.convert(Opcode == ISD::FP_TO_FP16 ? APFloat::IEEEhalf()
7685 : APFloat::BFloat(),
7687 return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT);
7688 }
7689 case ISD::BITCAST:
7690 if (VT == MVT::i16 && C->getValueType(0) == MVT::f16)
7691 return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL,
7692 VT);
7693 if (VT == MVT::i16 && C->getValueType(0) == MVT::bf16)
7694 return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL,
7695 VT);
7696 if (VT == MVT::i32 && C->getValueType(0) == MVT::f32)
7697 return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), DL,
7698 VT);
7699 if (VT == MVT::i64 && C->getValueType(0) == MVT::f64)
7700 return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT);
7701 break;
7702 }
7703 }
7704
7705 // Early-out if we failed to constant fold a bitcast.
7706 if (Opcode == ISD::BITCAST)
7707 return SDValue();
7708 }
7709
7710 // Handle binops special cases.
7711 if (NumOps == 2) {
7712 if (SDValue CFP = foldConstantFPMath(Opcode, DL, VT, Ops))
7713 return CFP;
7714
7715 if (auto *C1 = dyn_cast<ConstantSDNode>(Ops[0])) {
7716 if (auto *C2 = dyn_cast<ConstantSDNode>(Ops[1])) {
7717 if (C1->isOpaque() || C2->isOpaque())
7718 return SDValue();
7719
7720 std::optional<APInt> FoldAttempt =
7721 FoldValue(Opcode, C1->getAPIntValue(), C2->getAPIntValue());
7722 if (!FoldAttempt)
7723 return SDValue();
7724
7725 SDValue Folded = getConstant(*FoldAttempt, DL, VT);
7726 assert((!Folded || !VT.isVector()) &&
7727 "Can't fold vectors ops with scalar operands");
7728 return Folded;
7729 }
7730 }
7731
7732 // fold (add Sym, c) -> Sym+c
7734 return FoldSymbolOffset(Opcode, VT, GA, Ops[1].getNode());
7735 if (TLI->isCommutativeBinOp(Opcode))
7737 return FoldSymbolOffset(Opcode, VT, GA, Ops[0].getNode());
7738
7739 // fold (sext_in_reg c1) -> c2
7740 if (Opcode == ISD::SIGN_EXTEND_INREG) {
7741 EVT EVT = cast<VTSDNode>(Ops[1])->getVT();
7742
7743 auto SignExtendInReg = [&](APInt Val, llvm::EVT ConstantVT) {
7744 unsigned FromBits = EVT.getScalarSizeInBits();
7745 Val <<= Val.getBitWidth() - FromBits;
7746 Val.ashrInPlace(Val.getBitWidth() - FromBits);
7747 return getConstant(Val, DL, ConstantVT);
7748 };
7749
7750 if (auto *C1 = dyn_cast<ConstantSDNode>(Ops[0])) {
7751 const APInt &Val = C1->getAPIntValue();
7752 return SignExtendInReg(Val, VT);
7753 }
7754
7756 SmallVector<SDValue, 8> ScalarOps;
7757 llvm::EVT OpVT = Ops[0].getOperand(0).getValueType();
7758 for (int I = 0, E = VT.getVectorNumElements(); I != E; ++I) {
7759 SDValue Op = Ops[0].getOperand(I);
7760 if (Op.isUndef()) {
7761 ScalarOps.push_back(getUNDEF(OpVT));
7762 continue;
7763 }
7764 const APInt &Val = cast<ConstantSDNode>(Op)->getAPIntValue();
7765 ScalarOps.push_back(SignExtendInReg(Val, OpVT));
7766 }
7767 return getBuildVector(VT, DL, ScalarOps);
7768 }
7769
7770 if (Ops[0].getOpcode() == ISD::SPLAT_VECTOR &&
7771 isa<ConstantSDNode>(Ops[0].getOperand(0)))
7772 return getNode(ISD::SPLAT_VECTOR, DL, VT,
7773 SignExtendInReg(Ops[0].getConstantOperandAPInt(0),
7774 Ops[0].getOperand(0).getValueType()));
7775 }
7776 }
7777
7778 // Handle fshl/fshr special cases.
7779 if (Opcode == ISD::FSHL || Opcode == ISD::FSHR) {
7780 auto *C1 = dyn_cast<ConstantSDNode>(Ops[0]);
7781 auto *C2 = dyn_cast<ConstantSDNode>(Ops[1]);
7782 auto *C3 = dyn_cast<ConstantSDNode>(Ops[2]);
7783
7784 if (C1 && C2 && C3) {
7785 if (C1->isOpaque() || C2->isOpaque() || C3->isOpaque())
7786 return SDValue();
7787 const APInt &V1 = C1->getAPIntValue(), &V2 = C2->getAPIntValue(),
7788 &V3 = C3->getAPIntValue();
7789
7790 APInt FoldedVal = Opcode == ISD::FSHL ? APIntOps::fshl(V1, V2, V3)
7791 : APIntOps::fshr(V1, V2, V3);
7792 return getConstant(FoldedVal, DL, VT);
7793 }
7794 }
7795
7796 // Handle fma/fmad special cases.
7797 if (Opcode == ISD::FMA || Opcode == ISD::FMAD || Opcode == ISD::FMULADD) {
7798 assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
7799 assert(Ops[0].getValueType() == VT && Ops[1].getValueType() == VT &&
7800 Ops[2].getValueType() == VT && "FMA types must match!");
7804 if (C1 && C2 && C3) {
7805 APFloat V1 = C1->getValueAPF();
7806 const APFloat &V2 = C2->getValueAPF();
7807 const APFloat &V3 = C3->getValueAPF();
7808 if (Opcode == ISD::FMAD || Opcode == ISD::FMULADD) {
7811 } else
7813 return getConstantFP(V1, DL, VT);
7814 }
7815 }
7816
7817 // This is for vector folding only from here on.
7818 if (!VT.isVector())
7819 return SDValue();
7820
7821 ElementCount NumElts = VT.getVectorElementCount();
7822
7823 // See if we can fold through any bitcasted integer ops.
7824 if (NumOps == 2 && VT.isFixedLengthVector() && VT.isInteger() &&
7825 Ops[0].getValueType() == VT && Ops[1].getValueType() == VT &&
7826 (Ops[0].getOpcode() == ISD::BITCAST ||
7827 Ops[1].getOpcode() == ISD::BITCAST)) {
7830 auto *BV1 = dyn_cast<BuildVectorSDNode>(N1);
7831 auto *BV2 = dyn_cast<BuildVectorSDNode>(N2);
7832 if (BV1 && BV2 && N1.getValueType().isInteger() &&
7833 N2.getValueType().isInteger()) {
7834 bool IsLE = getDataLayout().isLittleEndian();
7835 unsigned EltBits = VT.getScalarSizeInBits();
7836 SmallVector<APInt> RawBits1, RawBits2;
7837 BitVector UndefElts1, UndefElts2;
7838 if (BV1->getConstantRawBits(IsLE, EltBits, RawBits1, UndefElts1) &&
7839 BV2->getConstantRawBits(IsLE, EltBits, RawBits2, UndefElts2)) {
7840 SmallVector<APInt> RawBits;
7841 for (unsigned I = 0, E = NumElts.getFixedValue(); I != E; ++I) {
7842 std::optional<APInt> Fold = FoldValueWithUndef(
7843 Opcode, RawBits1[I], UndefElts1[I], RawBits2[I], UndefElts2[I]);
7844 if (!Fold)
7845 break;
7846 RawBits.push_back(*Fold);
7847 }
7848 if (RawBits.size() == NumElts.getFixedValue()) {
7849 // We have constant folded, but we might need to cast this again back
7850 // to the original (possibly legalized) type.
7851 EVT BVVT, BVEltVT;
7852 if (N1.getValueType() == VT) {
7853 BVVT = N1.getValueType();
7854 BVEltVT = BV1->getOperand(0).getValueType();
7855 } else {
7856 BVVT = N2.getValueType();
7857 BVEltVT = BV2->getOperand(0).getValueType();
7858 }
7859 unsigned BVEltBits = BVEltVT.getSizeInBits();
7860 SmallVector<APInt> DstBits;
7861 BitVector DstUndefs;
7863 DstBits, RawBits, DstUndefs,
7864 BitVector(RawBits.size(), false));
7865 SmallVector<SDValue> Ops(DstBits.size(), getUNDEF(BVEltVT));
7866 for (unsigned I = 0, E = DstBits.size(); I != E; ++I) {
7867 if (DstUndefs[I])
7868 continue;
7869 Ops[I] = getConstant(DstBits[I].sext(BVEltBits), DL, BVEltVT);
7870 }
7871 return getBitcast(VT, getBuildVector(BVVT, DL, Ops));
7872 }
7873 }
7874 }
7875 // Logic ops can be folded from raw integer bits - mainly for AVX512 masks.
7876 if (ISD::isBitwiseLogicOp(Opcode) && isa<ConstantSDNode>(N1) &&
7877 isa<ConstantSDNode>(N2)) {
7878 if (SDValue Res = FoldConstantArithmetic(Opcode, DL, N1.getValueType(),
7879 {N1, N2}, Flags))
7880 return getBitcast(VT, Res);
7881 }
7882 }
7883
7884 // Fold (mul step_vector(C0), C1) to (step_vector(C0 * C1)).
7885 // (shl step_vector(C0), C1) -> (step_vector(C0 << C1))
7886 if ((Opcode == ISD::MUL || Opcode == ISD::SHL) &&
7887 Ops[0].getOpcode() == ISD::STEP_VECTOR) {
7888 APInt RHSVal;
7889 if (ISD::isConstantSplatVector(Ops[1].getNode(), RHSVal)) {
7890 APInt NewStep = Opcode == ISD::MUL
7891 ? Ops[0].getConstantOperandAPInt(0) * RHSVal
7892 : Ops[0].getConstantOperandAPInt(0) << RHSVal;
7893 return getStepVector(DL, VT, NewStep);
7894 }
7895 }
7896
7897 auto IsScalarOrSameVectorSize = [NumElts](const SDValue &Op) {
7898 return !Op.getValueType().isVector() ||
7899 Op.getValueType().getVectorElementCount() == NumElts;
7900 };
7901
7902 auto IsBuildVectorSplatVectorOrUndef = [](const SDValue &Op) {
7903 return Op.isUndef() || Op.getOpcode() == ISD::CONDCODE ||
7904 Op.getOpcode() == ISD::BUILD_VECTOR ||
7905 Op.getOpcode() == ISD::SPLAT_VECTOR;
7906 };
7907
7908 // All operands must be vector types with the same number of elements as
7909 // the result type and must be either UNDEF or a build/splat vector
7910 // or UNDEF scalars.
7911 if (!llvm::all_of(Ops, IsBuildVectorSplatVectorOrUndef) ||
7912 !llvm::all_of(Ops, IsScalarOrSameVectorSize))
7913 return SDValue();
7914
7915 // If we are comparing vectors, then the result needs to be a i1 boolean that
7916 // is then extended back to the legal result type depending on how booleans
7917 // are represented.
7918 EVT SVT = (Opcode == ISD::SETCC ? MVT::i1 : VT.getScalarType());
7919 ISD::NodeType ExtendCode =
7920 (Opcode == ISD::SETCC && SVT != VT.getScalarType())
7921 ? TargetLowering::getExtendForContent(TLI->getBooleanContents(VT))
7923
7924 // Find legal integer scalar type for constant promotion and
7925 // ensure that its scalar size is at least as large as source.
7926 EVT LegalSVT = VT.getScalarType();
7927 if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) {
7928 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
7929 if (LegalSVT.bitsLT(VT.getScalarType()))
7930 return SDValue();
7931 }
7932
7933 // For scalable vector types we know we're dealing with SPLAT_VECTORs. We
7934 // only have one operand to check. For fixed-length vector types we may have
7935 // a combination of BUILD_VECTOR and SPLAT_VECTOR.
7936 unsigned NumVectorElts = NumElts.isScalable() ? 1 : NumElts.getFixedValue();
7937
7938 // Constant fold each scalar lane separately.
7939 SmallVector<SDValue, 4> ScalarResults;
7940 for (unsigned I = 0; I != NumVectorElts; I++) {
7941 SmallVector<SDValue, 4> ScalarOps;
7942 for (SDValue Op : Ops) {
7943 EVT InSVT = Op.getValueType().getScalarType();
7944 if (Op.getOpcode() != ISD::BUILD_VECTOR &&
7945 Op.getOpcode() != ISD::SPLAT_VECTOR) {
7946 if (Op.isUndef())
7947 ScalarOps.push_back(getUNDEF(InSVT));
7948 else
7949 ScalarOps.push_back(Op);
7950 continue;
7951 }
7952
7953 SDValue ScalarOp =
7954 Op.getOperand(Op.getOpcode() == ISD::SPLAT_VECTOR ? 0 : I);
7955 EVT ScalarVT = ScalarOp.getValueType();
7956
7957 // Build vector (integer) scalar operands may need implicit
7958 // truncation - do this before constant folding.
7959 if (ScalarVT.isInteger() && ScalarVT.bitsGT(InSVT)) {
7960 // Don't create illegally-typed nodes unless they're constants or undef
7961 // - if we fail to constant fold we can't guarantee the (dead) nodes
7962 // we're creating will be cleaned up before being visited for
7963 // legalization.
7964 if (NewNodesMustHaveLegalTypes && !ScalarOp.isUndef() &&
7965 !isa<ConstantSDNode>(ScalarOp) &&
7966 TLI->getTypeAction(*getContext(), InSVT) !=
7968 return SDValue();
7969 ScalarOp = getNode(ISD::TRUNCATE, DL, InSVT, ScalarOp);
7970 }
7971
7972 ScalarOps.push_back(ScalarOp);
7973 }
7974
7975 // Constant fold the scalar operands.
7976 SDValue ScalarResult = getNode(Opcode, DL, SVT, ScalarOps, Flags);
7977
7978 // Scalar folding only succeeded if the result is a constant or UNDEF.
7979 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
7980 ScalarResult.getOpcode() != ISD::ConstantFP)
7981 return SDValue();
7982
7983 // Legalize the (integer) scalar constant if necessary. We only do
7984 // this once we know the folding succeeded, since otherwise we would
7985 // get a node with illegal type which has a user.
7986 if (LegalSVT != SVT)
7987 ScalarResult = getNode(ExtendCode, DL, LegalSVT, ScalarResult);
7988
7989 ScalarResults.push_back(ScalarResult);
7990 }
7991
7992 SDValue V = NumElts.isScalable() ? getSplatVector(VT, DL, ScalarResults[0])
7993 : getBuildVector(VT, DL, ScalarResults);
7994 NewSDValueDbgMsg(V, "New node fold constant vector: ", this);
7995 return V;
7996}
7997
8000 // TODO: Add support for unary/ternary fp opcodes.
8001 if (Ops.size() != 2)
8002 return SDValue();
8003
8004 // TODO: We don't do any constant folding for strict FP opcodes here, but we
8005 // should. That will require dealing with a potentially non-default
8006 // rounding mode, checking the "opStatus" return value from the APFloat
8007 // math calculations, and possibly other variations.
8008 SDValue N1 = Ops[0];
8009 SDValue N2 = Ops[1];
8010 ConstantFPSDNode *N1CFP = isConstOrConstSplatFP(N1, /*AllowUndefs*/ false);
8011 ConstantFPSDNode *N2CFP = isConstOrConstSplatFP(N2, /*AllowUndefs*/ false);
8012 if (N1CFP && N2CFP) {
8013 APFloat C1 = N1CFP->getValueAPF(); // make copy
8014 const APFloat &C2 = N2CFP->getValueAPF();
8015 switch (Opcode) {
8016 case ISD::FADD:
8018 return getConstantFP(C1, DL, VT);
8019 case ISD::FSUB:
8021 return getConstantFP(C1, DL, VT);
8022 case ISD::FMUL:
8024 return getConstantFP(C1, DL, VT);
8025 case ISD::FDIV:
8027 return getConstantFP(C1, DL, VT);
8028 case ISD::FREM:
8029 C1.mod(C2);
8030 return getConstantFP(C1, DL, VT);
8031 case ISD::FCOPYSIGN:
8032 C1.copySign(C2);
8033 return getConstantFP(C1, DL, VT);
8034 case ISD::FMINNUM:
8035 return getConstantFP(minnum(C1, C2), DL, VT);
8036 case ISD::FMAXNUM:
8037 return getConstantFP(maxnum(C1, C2), DL, VT);
8038 case ISD::FMINIMUM:
8039 return getConstantFP(minimum(C1, C2), DL, VT);
8040 case ISD::FMAXIMUM:
8041 return getConstantFP(maximum(C1, C2), DL, VT);
8042 case ISD::FMINIMUMNUM:
8043 return getConstantFP(minimumnum(C1, C2), DL, VT);
8044 case ISD::FMAXIMUMNUM:
8045 return getConstantFP(maximumnum(C1, C2), DL, VT);
8046 default: break;
8047 }
8048 }
8049 if (N1CFP && Opcode == ISD::FP_ROUND) {
8050 APFloat C1 = N1CFP->getValueAPF(); // make copy
8051 bool Unused;
8052 // This can return overflow, underflow, or inexact; we don't care.
8053 // FIXME need to be more flexible about rounding mode.
8055 &Unused);
8056 return getConstantFP(C1, DL, VT);
8057 }
8058
8059 switch (Opcode) {
8060 case ISD::FSUB:
8061 // -0.0 - undef --> undef (consistent with "fneg undef")
8062 if (ConstantFPSDNode *N1C = isConstOrConstSplatFP(N1, /*AllowUndefs*/ true))
8063 if (N1C && N1C->getValueAPF().isNegZero() && N2.isUndef())
8064 return getUNDEF(VT);
8065 [[fallthrough]];
8066
8067 case ISD::FADD:
8068 case ISD::FMUL:
8069 case ISD::FDIV:
8070 case ISD::FREM:
8071 // If both operands are undef, the result is undef. If 1 operand is undef,
8072 // the result is NaN. This should match the behavior of the IR optimizer.
8073 if (N1.isUndef() && N2.isUndef())
8074 return getUNDEF(VT);
8075 if (N1.isUndef() || N2.isUndef())
8077 }
8078 return SDValue();
8079}
8080
8082 const SDLoc &DL, EVT DstEltVT) {
8083 EVT SrcEltVT = BV->getValueType(0).getVectorElementType();
8084
8085 // If this is already the right type, we're done.
8086 if (SrcEltVT == DstEltVT)
8087 return SDValue(BV, 0);
8088
8089 unsigned SrcBitSize = SrcEltVT.getSizeInBits();
8090 unsigned DstBitSize = DstEltVT.getSizeInBits();
8091
8092 // If this is a conversion of N elements of one type to N elements of another
8093 // type, convert each element. This handles FP<->INT cases.
8094 if (SrcBitSize == DstBitSize) {
8096 for (SDValue Op : BV->op_values()) {
8097 // If the vector element type is not legal, the BUILD_VECTOR operands
8098 // are promoted and implicitly truncated. Make that explicit here.
8099 if (Op.getValueType() != SrcEltVT)
8100 Op = getNode(ISD::TRUNCATE, DL, SrcEltVT, Op);
8101 Ops.push_back(getBitcast(DstEltVT, Op));
8102 }
8103 EVT VT = EVT::getVectorVT(*getContext(), DstEltVT,
8105 return getBuildVector(VT, DL, Ops);
8106 }
8107
8108 // Otherwise, we're growing or shrinking the elements. To avoid having to
8109 // handle annoying details of growing/shrinking FP values, we convert them to
8110 // int first.
8111 if (SrcEltVT.isFloatingPoint()) {
8112 // Convert the input float vector to a int vector where the elements are the
8113 // same sizes.
8114 EVT IntEltVT = EVT::getIntegerVT(*getContext(), SrcEltVT.getSizeInBits());
8115 if (SDValue Tmp = FoldConstantBuildVector(BV, DL, IntEltVT))
8117 DstEltVT);
8118 return SDValue();
8119 }
8120
8121 // Now we know the input is an integer vector. If the output is a FP type,
8122 // convert to integer first, then to FP of the right size.
8123 if (DstEltVT.isFloatingPoint()) {
8124 EVT IntEltVT = EVT::getIntegerVT(*getContext(), DstEltVT.getSizeInBits());
8125 if (SDValue Tmp = FoldConstantBuildVector(BV, DL, IntEltVT))
8127 DstEltVT);
8128 return SDValue();
8129 }
8130
8131 // Okay, we know the src/dst types are both integers of differing types.
8132 assert(SrcEltVT.isInteger() && DstEltVT.isInteger());
8133
8134 // Extract the constant raw bit data.
8135 BitVector UndefElements;
8136 SmallVector<APInt> RawBits;
8137 bool IsLE = getDataLayout().isLittleEndian();
8138 if (!BV->getConstantRawBits(IsLE, DstBitSize, RawBits, UndefElements))
8139 return SDValue();
8140
8142 for (unsigned I = 0, E = RawBits.size(); I != E; ++I) {
8143 if (UndefElements[I])
8144 Ops.push_back(getUNDEF(DstEltVT));
8145 else
8146 Ops.push_back(getConstant(RawBits[I], DL, DstEltVT));
8147 }
8148
8149 EVT VT = EVT::getVectorVT(*getContext(), DstEltVT, Ops.size());
8150 return getBuildVector(VT, DL, Ops);
8151}
8152
8154 assert(Val.getValueType().isInteger() && "Invalid AssertAlign!");
8155
8156 // There's no need to assert on a byte-aligned pointer. All pointers are at
8157 // least byte aligned.
8158 if (A == Align(1))
8159 return Val;
8160
8161 SDVTList VTs = getVTList(Val.getValueType());
8163 AddNodeIDNode(ID, ISD::AssertAlign, VTs, {Val});
8164 ID.AddInteger(A.value());
8165
8166 void *IP = nullptr;
8167 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
8168 return SDValue(E, 0);
8169
8170 auto *N =
8171 newSDNode<AssertAlignSDNode>(DL.getIROrder(), DL.getDebugLoc(), VTs, A);
8172 createOperands(N, {Val});
8173
8174 CSEMap.InsertNode(N, IP);
8175 InsertNode(N);
8176
8177 SDValue V(N, 0);
8178 NewSDValueDbgMsg(V, "Creating new node: ", this);
8179 return V;
8180}
8181
8182SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
8183 SDValue N1, SDValue N2) {
8184 SDNodeFlags Flags;
8185 if (Inserter)
8186 Flags = Inserter->getFlags();
8187 return getNode(Opcode, DL, VT, N1, N2, Flags);
8188}
8189
8191 SDValue &N2) const {
8192 if (!TLI->isCommutativeBinOp(Opcode))
8193 return;
8194
8195 // Canonicalize:
8196 // binop(const, nonconst) -> binop(nonconst, const)
8199 bool N1CFP = isConstantFPBuildVectorOrConstantFP(N1);
8200 bool N2CFP = isConstantFPBuildVectorOrConstantFP(N2);
8201 if ((N1C && !N2C) || (N1CFP && !N2CFP))
8202 std::swap(N1, N2);
8203
8204 // Canonicalize:
8205 // binop(splat(x), step_vector) -> binop(step_vector, splat(x))
8206 else if (N1.getOpcode() == ISD::SPLAT_VECTOR &&
8208 std::swap(N1, N2);
8209}
8210
8211SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
8212 SDValue N1, SDValue N2, const SDNodeFlags Flags) {
8214 N2.getOpcode() != ISD::DELETED_NODE &&
8215 "Operand is DELETED_NODE!");
8216
8217 canonicalizeCommutativeBinop(Opcode, N1, N2);
8218
8219 auto *N1C = dyn_cast<ConstantSDNode>(N1);
8220 auto *N2C = dyn_cast<ConstantSDNode>(N2);
8221
8222 // Don't allow undefs in vector splats - we might be returning N2 when folding
8223 // to zero etc.
8224 ConstantSDNode *N2CV =
8225 isConstOrConstSplat(N2, /*AllowUndefs*/ false, /*AllowTruncation*/ true);
8226
8227 switch (Opcode) {
8228 default: break;
8229 case ISD::TokenFactor:
8230 assert(VT == MVT::Other && N1.getValueType() == MVT::Other &&
8231 N2.getValueType() == MVT::Other && "Invalid token factor!");
8232 // Fold trivial token factors.
8233 if (N1.getOpcode() == ISD::EntryToken) return N2;
8234 if (N2.getOpcode() == ISD::EntryToken) return N1;
8235 if (N1 == N2) return N1;
8236 break;
8237 case ISD::BUILD_VECTOR: {
8238 // Attempt to simplify BUILD_VECTOR.
8239 SDValue Ops[] = {N1, N2};
8240 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
8241 return V;
8242 break;
8243 }
8244 case ISD::CONCAT_VECTORS: {
8245 SDValue Ops[] = {N1, N2};
8246 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this))
8247 return V;
8248 break;
8249 }
8250 case ISD::AND:
8251 assert(VT.isInteger() && "This operator does not apply to FP types!");
8252 assert(N1.getValueType() == N2.getValueType() &&
8253 N1.getValueType() == VT && "Binary operator types must match!");
8254 // (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's
8255 // worth handling here.
8256 if (N2CV && N2CV->isZero())
8257 return N2;
8258 if (N2CV && N2CV->isAllOnes()) // X & -1 -> X
8259 return N1;
8260 break;
8261 case ISD::OR:
8262 case ISD::XOR:
8263 case ISD::ADD:
8264 case ISD::PTRADD:
8265 case ISD::SUB:
8266 assert(VT.isInteger() && "This operator does not apply to FP types!");
8267 assert(N1.getValueType() == N2.getValueType() &&
8268 N1.getValueType() == VT && "Binary operator types must match!");
8269 // The equal operand types requirement is unnecessarily strong for PTRADD.
8270 // However, the SelectionDAGBuilder does not generate PTRADDs with different
8271 // operand types, and we'd need to re-implement GEP's non-standard wrapping
8272 // logic everywhere where PTRADDs may be folded or combined to properly
8273 // support them. If/when we introduce pointer types to the SDAG, we will
8274 // need to relax this constraint.
8275
8276 // (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so
8277 // it's worth handling here.
8278 if (N2CV && N2CV->isZero())
8279 return N1;
8280 if ((Opcode == ISD::ADD || Opcode == ISD::SUB) &&
8281 VT.getScalarType() == MVT::i1)
8282 return getNode(ISD::XOR, DL, VT, N1, N2);
8283 // Fold (add (vscale * C0), (vscale * C1)) to (vscale * (C0 + C1)).
8284 if (Opcode == ISD::ADD && N1.getOpcode() == ISD::VSCALE &&
8285 N2.getOpcode() == ISD::VSCALE) {
8286 const APInt &C1 = N1->getConstantOperandAPInt(0);
8287 const APInt &C2 = N2->getConstantOperandAPInt(0);
8288 return getVScale(DL, VT, C1 + C2);
8289 }
8290 break;
8291 case ISD::MUL:
8292 assert(VT.isInteger() && "This operator does not apply to FP types!");
8293 assert(N1.getValueType() == N2.getValueType() &&
8294 N1.getValueType() == VT && "Binary operator types must match!");
8295 if (VT.getScalarType() == MVT::i1)
8296 return getNode(ISD::AND, DL, VT, N1, N2);
8297 if (N2CV && N2CV->isZero())
8298 return N2;
8299 if (N2C && (N1.getOpcode() == ISD::VSCALE) && Flags.hasNoSignedWrap()) {
8300 const APInt &MulImm = N1->getConstantOperandAPInt(0);
8301 const APInt &N2CImm = N2C->getAPIntValue();
8302 return getVScale(DL, VT, MulImm * N2CImm);
8303 }
8304 break;
8305 case ISD::UDIV:
8306 case ISD::UREM:
8307 case ISD::MULHU:
8308 case ISD::MULHS:
8309 case ISD::SDIV:
8310 case ISD::SREM:
8311 case ISD::SADDSAT:
8312 case ISD::SSUBSAT:
8313 case ISD::UADDSAT:
8314 case ISD::USUBSAT:
8315 assert(VT.isInteger() && "This operator does not apply to FP types!");
8316 assert(N1.getValueType() == N2.getValueType() &&
8317 N1.getValueType() == VT && "Binary operator types must match!");
8318 if (VT.getScalarType() == MVT::i1) {
8319 // fold (add_sat x, y) -> (or x, y) for bool types.
8320 if (Opcode == ISD::SADDSAT || Opcode == ISD::UADDSAT)
8321 return getNode(ISD::OR, DL, VT, N1, N2);
8322 // fold (sub_sat x, y) -> (and x, ~y) for bool types.
8323 if (Opcode == ISD::SSUBSAT || Opcode == ISD::USUBSAT)
8324 return getNode(ISD::AND, DL, VT, N1, getNOT(DL, N2, VT));
8325 }
8326 break;
8327 case ISD::SCMP:
8328 case ISD::UCMP:
8329 assert(N1.getValueType() == N2.getValueType() &&
8330 "Types of operands of UCMP/SCMP must match");
8331 assert(N1.getValueType().isVector() == VT.isVector() &&
8332 "Operands and return type of must both be scalars or vectors");
8333 if (VT.isVector())
8336 "Result and operands must have the same number of elements");
8337 break;
8338 case ISD::AVGFLOORS:
8339 case ISD::AVGFLOORU:
8340 case ISD::AVGCEILS:
8341 case ISD::AVGCEILU:
8342 assert(VT.isInteger() && "This operator does not apply to FP types!");
8343 assert(N1.getValueType() == N2.getValueType() &&
8344 N1.getValueType() == VT && "Binary operator types must match!");
8345 break;
8346 case ISD::ABDS:
8347 case ISD::ABDU:
8348 assert(VT.isInteger() && "This operator does not apply to FP types!");
8349 assert(N1.getValueType() == N2.getValueType() &&
8350 N1.getValueType() == VT && "Binary operator types must match!");
8351 if (VT.getScalarType() == MVT::i1)
8352 return getNode(ISD::XOR, DL, VT, N1, N2);
8353 break;
8354 case ISD::SMIN:
8355 case ISD::UMAX:
8356 assert(VT.isInteger() && "This operator does not apply to FP types!");
8357 assert(N1.getValueType() == N2.getValueType() &&
8358 N1.getValueType() == VT && "Binary operator types must match!");
8359 if (VT.getScalarType() == MVT::i1)
8360 return getNode(ISD::OR, DL, VT, N1, N2);
8361 break;
8362 case ISD::SMAX:
8363 case ISD::UMIN:
8364 assert(VT.isInteger() && "This operator does not apply to FP types!");
8365 assert(N1.getValueType() == N2.getValueType() &&
8366 N1.getValueType() == VT && "Binary operator types must match!");
8367 if (VT.getScalarType() == MVT::i1)
8368 return getNode(ISD::AND, DL, VT, N1, N2);
8369 break;
8370 case ISD::FADD:
8371 case ISD::FSUB:
8372 case ISD::FMUL:
8373 case ISD::FDIV:
8374 case ISD::FREM:
8375 assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
8376 assert(N1.getValueType() == N2.getValueType() &&
8377 N1.getValueType() == VT && "Binary operator types must match!");
8378 if (SDValue V = simplifyFPBinop(Opcode, N1, N2, Flags))
8379 return V;
8380 break;
8381 case ISD::FCOPYSIGN: // N1 and result must match. N1/N2 need not match.
8382 assert(N1.getValueType() == VT &&
8385 "Invalid FCOPYSIGN!");
8386 break;
8387 case ISD::SHL:
8388 if (N2C && (N1.getOpcode() == ISD::VSCALE) && Flags.hasNoSignedWrap()) {
8389 const APInt &MulImm = N1->getConstantOperandAPInt(0);
8390 const APInt &ShiftImm = N2C->getAPIntValue();
8391 return getVScale(DL, VT, MulImm << ShiftImm);
8392 }
8393 [[fallthrough]];
8394 case ISD::SRA:
8395 case ISD::SRL:
8396 if (SDValue V = simplifyShift(N1, N2))
8397 return V;
8398 [[fallthrough]];
8399 case ISD::ROTL:
8400 case ISD::ROTR:
8401 case ISD::SSHLSAT:
8402 case ISD::USHLSAT:
8403 assert(VT == N1.getValueType() &&
8404 "Shift operators return type must be the same as their first arg");
8405 assert(VT.isInteger() && N2.getValueType().isInteger() &&
8406 "Shifts only work on integers");
8407 assert((!VT.isVector() || VT == N2.getValueType()) &&
8408 "Vector shift amounts must be in the same as their first arg");
8409 // Verify that the shift amount VT is big enough to hold valid shift
8410 // amounts. This catches things like trying to shift an i1024 value by an
8411 // i8, which is easy to fall into in generic code that uses
8412 // TLI.getShiftAmount().
8415 "Invalid use of small shift amount with oversized value!");
8416
8417 // Always fold shifts of i1 values so the code generator doesn't need to
8418 // handle them. Since we know the size of the shift has to be less than the
8419 // size of the value, the shift/rotate count is guaranteed to be zero.
8420 if (VT == MVT::i1)
8421 return N1;
8422 if (N2CV && N2CV->isZero())
8423 return N1;
8424 break;
8425 case ISD::FP_ROUND:
8427 VT.bitsLE(N1.getValueType()) && N2C &&
8428 (N2C->getZExtValue() == 0 || N2C->getZExtValue() == 1) &&
8429 N2.getOpcode() == ISD::TargetConstant && "Invalid FP_ROUND!");
8430 if (N1.getValueType() == VT) return N1; // noop conversion.
8431 break;
8432 case ISD::IS_FPCLASS: {
8434 "IS_FPCLASS is used for a non-floating type");
8435 assert(isa<ConstantSDNode>(N2) && "FPClassTest is not Constant");
8436 // is.fpclass(poison, mask) -> poison
8437 if (N1.getOpcode() == ISD::POISON)
8438 return getPOISON(VT);
8439 FPClassTest Mask = static_cast<FPClassTest>(N2->getAsZExtVal());
8440 // If all tests are made, it doesn't matter what the value is.
8441 if ((Mask & fcAllFlags) == fcAllFlags)
8442 return getBoolConstant(true, DL, VT, N1.getValueType());
8443 if ((Mask & fcAllFlags) == 0)
8444 return getBoolConstant(false, DL, VT, N1.getValueType());
8445 break;
8446 }
8447 case ISD::AssertNoFPClass: {
8449 "AssertNoFPClass is used for a non-floating type");
8450 assert(isa<ConstantSDNode>(N2) && "NoFPClass is not Constant");
8451 FPClassTest NoFPClass = static_cast<FPClassTest>(N2->getAsZExtVal());
8452 assert(llvm::to_underlying(NoFPClass) <=
8454 "FPClassTest value too large");
8455 (void)NoFPClass;
8456 break;
8457 }
8458 case ISD::AssertSext:
8459 case ISD::AssertZext: {
8460 EVT EVT = cast<VTSDNode>(N2)->getVT();
8461 assert(VT == N1.getValueType() && "Not an inreg extend!");
8462 assert(VT.isInteger() && EVT.isInteger() &&
8463 "Cannot *_EXTEND_INREG FP types");
8464 assert(!EVT.isVector() &&
8465 "AssertSExt/AssertZExt type should be the vector element type "
8466 "rather than the vector type!");
8467 assert(EVT.bitsLE(VT.getScalarType()) && "Not extending!");
8468 if (VT.getScalarType() == EVT) return N1; // noop assertion.
8469 break;
8470 }
8472 EVT EVT = cast<VTSDNode>(N2)->getVT();
8473 assert(VT == N1.getValueType() && "Not an inreg extend!");
8474 assert(VT.isInteger() && EVT.isInteger() &&
8475 "Cannot *_EXTEND_INREG FP types");
8476 assert(EVT.isVector() == VT.isVector() &&
8477 "SIGN_EXTEND_INREG type should be vector iff the operand "
8478 "type is vector!");
8479 assert((!EVT.isVector() ||
8481 "Vector element counts must match in SIGN_EXTEND_INREG");
8482 assert(EVT.getScalarType().bitsLE(VT.getScalarType()) && "Not extending!");
8483 if (EVT == VT) return N1; // Not actually extending
8484 break;
8485 }
8487 case ISD::FP_TO_UINT_SAT: {
8488 assert(VT.isInteger() && cast<VTSDNode>(N2)->getVT().isInteger() &&
8489 N1.getValueType().isFloatingPoint() && "Invalid FP_TO_*INT_SAT");
8490 assert(N1.getValueType().isVector() == VT.isVector() &&
8491 "FP_TO_*INT_SAT type should be vector iff the operand type is "
8492 "vector!");
8493 assert((!VT.isVector() || VT.getVectorElementCount() ==
8495 "Vector element counts must match in FP_TO_*INT_SAT");
8496 assert(!cast<VTSDNode>(N2)->getVT().isVector() &&
8497 "Type to saturate to must be a scalar.");
8498 assert(cast<VTSDNode>(N2)->getVT().bitsLE(VT.getScalarType()) &&
8499 "Not extending!");
8500 break;
8501 }
8504 "The result of EXTRACT_VECTOR_ELT must be at least as wide as the \
8505 element type of the vector.");
8506
8507 // Extract from an undefined value or using an undefined index is undefined.
8508 if (N1.isUndef() || N2.isUndef())
8509 return getUNDEF(VT);
8510
8511 // EXTRACT_VECTOR_ELT of out-of-bounds element is POISON for fixed length
8512 // vectors. For scalable vectors we will provide appropriate support for
8513 // dealing with arbitrary indices.
8514 if (N2C && N1.getValueType().isFixedLengthVector() &&
8515 N2C->getAPIntValue().uge(N1.getValueType().getVectorNumElements()))
8516 return getPOISON(VT);
8517
8518 // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is
8519 // expanding copies of large vectors from registers. This only works for
8520 // fixed length vectors, since we need to know the exact number of
8521 // elements.
8522 if (N2C && N1.getOpcode() == ISD::CONCAT_VECTORS &&
8524 unsigned Factor = N1.getOperand(0).getValueType().getVectorNumElements();
8525 return getExtractVectorElt(DL, VT,
8526 N1.getOperand(N2C->getZExtValue() / Factor),
8527 N2C->getZExtValue() % Factor);
8528 }
8529
8530 // EXTRACT_VECTOR_ELT of BUILD_VECTOR or SPLAT_VECTOR is often formed while
8531 // lowering is expanding large vector constants.
8532 if (N2C && (N1.getOpcode() == ISD::BUILD_VECTOR ||
8533 N1.getOpcode() == ISD::SPLAT_VECTOR)) {
8536 "BUILD_VECTOR used for scalable vectors");
8537 unsigned Index =
8538 N1.getOpcode() == ISD::BUILD_VECTOR ? N2C->getZExtValue() : 0;
8539 SDValue Elt = N1.getOperand(Index);
8540
8541 if (VT != Elt.getValueType())
8542 // If the vector element type is not legal, the BUILD_VECTOR operands
8543 // are promoted and implicitly truncated, and the result implicitly
8544 // extended. Make that explicit here.
8545 Elt = getAnyExtOrTrunc(Elt, DL, VT);
8546
8547 return Elt;
8548 }
8549
8550 // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector
8551 // operations are lowered to scalars.
8552 if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) {
8553 // If the indices are the same, return the inserted element else
8554 // if the indices are known different, extract the element from
8555 // the original vector.
8556 SDValue N1Op2 = N1.getOperand(2);
8558
8559 if (N1Op2C && N2C) {
8560 if (N1Op2C->getZExtValue() == N2C->getZExtValue()) {
8561 if (VT == N1.getOperand(1).getValueType())
8562 return N1.getOperand(1);
8563 if (VT.isFloatingPoint()) {
8565 return getFPExtendOrRound(N1.getOperand(1), DL, VT);
8566 }
8567 return getSExtOrTrunc(N1.getOperand(1), DL, VT);
8568 }
8569 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2);
8570 }
8571 }
8572
8573 // EXTRACT_VECTOR_ELT of v1iX EXTRACT_SUBVECTOR could be formed
8574 // when vector types are scalarized and v1iX is legal.
8575 // vextract (v1iX extract_subvector(vNiX, Idx)) -> vextract(vNiX,Idx).
8576 // Here we are completely ignoring the extract element index (N2),
8577 // which is fine for fixed width vectors, since any index other than 0
8578 // is undefined anyway. However, this cannot be ignored for scalable
8579 // vectors - in theory we could support this, but we don't want to do this
8580 // without a profitability check.
8581 if (N1.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
8583 N1.getValueType().getVectorNumElements() == 1) {
8584 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0),
8585 N1.getOperand(1));
8586 }
8587 break;
8589 assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
8590 assert(!N1.getValueType().isVector() && !VT.isVector() &&
8591 (N1.getValueType().isInteger() == VT.isInteger()) &&
8592 N1.getValueType() != VT &&
8593 "Wrong types for EXTRACT_ELEMENT!");
8594
8595 // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding
8596 // 64-bit integers into 32-bit parts. Instead of building the extract of
8597 // the BUILD_PAIR, only to have legalize rip it apart, just do it now.
8598 if (N1.getOpcode() == ISD::BUILD_PAIR)
8599 return N1.getOperand(N2C->getZExtValue());
8600
8601 // EXTRACT_ELEMENT of a constant int is also very common.
8602 if (N1C) {
8603 unsigned ElementSize = VT.getSizeInBits();
8604 unsigned Shift = ElementSize * N2C->getZExtValue();
8605 const APInt &Val = N1C->getAPIntValue();
8606 return getConstant(Val.extractBits(ElementSize, Shift), DL, VT);
8607 }
8608 break;
8610 EVT N1VT = N1.getValueType();
8611 assert(VT.isVector() && N1VT.isVector() &&
8612 "Extract subvector VTs must be vectors!");
8614 "Extract subvector VTs must have the same element type!");
8615 assert((VT.isFixedLengthVector() || N1VT.isScalableVector()) &&
8616 "Cannot extract a scalable vector from a fixed length vector!");
8617 assert((VT.isScalableVector() != N1VT.isScalableVector() ||
8619 "Extract subvector must be from larger vector to smaller vector!");
8620 assert(N2C && "Extract subvector index must be a constant");
8621 assert((VT.isScalableVector() != N1VT.isScalableVector() ||
8622 (VT.getVectorMinNumElements() + N2C->getZExtValue()) <=
8623 N1VT.getVectorMinNumElements()) &&
8624 "Extract subvector overflow!");
8625 assert(N2C->getAPIntValue().getBitWidth() ==
8626 TLI->getVectorIdxWidth(getDataLayout()) &&
8627 "Constant index for EXTRACT_SUBVECTOR has an invalid size");
8628 assert(N2C->getZExtValue() % VT.getVectorMinNumElements() == 0 &&
8629 "Extract index is not a multiple of the output vector length");
8630
8631 // Trivial extraction.
8632 if (VT == N1VT)
8633 return N1;
8634
8635 // EXTRACT_SUBVECTOR of an UNDEF is an UNDEF.
8636 if (N1.isUndef())
8637 return getUNDEF(VT);
8638
8639 // EXTRACT_SUBVECTOR of CONCAT_VECTOR can be simplified if the pieces of
8640 // the concat have the same type as the extract.
8641 if (N1.getOpcode() == ISD::CONCAT_VECTORS &&
8642 VT == N1.getOperand(0).getValueType()) {
8643 unsigned Factor = VT.getVectorMinNumElements();
8644 return N1.getOperand(N2C->getZExtValue() / Factor);
8645 }
8646
8647 // EXTRACT_SUBVECTOR of INSERT_SUBVECTOR is often created
8648 // during shuffle legalization.
8649 if (N1.getOpcode() == ISD::INSERT_SUBVECTOR && N2 == N1.getOperand(2) &&
8650 VT == N1.getOperand(1).getValueType())
8651 return N1.getOperand(1);
8652 break;
8653 }
8654 }
8655
8656 if (N1.getOpcode() == ISD::POISON || N2.getOpcode() == ISD::POISON) {
8657 switch (Opcode) {
8658 case ISD::XOR:
8659 case ISD::ADD:
8660 case ISD::PTRADD:
8661 case ISD::SUB:
8663 case ISD::UDIV:
8664 case ISD::SDIV:
8665 case ISD::UREM:
8666 case ISD::SREM:
8667 case ISD::MUL:
8668 case ISD::AND:
8669 case ISD::SSUBSAT:
8670 case ISD::USUBSAT:
8671 case ISD::UMIN:
8672 case ISD::OR:
8673 case ISD::SADDSAT:
8674 case ISD::UADDSAT:
8675 case ISD::UMAX:
8676 case ISD::SMAX:
8677 case ISD::SMIN:
8678 // fold op(arg1, poison) -> poison, fold op(poison, arg2) -> poison.
8679 return N2.getOpcode() == ISD::POISON ? N2 : N1;
8680 }
8681 }
8682
8683 // Canonicalize an UNDEF to the RHS, even over a constant.
8684 if (N1.getOpcode() == ISD::UNDEF && N2.getOpcode() != ISD::UNDEF) {
8685 if (TLI->isCommutativeBinOp(Opcode)) {
8686 std::swap(N1, N2);
8687 } else {
8688 switch (Opcode) {
8689 case ISD::PTRADD:
8690 case ISD::SUB:
8691 // fold op(undef, non_undef_arg2) -> undef.
8692 return N1;
8694 case ISD::UDIV:
8695 case ISD::SDIV:
8696 case ISD::UREM:
8697 case ISD::SREM:
8698 case ISD::SSUBSAT:
8699 case ISD::USUBSAT:
8700 // fold op(undef, non_undef_arg2) -> 0.
8701 return getConstant(0, DL, VT);
8702 }
8703 }
8704 }
8705
8706 // Fold a bunch of operators when the RHS is undef.
8707 if (N2.getOpcode() == ISD::UNDEF) {
8708 switch (Opcode) {
8709 case ISD::XOR:
8710 if (N1.getOpcode() == ISD::UNDEF)
8711 // Handle undef ^ undef -> 0 special case. This is a common
8712 // idiom (misuse).
8713 return getConstant(0, DL, VT);
8714 [[fallthrough]];
8715 case ISD::ADD:
8716 case ISD::PTRADD:
8717 case ISD::SUB:
8718 // fold op(arg1, undef) -> undef.
8719 return N2;
8720 case ISD::UDIV:
8721 case ISD::SDIV:
8722 case ISD::UREM:
8723 case ISD::SREM:
8724 // fold op(arg1, undef) -> poison.
8725 return getPOISON(VT);
8726 case ISD::MUL:
8727 case ISD::AND:
8728 case ISD::SSUBSAT:
8729 case ISD::USUBSAT:
8730 case ISD::UMIN:
8731 // fold op(undef, undef) -> undef, fold op(arg1, undef) -> 0.
8732 return N1.getOpcode() == ISD::UNDEF ? N2 : getConstant(0, DL, VT);
8733 case ISD::OR:
8734 case ISD::SADDSAT:
8735 case ISD::UADDSAT:
8736 case ISD::UMAX:
8737 // fold op(undef, undef) -> undef, fold op(arg1, undef) -> -1.
8738 return N1.getOpcode() == ISD::UNDEF ? N2 : getAllOnesConstant(DL, VT);
8739 case ISD::SMAX:
8740 // fold op(undef, undef) -> undef, fold op(arg1, undef) -> MAX_INT.
8741 return N1.getOpcode() == ISD::UNDEF
8742 ? N2
8743 : getConstant(
8745 VT);
8746 case ISD::SMIN:
8747 // fold op(undef, undef) -> undef, fold op(arg1, undef) -> MIN_INT.
8748 return N1.getOpcode() == ISD::UNDEF
8749 ? N2
8750 : getConstant(
8752 VT);
8753 }
8754 }
8755
8756 // Perform trivial constant folding.
8757 if (SDValue SV = FoldConstantArithmetic(Opcode, DL, VT, {N1, N2}, Flags))
8758 return SV;
8759
8760 // Memoize this node if possible.
8761 SDNode *N;
8762 SDVTList VTs = getVTList(VT);
8763 SDValue Ops[] = {N1, N2};
8764 if (VT != MVT::Glue) {
8766 AddNodeIDNode(ID, Opcode, VTs, Ops);
8767 void *IP = nullptr;
8768 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
8769 E->intersectFlagsWith(Flags);
8770 return SDValue(E, 0);
8771 }
8772
8773 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
8774 N->setFlags(Flags);
8775 createOperands(N, Ops);
8776 CSEMap.InsertNode(N, IP);
8777 } else {
8778 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
8779 createOperands(N, Ops);
8780 }
8781
8782 InsertNode(N);
8783 SDValue V = SDValue(N, 0);
8784 NewSDValueDbgMsg(V, "Creating new node: ", this);
8785 return V;
8786}
8787
8788SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
8789 SDValue N1, SDValue N2, SDValue N3) {
8790 SDNodeFlags Flags;
8791 if (Inserter)
8792 Flags = Inserter->getFlags();
8793 return getNode(Opcode, DL, VT, N1, N2, N3, Flags);
8794}
8795
/// Create a three-operand node, applying opcode-specific simplifications
/// (BUILD_VECTOR/CONCAT_VECTORS folding, SETCC folding, select
/// simplification, insert-element/subvector folds, no-op cast removal),
/// then trivial constant folding, and finally CSE through the folding set
/// for all result types other than MVT::Glue.
8796SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
8797 SDValue N1, SDValue N2, SDValue N3,
8798 const SDNodeFlags Flags) {
8800 N2.getOpcode() != ISD::DELETED_NODE &&
8801 N3.getOpcode() != ISD::DELETED_NODE &&
8802 "Operand is DELETED_NODE!");
8803 // Perform various simplifications.
8804 switch (Opcode) {
8805 case ISD::BUILD_VECTOR: {
8806 // Attempt to simplify BUILD_VECTOR.
8807 SDValue Ops[] = {N1, N2, N3};
8808 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
8809 return V;
8810 break;
8811 }
8812 case ISD::CONCAT_VECTORS: {
8813 SDValue Ops[] = {N1, N2, N3};
8814 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this))
8815 return V;
8816 break;
8817 }
8818 case ISD::SETCC: {
8819 assert(VT.isInteger() && "SETCC result type must be an integer!");
8820 assert(N1.getValueType() == N2.getValueType() &&
8821 "SETCC operands must have the same type!");
8822 assert(VT.isVector() == N1.getValueType().isVector() &&
8823 "SETCC type should be vector iff the operand type is vector!");
8824 assert((!VT.isVector() || VT.getVectorElementCount() ==
8826 "SETCC vector element counts must match!");
8827 // Use FoldSetCC to simplify SETCC's.
8828 if (SDValue V =
8829 FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL, Flags))
8830 return V;
8831 break;
8832 }
8833 case ISD::SELECT:
8834 case ISD::VSELECT:
8835 if (SDValue V = simplifySelect(N1, N2, N3))
8836 return V;
8837 break;
8839 llvm_unreachable("should use getVectorShuffle constructor!");
8841 if (isNullConstant(N3))
8842 return N1;
8843 break;
8845 if (isNullConstant(N3))
8846 return N2;
8847 break;
8849 assert(VT.isVector() && VT == N1.getValueType() &&
8850 "INSERT_VECTOR_ELT vector type mismatch");
8852 "INSERT_VECTOR_ELT scalar fp/int mismatch");
8853 assert((!VT.isFloatingPoint() ||
8854 VT.getVectorElementType() == N2.getValueType()) &&
8855 "INSERT_VECTOR_ELT fp scalar type mismatch");
8856 assert((!VT.isInteger() ||
8858 "INSERT_VECTOR_ELT int scalar size mismatch");
8859
8860 auto *N3C = dyn_cast<ConstantSDNode>(N3);
8861 // INSERT_VECTOR_ELT into out-of-bounds element is an UNDEF, except
8862 // for scalable vectors where we will generate appropriate code to
8863 // deal with out-of-bounds cases correctly.
8864 if (N3C && VT.isFixedLengthVector() &&
8865 N3C->getZExtValue() >= VT.getVectorNumElements())
8866 return getUNDEF(VT);
8867
8868 // Undefined index can be assumed out-of-bounds, so that's UNDEF too.
8869 if (N3.isUndef())
8870 return getUNDEF(VT);
8871
8872 // If inserting poison, just use the input vector.
8873 if (N2.getOpcode() == ISD::POISON)
8874 return N1;
8875
8876 // Inserting undef into undef/poison is still undef.
8877 if (N2.getOpcode() == ISD::UNDEF && N1.isUndef())
8878 return getUNDEF(VT);
8879
8880 // If the inserted element is an UNDEF, just use the input vector.
8881 // But not if skipping the insert could make the result more poisonous.
8882 if (N2.isUndef()) {
// Only the single inserted lane needs the poison-freedom guarantee when
// the index is a known constant; otherwise query the whole vector.
8883 if (N3C && VT.isFixedLengthVector()) {
8884 APInt EltMask =
8885 APInt::getOneBitSet(VT.getVectorNumElements(), N3C->getZExtValue());
8886 if (isGuaranteedNotToBePoison(N1, EltMask))
8887 return N1;
8888 } else if (isGuaranteedNotToBePoison(N1))
8889 return N1;
8890 }
8891 break;
8892 }
8893 case ISD::INSERT_SUBVECTOR: {
8894 // If inserting poison, just use the input vector,
8895 if (N2.getOpcode() == ISD::POISON)
8896 return N1;
8897
8898 // Inserting undef into undef/poison is still undef.
8899 if (N2.getOpcode() == ISD::UNDEF && N1.isUndef())
8900 return getUNDEF(VT);
8901
8902 EVT N2VT = N2.getValueType();
8903 assert(VT == N1.getValueType() &&
8904 "Dest and insert subvector source types must match!");
8905 assert(VT.isVector() && N2VT.isVector() &&
8906 "Insert subvector VTs must be vectors!");
8908 "Insert subvector VTs must have the same element type!");
8909 assert((VT.isScalableVector() || N2VT.isFixedLengthVector()) &&
8910 "Cannot insert a scalable vector into a fixed length vector!");
8911 assert((VT.isScalableVector() != N2VT.isScalableVector() ||
8913 "Insert subvector must be from smaller vector to larger vector!");
8915 "Insert subvector index must be constant");
8916 assert((VT.isScalableVector() != N2VT.isScalableVector() ||
8917 (N2VT.getVectorMinNumElements() + N3->getAsZExtVal()) <=
8919 "Insert subvector overflow!");
8921 TLI->getVectorIdxWidth(getDataLayout()) &&
8922 "Constant index for INSERT_SUBVECTOR has an invalid size");
8923
8924 // Trivial insertion.
8925 if (VT == N2VT)
8926 return N2;
8927
8928 // If this is an insert of an extracted vector into an undef/poison vector,
8929 // we can just use the input to the extract. But not if skipping the
8930 // extract+insert could make the result more poisonous.
8931 if (N1.isUndef() && N2.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
8932 N2.getOperand(1) == N3 && N2.getOperand(0).getValueType() == VT) {
8933 if (N1.getOpcode() == ISD::POISON)
8934 return N2.getOperand(0);
8935 if (VT.isFixedLengthVector() && N2VT.isFixedLengthVector()) {
// Lanes outside the re-inserted window (~EltMask) must be poison-free
// in the extract source for the fold to be safe.
8936 unsigned LoBit = N3->getAsZExtVal();
8937 unsigned HiBit = LoBit + N2VT.getVectorNumElements();
8938 APInt EltMask =
8939 APInt::getBitsSet(VT.getVectorNumElements(), LoBit, HiBit);
8940 if (isGuaranteedNotToBePoison(N2.getOperand(0), ~EltMask))
8941 return N2.getOperand(0);
8942 } else if (isGuaranteedNotToBePoison(N2.getOperand(0)))
8943 return N2.getOperand(0);
8944 }
8945
8946 // If the inserted subvector is UNDEF, just use the input vector.
8947 // But not if skipping the insert could make the result more poisonous.
8948 if (N2.isUndef()) {
8949 if (VT.isFixedLengthVector()) {
8950 unsigned LoBit = N3->getAsZExtVal();
8951 unsigned HiBit = LoBit + N2VT.getVectorNumElements();
8952 APInt EltMask =
8953 APInt::getBitsSet(VT.getVectorNumElements(), LoBit, HiBit);
8954 if (isGuaranteedNotToBePoison(N1, EltMask))
8955 return N1;
8956 } else if (isGuaranteedNotToBePoison(N1))
8957 return N1;
8958 }
8959 break;
8960 }
8961 case ISD::BITCAST:
8962 // Fold bit_convert nodes from a type to themselves.
8963 if (N1.getValueType() == VT)
8964 return N1;
8965 break;
8966 case ISD::VP_TRUNCATE:
8967 case ISD::VP_SIGN_EXTEND:
8968 case ISD::VP_ZERO_EXTEND:
8969 // Don't create noop casts.
8970 if (N1.getValueType() == VT)
8971 return N1;
8972 break;
8973 case ISD::VECTOR_COMPRESS: {
8974 [[maybe_unused]] EVT VecVT = N1.getValueType();
8975 [[maybe_unused]] EVT MaskVT = N2.getValueType();
8976 [[maybe_unused]] EVT PassthruVT = N3.getValueType();
8977 assert(VT == VecVT && "Vector and result type don't match.");
8978 assert(VecVT.isVector() && MaskVT.isVector() && PassthruVT.isVector() &&
8979 "All inputs must be vectors.");
8980 assert(VecVT == PassthruVT && "Vector and passthru types don't match.");
8982 "Vector and mask must have same number of elements.");
8983
// An undef source or mask makes the compressed result unspecified, so
// fold to the passthru operand.
8984 if (N1.isUndef() || N2.isUndef())
8985 return N3;
8986
8987 break;
8988 }
8993 [[maybe_unused]] EVT AccVT = N1.getValueType();
8994 [[maybe_unused]] EVT Input1VT = N2.getValueType();
8995 [[maybe_unused]] EVT Input2VT = N3.getValueType();
8996 assert(Input1VT.isVector() && Input1VT == Input2VT &&
8997 "Expected the second and third operands of the PARTIAL_REDUCE_MLA "
8998 "node to have the same type!");
8999 assert(VT.isVector() && VT == AccVT &&
9000 "Expected the first operand of the PARTIAL_REDUCE_MLA node to have "
9001 "the same type as its result!");
9003 AccVT.getVectorElementCount()) &&
9004 "Expected the element count of the second and third operands of the "
9005 "PARTIAL_REDUCE_MLA node to be a positive integer multiple of the "
9006 "element count of the first operand and the result!");
9008 "Expected the second and third operands of the PARTIAL_REDUCE_MLA "
9009 "node to have an element type which is the same as or smaller than "
9010 "the element type of the first operand and result!");
9011 break;
9012 }
9013 }
9014
9015 // Perform trivial constant folding for arithmetic operators.
9016 switch (Opcode) {
9017 case ISD::FMA:
9018 case ISD::FMAD:
9019 case ISD::SETCC:
9020 case ISD::FSHL:
9021 case ISD::FSHR:
9022 if (SDValue SV =
9023 FoldConstantArithmetic(Opcode, DL, VT, {N1, N2, N3}, Flags))
9024 return SV;
9025 break;
9026 }
9027
9028 // Memoize node if it doesn't produce a glue result.
9029 SDNode *N;
9030 SDVTList VTs = getVTList(VT);
9031 SDValue Ops[] = {N1, N2, N3};
9032 if (VT != MVT::Glue) {
9034 AddNodeIDNode(ID, Opcode, VTs, Ops);
9035 void *IP = nullptr;
// CSE hit: reuse the existing node, conservatively intersecting its
// flags with the requested ones.
9036 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
9037 E->intersectFlagsWith(Flags);
9038 return SDValue(E, 0);
9039 }
9040
9041 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
9042 N->setFlags(Flags);
9043 createOperands(N, Ops);
9044 CSEMap.InsertNode(N, IP);
9045 } else {
9046 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
9047 createOperands(N, Ops);
9048 }
9049
9050 InsertNode(N);
9051 SDValue V = SDValue(N, 0);
9052 NewSDValueDbgMsg(V, "Creating new node: ", this);
9053 return V;
9054}
9055
9056SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
9057 SDValue N1, SDValue N2, SDValue N3, SDValue N4,
9058 const SDNodeFlags Flags) {
9059 SDValue Ops[] = { N1, N2, N3, N4 };
9060 return getNode(Opcode, DL, VT, Ops, Flags);
9061}
9062
9063SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
9064 SDValue N1, SDValue N2, SDValue N3, SDValue N4) {
9065 SDNodeFlags Flags;
9066 if (Inserter)
9067 Flags = Inserter->getFlags();
9068 return getNode(Opcode, DL, VT, N1, N2, N3, N4, Flags);
9069}
9070
9071SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
9072 SDValue N1, SDValue N2, SDValue N3, SDValue N4,
9073 SDValue N5, const SDNodeFlags Flags) {
9074 SDValue Ops[] = { N1, N2, N3, N4, N5 };
9075 return getNode(Opcode, DL, VT, Ops, Flags);
9076}
9077
9078SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
9079 SDValue N1, SDValue N2, SDValue N3, SDValue N4,
9080 SDValue N5) {
9081 SDNodeFlags Flags;
9082 if (Inserter)
9083 Flags = Inserter->getFlags();
9084 return getNode(Opcode, DL, VT, N1, N2, N3, N4, N5, Flags);
9085}
9086
9087/// getStackArgumentTokenFactor - Compute a TokenFactor to force all
9088/// the incoming stack arguments to be loaded from the stack.
9090 SmallVector<SDValue, 8> ArgChains;
9091
9092 // Include the original chain at the beginning of the list. When this is
9093 // used by target LowerCall hooks, this helps legalize find the
9094 // CALLSEQ_BEGIN node.
9095 ArgChains.push_back(Chain);
9096
9097 // Add a chain value for each stack argument.
// Walk the users of the entry node: any load whose address is a
// negative-index frame object is treated as an incoming stack argument.
// NOTE(review): relies on negative frame indices denoting fixed objects
// created for incoming arguments -- confirm against MachineFrameInfo.
9098 for (SDNode *U : getEntryNode().getNode()->users())
9099 if (LoadSDNode *L = dyn_cast<LoadSDNode>(U))
9100 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
9101 if (FI->getIndex() < 0)
9102 ArgChains.push_back(SDValue(L, 1));
9103
9104 // Build a tokenfactor for all the chains.
// Result #1 of each load is its chain output; token-factoring them orders
// every argument load before users of the returned chain.
9105 return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
9106}
9107
9108/// getMemsetValue - Vectorized representation of the memset value
9109/// operand.
// Builds a value of type VT whose every byte equals the i8 fill value,
// handling both the constant-byte and runtime-byte cases.
9111 const SDLoc &dl) {
9112 assert(!Value.isUndef());
9113
9114 unsigned NumBits = VT.getScalarSizeInBits();
// Constant fill byte: splat the 8-bit pattern across the scalar width.
9116 assert(C->getAPIntValue().getBitWidth() == 8);
9117 APInt Val = APInt::getSplat(NumBits, C->getAPIntValue());
9118 if (VT.isInteger()) {
// Mark the constant opaque when the target cannot encode it as a legal
// store immediate, so later folds keep it materialized as-is.
9119 bool IsOpaque = VT.getSizeInBits() > 64 ||
9120 !DAG.getTargetLoweringInfo().isLegalStoreImmediate(C->getSExtValue());
9121 return DAG.getConstant(Val, dl, VT, false, IsOpaque);
9122 }
9123 return DAG.getConstantFP(APFloat(VT.getFltSemantics(), Val), dl, VT);
9124 }
9125
// Runtime fill byte: zero-extend and replicate it with arithmetic.
9126 assert(Value.getValueType() == MVT::i8 && "memset with non-byte fill value?");
9127 EVT IntVT = VT.getScalarType();
9128 if (!IntVT.isInteger())
9129 IntVT = EVT::getIntegerVT(*DAG.getContext(), IntVT.getSizeInBits());
9130
9131 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, IntVT, Value);
9132 if (NumBits > 8) {
9133 // Use a multiplication with 0x010101... to extend the input to the
9134 // required length.
9135 APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
9136 Value = DAG.getNode(ISD::MUL, dl, IntVT, Value,
9137 DAG.getConstant(Magic, dl, IntVT));
9138 }
9139
// Bitcast back to a floating-point scalar type when needed, then splat
// into a vector when VT is a vector type.
9140 if (VT != Value.getValueType() && !VT.isInteger())
9141 Value = DAG.getBitcast(VT.getScalarType(), Value);
9142 if (VT != Value.getValueType())
9143 Value = DAG.getSplatBuildVector(VT, dl, Value);
9144
9145 return Value;
9146}
9147
9148/// getMemsetStringVal - Similar to getMemsetValue. Except this is only
9149/// used when a memcpy is turned into a memset when the source is a constant
9150/// string ptr.
// Returns an empty SDValue when the target prefers keeping the load.
9152 const TargetLowering &TLI,
9153 const ConstantDataArraySlice &Slice) {
9154 // Handle vector with all elements zero.
9155 if (Slice.Array == nullptr) {
9156 if (VT.isInteger())
9157 return DAG.getConstant(0, dl, VT);
9158 return DAG.getNode(ISD::BITCAST, dl, VT,
9159 DAG.getConstant(0, dl, VT.changeTypeToInteger()));
9160 }
9161
9162 assert(!VT.isVector() && "Can't handle vector type here!");
9163 unsigned NumVTBits = VT.getSizeInBits();
9164 unsigned NumVTBytes = NumVTBits / 8;
9165 unsigned NumBytes = std::min(NumVTBytes, unsigned(Slice.Length));
9166
// Pack the string bytes into an integer, honoring target endianness.
9167 APInt Val(NumVTBits, 0);
9168 if (DAG.getDataLayout().isLittleEndian()) {
9169 for (unsigned i = 0; i != NumBytes; ++i)
9170 Val |= (uint64_t)(unsigned char)Slice[i] << i*8;
9171 } else {
9172 for (unsigned i = 0; i != NumBytes; ++i)
9173 Val |= (uint64_t)(unsigned char)Slice[i] << (NumVTBytes-i-1)*8;
9174 }
9175
9176 // If the "cost" of materializing the integer immediate is less than the cost
9177 // of a load, then it is cost effective to turn the load into the immediate.
9178 Type *Ty = VT.getTypeForEVT(*DAG.getContext());
9179 if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty))
9180 return DAG.getConstant(Val, dl, VT);
9181 return SDValue();
9182}
9183
// TypeSize-offset variant of getMemBasePlusOffset: materializes the offset
// as an SDValue (presumably a constant of the base pointer's width via
// getTypeSize -- confirm) and defers to the SDValue-offset overload.
9185 const SDLoc &DL,
9186 const SDNodeFlags Flags) {
9187 SDValue Index = getTypeSize(DL, Base.getValueType(), Offset);
9188 return getMemBasePlusOffset(Base, Index, DL, Flags);
9189}
9190
// Emit Ptr + Offset as PTRADD when the target wants pointer arithmetic
// preserved for this pointer type; otherwise fall back to a plain ADD.
9192 const SDLoc &DL,
9193 const SDNodeFlags Flags) {
9194 assert(Offset.getValueType().isInteger());
9195 EVT BasePtrVT = Ptr.getValueType();
9196 if (TLI->shouldPreservePtrArith(this->getMachineFunction().getFunction(),
9197 BasePtrVT))
9198 return getNode(ISD::PTRADD, DL, BasePtrVT, Ptr, Offset, Flags);
9199 // InBounds only applies to PTRADD, don't set it if we generate ADD.
9200 SDNodeFlags AddFlags = Flags;
9201 AddFlags.setInBounds(false);
9202 return getNode(ISD::ADD, DL, BasePtrVT, Ptr, Offset, AddFlags);
9203}
9204
9205/// Returns true if memcpy source is constant data.
// Recognizes Src as either a direct GlobalAddress or (GlobalAddress + C)
// and, if so, queries the constant-data-array info at the summed offset.
9207 uint64_t SrcDelta = 0;
9208 GlobalAddressSDNode *G = nullptr;
9209 if (Src.getOpcode() == ISD::GlobalAddress)
9211 else if (Src->isAnyAdd() &&
9212 Src.getOperand(0).getOpcode() == ISD::GlobalAddress &&
9213 Src.getOperand(1).getOpcode() == ISD::Constant) {
9214 G = cast<GlobalAddressSDNode>(Src.getOperand(0));
9215 SrcDelta = Src.getConstantOperandVal(1);
9216 }
9217 if (!G)
9218 return false;
9219
// Element width is 8 bits: memcpy views the global as raw bytes.
9220 return getConstantDataArrayInfo(G->getGlobal(), Slice, 8,
9221 SrcDelta + G->getOffset());
9222}
9223
// Decide whether the inline mem-function expansion should optimize for
// size rather than speed for this function.
9225 SelectionDAG &DAG) {
9226 // On Darwin, -Os means optimize for size without hurting performance, so
9227 // only really optimize for size when -Oz (MinSize) is used.
9229 return MF.getFunction().hasMinSize();
9230 return DAG.shouldOptForSize();
9231}
9232
// Gang together the [From, To) slice of memcpy loads with a TokenFactor,
// then re-issue the corresponding stores chained on that token so every
// load in the group completes before any of its paired stores. The new
// load and store chains are appended to OutChains.
9234 SmallVector<SDValue, 32> &OutChains, unsigned From,
9235 unsigned To, SmallVector<SDValue, 16> &OutLoadChains,
9236 SmallVector<SDValue, 16> &OutStoreChains) {
9237 assert(OutLoadChains.size() && "Missing loads in memcpy inlining");
9238 assert(OutStoreChains.size() && "Missing stores in memcpy inlining");
9239 SmallVector<SDValue, 16> GluedLoadChains;
9240 for (unsigned i = From; i < To; ++i) {
9241 OutChains.push_back(OutLoadChains[i]);
9242 GluedLoadChains.push_back(OutLoadChains[i]);
9243 }
9244
9245 // Chain for all loads.
9246 SDValue LoadToken = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
9247 GluedLoadChains);
9248
9249 for (unsigned i = From; i < To; ++i) {
// NOTE(review): the dyn_cast result is dereferenced without a null check;
// if OutStoreChains is guaranteed to hold only truncating stores here,
// cast<> would state that invariant more directly -- confirm.
9250 StoreSDNode *ST = dyn_cast<StoreSDNode>(OutStoreChains[i]);
9251 SDValue NewStore = DAG.getTruncStore(LoadToken, dl, ST->getValue(),
9252 ST->getBasePtr(), ST->getMemoryVT(),
9253 ST->getMemOperand());
9254 OutChains.push_back(NewStore);
9255 }
9256}
9257
/// Expand an inline memcpy into an explicit sequence of loads and stores
/// (or constant stores when the source is constant data). Returns the new
/// chain head, or an empty SDValue if the lowering is rejected and the
/// caller should fall back to a libcall.
9259 SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src,
9260 uint64_t Size, Align Alignment, bool isVol, bool AlwaysInline,
9261 MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo,
9262 const AAMDNodes &AAInfo, BatchAAResults *BatchAA) {
9263 // Turn a memcpy of undef to nop.
9264 // FIXME: We need to honor volatile even is Src is undef.
9265 if (Src.isUndef())
9266 return Chain;
9267
9268 // Expand memcpy to a series of load and store ops if the size operand falls
9269 // below a certain threshold.
9270 // TODO: In the AlwaysInline case, if the size is big then generate a loop
9271 // rather than maybe a humongous number of loads and stores.
9272 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9273 const DataLayout &DL = DAG.getDataLayout();
9274 LLVMContext &C = *DAG.getContext();
9275 std::vector<EVT> MemOps;
9276 bool DstAlignCanChange = false;
9278 MachineFrameInfo &MFI = MF.getFrameInfo();
9279 bool OptSize = shouldLowerMemFuncForSize(MF, DAG);
// A non-fixed stack-object destination may have its alignment raised
// below to enable wider memory operations.
9281 if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
9282 DstAlignCanChange = true;
9283 MaybeAlign SrcAlign = DAG.InferPtrAlign(Src);
9284 if (!SrcAlign || Alignment > *SrcAlign)
9285 SrcAlign = Alignment;
9286 assert(SrcAlign && "SrcAlign must be set");
9288 // If marked as volatile, perform a copy even when marked as constant.
9289 bool CopyFromConstant = !isVol && isMemSrcFromConstant(Src, Slice);
9290 bool isZeroConstant = CopyFromConstant && Slice.Array == nullptr;
9291 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize);
// A memcpy from an all-zero constant is costed as a zero memset when
// choosing the optimal sequence of memory operation types.
9292 const MemOp Op = isZeroConstant
9293 ? MemOp::Set(Size, DstAlignCanChange, Alignment,
9294 /*IsZeroMemset*/ true, isVol)
9295 : MemOp::Copy(Size, DstAlignCanChange, Alignment,
9296 *SrcAlign, isVol, CopyFromConstant);
9297 if (!TLI.findOptimalMemOpLowering(
9298 C, MemOps, Limit, Op, DstPtrInfo.getAddrSpace(),
9299 SrcPtrInfo.getAddrSpace(), MF.getFunction().getAttributes(), nullptr))
9300 return SDValue();
9301
9302 if (DstAlignCanChange) {
9303 Type *Ty = MemOps[0].getTypeForEVT(C);
9304 Align NewAlign = DL.getABITypeAlign(Ty);
9305
9306 // Don't promote to an alignment that would require dynamic stack
9307 // realignment which may conflict with optimizations such as tail call
9308 // optimization.
9310 if (!TRI->hasStackRealignment(MF))
9311 if (MaybeAlign StackAlign = DL.getStackAlignment())
9312 NewAlign = std::min(NewAlign, *StackAlign);
9313
9314 if (NewAlign > Alignment) {
9315 // Give the stack frame object a larger alignment if needed.
9316 if (MFI.getObjectAlign(FI->getIndex()) < NewAlign)
9317 MFI.setObjectAlignment(FI->getIndex(), NewAlign);
9318 Alignment = NewAlign;
9319 }
9320 }
9321
9322 // Prepare AAInfo for loads/stores after lowering this memcpy.
// The TBAA metadata describes the original access, not the byte-wise
// accesses emitted here, so it is dropped.
9323 AAMDNodes NewAAInfo = AAInfo;
9324 NewAAInfo.TBAA = NewAAInfo.TBAAStruct = nullptr;
9325
9326 const Value *SrcVal = dyn_cast_if_present<const Value *>(SrcPtrInfo.V);
9327 bool isConstant =
9328 BatchAA && SrcVal &&
9329 BatchAA->pointsToConstantMemory(MemoryLocation(SrcVal, Size, AAInfo));
9330
9331 MachineMemOperand::Flags MMOFlags =
9333 SmallVector<SDValue, 16> OutLoadChains;
9334 SmallVector<SDValue, 16> OutStoreChains;
9335 SmallVector<SDValue, 32> OutChains;
9336 unsigned NumMemOps = MemOps.size();
9337 uint64_t SrcOff = 0, DstOff = 0;
// Emit one memory operation per type picked by findOptimalMemOpLowering,
// walking the buffers front to back.
9338 for (unsigned i = 0; i != NumMemOps; ++i) {
9339 EVT VT = MemOps[i];
9340 unsigned VTSize = VT.getSizeInBits() / 8;
9341 SDValue Value, Store;
9342
9343 if (VTSize > Size) {
9344 // Issuing an unaligned load / store pair that overlaps with the previous
9345 // pair. Adjust the offset accordingly.
9346 assert(i == NumMemOps-1 && i != 0);
9347 SrcOff -= VTSize - Size;
9348 DstOff -= VTSize - Size;
9349 }
9350
9351 if (CopyFromConstant &&
9352 (isZeroConstant || (VT.isInteger() && !VT.isVector()))) {
9353 // It's unlikely a store of a vector immediate can be done in a single
9354 // instruction. It would require a load from a constantpool first.
9355 // We only handle zero vectors here.
9356 // FIXME: Handle other cases where store of vector immediate is done in
9357 // a single instruction.
9358 ConstantDataArraySlice SubSlice;
9359 if (SrcOff < Slice.Length) {
9360 SubSlice = Slice;
9361 SubSlice.move(SrcOff);
9362 } else {
9363 // This is an out-of-bounds access and hence UB. Pretend we read zero.
9364 SubSlice.Array = nullptr;
9365 SubSlice.Offset = 0;
9366 SubSlice.Length = VTSize;
9367 }
9368 Value = getMemsetStringVal(VT, dl, DAG, TLI, SubSlice);
9369 if (Value.getNode()) {
9370 Store = DAG.getStore(
9371 Chain, dl, Value,
9372 DAG.getObjectPtrOffset(dl, Dst, TypeSize::getFixed(DstOff)),
9373 DstPtrInfo.getWithOffset(DstOff), Alignment, MMOFlags, NewAAInfo);
9374 OutChains.push_back(Store);
9375 }
9376 }
9377
9378 if (!Store.getNode()) {
9379 // The type might not be legal for the target. This should only happen
9380 // if the type is smaller than a legal type, as on PPC, so the right
9381 // thing to do is generate a LoadExt/StoreTrunc pair. These simplify
9382 // to Load/Store if NVT==VT.
9383 // FIXME does the case above also need this?
9384 EVT NVT = TLI.getTypeToTransformTo(C, VT);
9385 assert(NVT.bitsGE(VT));
9386
9387 bool isDereferenceable =
9388 SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL);
9389 MachineMemOperand::Flags SrcMMOFlags = MMOFlags;
9390 if (isDereferenceable)
9392 if (isConstant)
9393 SrcMMOFlags |= MachineMemOperand::MOInvariant;
9394
9395 Value = DAG.getExtLoad(
9396 ISD::EXTLOAD, dl, NVT, Chain,
9397 DAG.getObjectPtrOffset(dl, Src, TypeSize::getFixed(SrcOff)),
9398 SrcPtrInfo.getWithOffset(SrcOff), VT,
9399 commonAlignment(*SrcAlign, SrcOff), SrcMMOFlags, NewAAInfo);
9400 OutLoadChains.push_back(Value.getValue(1));
9401
9402 Store = DAG.getTruncStore(
9403 Chain, dl, Value,
9404 DAG.getObjectPtrOffset(dl, Dst, TypeSize::getFixed(DstOff)),
9405 DstPtrInfo.getWithOffset(DstOff), VT, Alignment, MMOFlags, NewAAInfo);
9406 OutStoreChains.push_back(Store);
9407 }
9408 SrcOff += VTSize;
9409 DstOff += VTSize;
9410 Size -= VTSize;
9411 }
9412
// Decide how aggressively to gang load/store chains together; a limit
// <= 1 (or the off switch) keeps the simple interleaved layout.
9413 unsigned GluedLdStLimit = MaxLdStGlue == 0 ?
9415 unsigned NumLdStInMemcpy = OutStoreChains.size();
9416
9417 if (NumLdStInMemcpy) {
9418 // It may be that memcpy might be converted to memset if it's memcpy
9419 // of constants. In such a case, we won't have loads and stores, but
9420 // just stores. In the absence of loads, there is nothing to gang up.
9421 if ((GluedLdStLimit <= 1) || !EnableMemCpyDAGOpt) {
9422 // If target does not care, just leave as it.
9423 for (unsigned i = 0; i < NumLdStInMemcpy; ++i) {
9424 OutChains.push_back(OutLoadChains[i]);
9425 OutChains.push_back(OutStoreChains[i]);
9426 }
9427 } else {
9428 // Ld/St less than/equal limit set by target.
9429 if (NumLdStInMemcpy <= GluedLdStLimit) {
9430 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0,
9431 NumLdStInMemcpy, OutLoadChains,
9432 OutStoreChains);
9433 } else {
9434 unsigned NumberLdChain = NumLdStInMemcpy / GluedLdStLimit;
9435 unsigned RemainingLdStInMemcpy = NumLdStInMemcpy % GluedLdStLimit;
9436 unsigned GlueIter = 0;
9437
9438 // Residual ld/st.
9439 if (RemainingLdStInMemcpy) {
9441 DAG, dl, OutChains, NumLdStInMemcpy - RemainingLdStInMemcpy,
9442 NumLdStInMemcpy, OutLoadChains, OutStoreChains);
9443 }
9444
// Gang the remaining pairs in full groups of GluedLdStLimit, walking
// backwards from the residual boundary.
9445 for (unsigned cnt = 0; cnt < NumberLdChain; ++cnt) {
9446 unsigned IndexFrom = NumLdStInMemcpy - RemainingLdStInMemcpy -
9447 GlueIter - GluedLdStLimit;
9448 unsigned IndexTo = NumLdStInMemcpy - RemainingLdStInMemcpy - GlueIter;
9449 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, IndexFrom, IndexTo,
9450 OutLoadChains, OutStoreChains);
9451 GlueIter += GluedLdStLimit;
9452 }
9453 }
9454 }
9455 }
9456 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
9457}
9458
/// Expand an inline memmove into loads followed by stores. All loads are
/// issued and token-factored before any store is emitted, which makes the
/// copy safe for overlapping source/destination ranges. Returns the new
/// chain head, or an empty SDValue to fall back to a libcall.
9460 SDValue Chain, SDValue Dst, SDValue Src,
9461 uint64_t Size, Align Alignment,
9462 bool isVol, bool AlwaysInline,
9463 MachinePointerInfo DstPtrInfo,
9464 MachinePointerInfo SrcPtrInfo,
9465 const AAMDNodes &AAInfo) {
9466 // Turn a memmove of undef to nop.
9467 // FIXME: We need to honor volatile even is Src is undef.
9468 if (Src.isUndef())
9469 return Chain;
9470
9471 // Expand memmove to a series of load and store ops if the size operand falls
9472 // below a certain threshold.
9473 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9474 const DataLayout &DL = DAG.getDataLayout();
9475 LLVMContext &C = *DAG.getContext();
9476 std::vector<EVT> MemOps;
9477 bool DstAlignCanChange = false;
9479 MachineFrameInfo &MFI = MF.getFrameInfo();
9480 bool OptSize = shouldLowerMemFuncForSize(MF, DAG);
// A non-fixed stack-object destination may have its alignment raised
// below to enable wider memory operations.
9482 if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
9483 DstAlignCanChange = true;
9484 MaybeAlign SrcAlign = DAG.InferPtrAlign(Src);
9485 if (!SrcAlign || Alignment > *SrcAlign)
9486 SrcAlign = Alignment;
9487 assert(SrcAlign && "SrcAlign must be set");
9488 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize);
9489 if (!TLI.findOptimalMemOpLowering(
9490 C, MemOps, Limit,
9491 MemOp::Copy(Size, DstAlignCanChange, Alignment, *SrcAlign, isVol),
9492 DstPtrInfo.getAddrSpace(), SrcPtrInfo.getAddrSpace(),
9493 MF.getFunction().getAttributes(), nullptr))
9494 return SDValue();
9495
9496 if (DstAlignCanChange) {
9497 Type *Ty = MemOps[0].getTypeForEVT(C);
9498 Align NewAlign = DL.getABITypeAlign(Ty);
9499
9500 // Don't promote to an alignment that would require dynamic stack
9501 // realignment which may conflict with optimizations such as tail call
9502 // optimization.
9504 if (!TRI->hasStackRealignment(MF))
9505 if (MaybeAlign StackAlign = DL.getStackAlignment())
9506 NewAlign = std::min(NewAlign, *StackAlign);
9507
9508 if (NewAlign > Alignment) {
9509 // Give the stack frame object a larger alignment if needed.
9510 if (MFI.getObjectAlign(FI->getIndex()) < NewAlign)
9511 MFI.setObjectAlignment(FI->getIndex(), NewAlign);
9512 Alignment = NewAlign;
9513 }
9514 }
9515
9516 // Prepare AAInfo for loads/stores after lowering this memmove.
// TBAA metadata describes the original access, not the byte-wise accesses
// emitted here, so it is dropped.
9517 AAMDNodes NewAAInfo = AAInfo;
9518 NewAAInfo.TBAA = NewAAInfo.TBAAStruct = nullptr;
9519
9520 MachineMemOperand::Flags MMOFlags =
9522 uint64_t SrcOff = 0;
9523 SmallVector<SDValue, 8> LoadValues;
9524 SmallVector<SDValue, 8> LoadChains;
9525 SmallVector<SDValue, 8> OutChains;
9526 unsigned NumMemOps = MemOps.size();
// First pass: issue every load from the source.
9527 for (unsigned i = 0; i < NumMemOps; i++) {
9528 EVT VT = MemOps[i];
9529 unsigned VTSize = VT.getSizeInBits() / 8;
9530 SDValue Value;
9531 bool IsOverlapping = false;
9532
9533 if (i == NumMemOps - 1 && i != 0 && VTSize > Size - SrcOff) {
9534 // Issuing an unaligned load / store pair that overlaps with the previous
9535 // pair. Adjust the offset accordingly.
9536 SrcOff = Size - VTSize;
9537 IsOverlapping = true;
9538 }
9539
9540 // Calculate the actual alignment at the current offset. The alignment at
9541 // SrcOff may be lower than the base alignment, especially when using
9542 // overlapping loads.
9543 Align SrcAlignAtOffset = commonAlignment(*SrcAlign, SrcOff);
9544 if (IsOverlapping) {
9545 // Verify that the target allows misaligned memory accesses at the
9546 // adjusted offset when using overlapping loads.
9547 unsigned Fast;
9548 if (!TLI.allowsMisalignedMemoryAccesses(VT, SrcPtrInfo.getAddrSpace(),
9549 SrcAlignAtOffset, MMOFlags,
9550 &Fast) ||
9551 !Fast) {
9552 // This should have been caught by findOptimalMemOpLowering, but verify
9553 // here for safety.
9554 return SDValue();
9555 }
9556 }
9557
9558 bool isDereferenceable =
9559 SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL);
9560 MachineMemOperand::Flags SrcMMOFlags = MMOFlags;
9561 if (isDereferenceable)
9563 Value =
9564 DAG.getLoad(VT, dl, Chain,
9565 DAG.getObjectPtrOffset(dl, Src, TypeSize::getFixed(SrcOff)),
9566 SrcPtrInfo.getWithOffset(SrcOff), SrcAlignAtOffset,
9567 SrcMMOFlags, NewAAInfo);
9568 LoadValues.push_back(Value);
9569 LoadChains.push_back(Value.getValue(1));
9570 SrcOff += VTSize;
9571 }
// Join all load chains so every load completes before any store below.
9572 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
9573 OutChains.clear();
9574 uint64_t DstOff = 0;
// Second pass: issue every store to the destination.
9575 for (unsigned i = 0; i < NumMemOps; i++) {
9576 EVT VT = MemOps[i];
9577 unsigned VTSize = VT.getSizeInBits() / 8;
9578 SDValue Store;
9579 bool IsOverlapping = false;
9580
9581 if (i == NumMemOps - 1 && i != 0 && VTSize > Size - DstOff) {
9582 // Issuing an unaligned load / store pair that overlaps with the previous
9583 // pair. Adjust the offset accordingly.
9584 DstOff = Size - VTSize;
9585 IsOverlapping = true;
9586 }
9587
9588 // Calculate the actual alignment at the current offset. The alignment at
9589 // DstOff may be lower than the base alignment, especially when using
9590 // overlapping stores.
9591 Align DstAlignAtOffset = commonAlignment(Alignment, DstOff);
9592 if (IsOverlapping) {
9593 // Verify that the target allows misaligned memory accesses at the
9594 // adjusted offset when using overlapping stores.
9595 unsigned Fast;
9596 if (!TLI.allowsMisalignedMemoryAccesses(VT, DstPtrInfo.getAddrSpace(),
9597 DstAlignAtOffset, MMOFlags,
9598 &Fast) ||
9599 !Fast) {
9600 // This should have been caught by findOptimalMemOpLowering, but verify
9601 // here for safety.
9602 return SDValue();
9603 }
9604 }
9605 Store = DAG.getStore(
9606 Chain, dl, LoadValues[i],
9607 DAG.getObjectPtrOffset(dl, Dst, TypeSize::getFixed(DstOff)),
9608 DstPtrInfo.getWithOffset(DstOff), DstAlignAtOffset, MMOFlags,
9609 NewAAInfo);
9610 OutChains.push_back(Store);
9611 DstOff += VTSize;
9612 }
9613
9614 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
9615}
9616
9617/// Lower the call to 'memset' intrinsic function into a series of store
9618/// operations.
9619///
9620/// \param DAG Selection DAG where lowered code is placed.
9621/// \param dl Link to corresponding IR location.
9622/// \param Chain Control flow dependency.
9623/// \param Dst Pointer to destination memory location.
9624/// \param Src Value of byte to write into the memory.
9625/// \param Size Number of bytes to write.
9626/// \param Alignment Alignment of the destination in bytes.
9627/// \param isVol True if destination is volatile.
9628/// \param AlwaysInline Makes sure no function call is generated.
9629/// \param DstPtrInfo IR information on the memory pointer.
9630/// \returns New head in the control flow, if lowering was successful, empty
9631/// SDValue otherwise.
9632///
9633/// The function tries to replace 'llvm.memset' intrinsic with several store
9634/// operations and value calculation code. This is usually profitable for small
9635/// memory size or when the semantic requires inlining.
9637 SDValue Chain, SDValue Dst, SDValue Src,
9638 uint64_t Size, Align Alignment, bool isVol,
9639 bool AlwaysInline, MachinePointerInfo DstPtrInfo,
9640 const AAMDNodes &AAInfo) {
9641 // Turn a memset of undef to nop.
9642 // FIXME: We need to honor volatile even is Src is undef.
9643 if (Src.isUndef())
9644 return Chain;
9645
9646 // Expand memset to a series of load/store ops if the size operand
9647 // falls below a certain threshold.
9648 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9649 std::vector<EVT> MemOps;
9650 bool DstAlignCanChange = false;
9651 LLVMContext &C = *DAG.getContext();
9653 MachineFrameInfo &MFI = MF.getFrameInfo();
9654 bool OptSize = shouldLowerMemFuncForSize(MF, DAG);
// The destination alignment may only be raised when Dst is a non-fixed stack
// object (see the MFI.setObjectAlignment update further below).
9656 if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
9657 DstAlignCanChange = true;
9658 bool IsZeroVal = isNullConstant(Src);
// AlwaysInline lifts the store-count limit so no libcall fallback is needed.
9659 unsigned Limit = AlwaysInline ? ~0 : TLI.getMaxStoresPerMemset(OptSize);
9660
// The target may report the widest store type it chose via LargestVT; if it
// leaves it invalid, it is recomputed from MemOps below.
9661 EVT LargestVT;
9662 if (!TLI.findOptimalMemOpLowering(
9663 C, MemOps, Limit,
9664 MemOp::Set(Size, DstAlignCanChange, Alignment, IsZeroVal, isVol),
9665 DstPtrInfo.getAddrSpace(), ~0u, MF.getFunction().getAttributes(),
9666 &LargestVT))
9667 return SDValue();
9668
9669 if (DstAlignCanChange) {
9670 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
9671 const DataLayout &DL = DAG.getDataLayout();
9672 Align NewAlign = DL.getABITypeAlign(Ty);
9673
9674 // Don't promote to an alignment that would require dynamic stack
9675 // realignment which may conflict with optimizations such as tail call
9676 // optimization.
9678 if (!TRI->hasStackRealignment(MF))
9679 if (MaybeAlign StackAlign = DL.getStackAlignment())
9680 NewAlign = std::min(NewAlign, *StackAlign);
9681
9682 if (NewAlign > Alignment) {
9683 // Give the stack frame object a larger alignment if needed.
9684 if (MFI.getObjectAlign(FI->getIndex()) < NewAlign)
9685 MFI.setObjectAlignment(FI->getIndex(), NewAlign)
9686 Alignment = NewAlign;
9687 }
9688 }
9689
9690 SmallVector<SDValue, 8> OutChains;
9691 uint64_t DstOff = 0;
9692 unsigned NumMemOps = MemOps.size();
9693
9694 // Find the largest store and generate the bit pattern for it.
9695 // If target didn't set LargestVT, compute it from MemOps.
9696 if (!LargestVT.isSimple()) {
9697 LargestVT = MemOps[0];
9698 for (unsigned i = 1; i < NumMemOps; i++)
9699 if (MemOps[i].bitsGT(LargestVT))
9700 LargestVT = MemOps[i];
9701 }
9702 SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl);
9703
9704 // Prepare AAInfo for loads/stores after lowering this memset.
9705 AAMDNodes NewAAInfo = AAInfo;
9706 NewAAInfo.TBAA = NewAAInfo.TBAAStruct = nullptr;
9707
// Emit one store per MemOps entry, deriving narrower store values from the
// widest memset pattern where the target can do so cheaply.
9708 for (unsigned i = 0; i < NumMemOps; i++) {
9709 EVT VT = MemOps[i];
9710 unsigned VTSize = VT.getSizeInBits() / 8;
9711 // The target should specify store types that exactly cover the memset size
9712 // (with the last store potentially being oversized for overlapping stores).
9713 assert(Size > 0 && "Target specified more stores than needed in "
9714 "findOptimalMemOpLowering");
9715 if (VTSize > Size) {
9716 // Issuing an unaligned load / store pair that overlaps with the previous
9717 // pair. Adjust the offset accordingly.
9718 assert(i == NumMemOps-1 && i != 0);
9719 DstOff -= VTSize - Size;
9720 }
9721
9722 // If this store is smaller than the largest store see whether we can get
9723 // the smaller value for free with a truncate or extract vector element and
9724 // then store.
9725 SDValue Value = MemSetValue;
9726 if (VT.bitsLT(LargestVT)) {
9727 unsigned Index;
9728 unsigned NElts = LargestVT.getSizeInBits() / VT.getSizeInBits();
9729 EVT SVT = EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(), NElts);
9730 if (!LargestVT.isVector() && !VT.isVector() &&
9731 TLI.isTruncateFree(LargestVT, VT))
9732 Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue);
9733 else if (LargestVT.isVector() && !VT.isVector() &&
9735 LargestVT.getTypeForEVT(*DAG.getContext()),
9736 VT.getSizeInBits(), Index) &&
9737 TLI.isTypeLegal(SVT) &&
9738 LargestVT.getSizeInBits() == SVT.getSizeInBits()) {
9739 // Target which can combine store(extractelement VectorTy, Idx) can get
9740 // the smaller value for free.
9741 SDValue TailValue = DAG.getNode(ISD::BITCAST, dl, SVT, MemSetValue);
9742 Value = DAG.getExtractVectorElt(dl, VT, TailValue, Index);
9743 } else
9744 Value = getMemsetValue(Src, VT, DAG, dl);
9745 }
9746 assert(Value.getValueType() == VT && "Value with wrong type.");
9747 SDValue Store = DAG.getStore(
9748 Chain, dl, Value,
9749 DAG.getObjectPtrOffset(dl, Dst, TypeSize::getFixed(DstOff)),
9750 DstPtrInfo.getWithOffset(DstOff), Alignment,
9752 NewAAInfo);
9753 OutChains.push_back(Store);
9754 DstOff += VT.getSizeInBits() / 8;
9755 // For oversized overlapping stores, only subtract the remaining bytes.
9756 // For normal stores, subtract the full store size.
9757 if (VTSize > Size) {
9758 Size = 0;
9759 } else {
9760 Size -= VTSize;
9761 }
9762 }
9763
9764 // After processing all stores, Size should be exactly 0. Any remaining bytes
9765 // indicate a bug in the target's findOptimalMemOpLowering implementation.
9766 assert(Size == 0 && "Target's findOptimalMemOpLowering did not specify "
9767 "stores that exactly cover the memset size");
9768
// Tie all store chains together so later users depend on every emitted store.
9769 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
9770}
9771
9773 unsigned AS) {
9774 // Lowering memcpy / memset / memmove intrinsics to calls is only valid if all
9775 // pointer operands can be losslessly bitcasted to pointers of address space 0
// A non-zero address space is only acceptable when the target treats the cast
// to address space 0 as a no-op; otherwise compilation is aborted.
9776 if (AS != 0 && !TLI->getTargetMachine().isNoopAddrSpaceCast(AS, 0)) {
9777 report_fatal_error("cannot lower memory intrinsic in address space " +
9778 Twine(AS));
9779 }
9780}
9781
9783 const SelectionDAG *SelDAG,
9784 bool AllowReturnsFirstArg) {
// Only IR calls explicitly marked 'tail' are candidates for tail-call lowering.
9785 if (!CI || !CI->isTailCall())
9786 return false;
9787 // TODO: Fix "returns-first-arg" determination so it doesn't depend on which
9788 // helper symbol we lower to.
9789 return isInTailCallPosition(*CI, SelDAG->getTarget(),
9790 AllowReturnsFirstArg &&
9792}
9793
9794static std::pair<SDValue, SDValue>
9797 const CallInst *CI, RTLIB::Libcall Call,
9798 SelectionDAG *DAG, const TargetLowering *TLI) {
// Shared helper used by getStrcmp/getStrstr/getMemcmp/etc.: lowers a runtime
// library call to a call node. Returns an empty pair when the target provides
// no implementation of the requested libcall.
9799 RTLIB::LibcallImpl LCImpl = DAG->getLibcalls().getLibcallImpl(Call);
9800
9801 if (LCImpl == RTLIB::Unsupported)
9802 return {};
9803
9805 bool IsTailCall =
9806 isInTailCallPositionWrapper(CI, DAG, /*AllowReturnsFirstArg=*/true);
9807 SDValue Callee =
9808 DAG->getExternalSymbol(LCImpl, TLI->getPointerTy(DAG->getDataLayout()));
9809
9810 CLI.setDebugLoc(dl)
9811 .setChain(Chain)
9813 CI->getType(), Callee, std::move(Args))
9814 .setTailCall(IsTailCall);
9815
// Per the LowerCallTo convention, the pair is (result value, output chain).
9816 return TLI->LowerCallTo(CLI);
9817}
9818
9819std::pair<SDValue, SDValue> SelectionDAG::getStrcmp(SDValue Chain,
9820 const SDLoc &dl, SDValue S1,
9821 SDValue S2,
9822 const CallInst *CI) {
// Lower to the target's strcmp libcall; both operands are passed as pointers.
9824 TargetLowering::ArgListTy Args = {{S1, PT}, {S2, PT}};
9825 return getRuntimeCallSDValueHelper(Chain, dl, std::move(Args), CI,
9826 RTLIB::STRCMP, this, TLI);
9827}
9828
9829std::pair<SDValue, SDValue> SelectionDAG::getStrstr(SDValue Chain,
9830 const SDLoc &dl, SDValue S1,
9831 SDValue S2,
9832 const CallInst *CI) {
// Lower to the target's strstr libcall; both operands are passed as pointers.
9834 TargetLowering::ArgListTy Args = {{S1, PT}, {S2, PT}};
9835 return getRuntimeCallSDValueHelper(Chain, dl, std::move(Args), CI,
9836 RTLIB::STRSTR, this, TLI);
9837}
9838
9839std::pair<SDValue, SDValue> SelectionDAG::getMemccpy(SDValue Chain,
9840 const SDLoc &dl,
9841 SDValue Dst, SDValue Src,
9843 const CallInst *CI) {
9845
// Dst and Src are passed as pointers to the memccpy libcall.
9847 {Dst, PT},
9848 {Src, PT},
9851 return getRuntimeCallSDValueHelper(Chain, dl, std::move(Args), CI,
9852 RTLIB::MEMCCPY, this, TLI);
9853}
9854
9855std::pair<SDValue, SDValue>
9857 SDValue Mem1, SDValue Size, const CallInst *CI) {
// Both memory operands are passed as pointers to the memcmp libcall.
9860 {Mem0, PT},
9861 {Mem1, PT},
9863 return getRuntimeCallSDValueHelper(Chain, dl, std::move(Args), CI,
9864 RTLIB::MEMCMP, this, TLI);
9865}
9866
9867std::pair<SDValue, SDValue> SelectionDAG::getStrcpy(SDValue Chain,
9868 const SDLoc &dl,
9869 SDValue Dst, SDValue Src,
9870 const CallInst *CI) {
// Lower to the target's strcpy libcall with pointer-typed operands.
9872 TargetLowering::ArgListTy Args = {{Dst, PT}, {Src, PT}};
9873 return getRuntimeCallSDValueHelper(Chain, dl, std::move(Args), CI,
9874 RTLIB::STRCPY, this, TLI);
9875}
9876
9877std::pair<SDValue, SDValue> SelectionDAG::getStrlen(SDValue Chain,
9878 const SDLoc &dl,
9879 SDValue Src,
9880 const CallInst *CI) {
9881 // Emit a library call.
// Single pointer argument; defers to the shared libcall-lowering helper.
9884 return getRuntimeCallSDValueHelper(Chain, dl, std::move(Args), CI,
9885 RTLIB::STRLEN, this, TLI);
9886}
9887
9889 SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size,
9890 Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI,
9891 std::optional<bool> OverrideTailCall, MachinePointerInfo DstPtrInfo,
9892 MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo,
9893 BatchAAResults *BatchAA) {
9894 // Check to see if we should lower the memcpy to loads and stores first.
9895 // For cases within the target-specified limits, this is the best choice.
9897 if (ConstantSize) {
9898 // Memcpy with size zero? Just return the original chain.
9899 if (ConstantSize->isZero())
9900 return Chain;
9901
9903 *this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), Alignment,
9904 isVol, false, DstPtrInfo, SrcPtrInfo, AAInfo, BatchAA);
9905 if (Result.getNode())
9906 return Result;
9907 }
9908
9909 // Then check to see if we should lower the memcpy with target-specific
9910 // code. If the target chooses to do this, this is the next best.
9911 if (TSI) {
9912 SDValue Result = TSI->EmitTargetCodeForMemcpy(
9913 *this, dl, Chain, Dst, Src, Size, Alignment, isVol, AlwaysInline,
9914 DstPtrInfo, SrcPtrInfo);
9915 if (Result.getNode())
9916 return Result;
9917 }
9918
9919 // If we really need inline code and the target declined to provide it,
9920 // use a (potentially long) sequence of loads and stores.
9921 if (AlwaysInline) {
9922 assert(ConstantSize && "AlwaysInline requires a constant size!");
9924 *this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), Alignment,
9925 isVol, true, DstPtrInfo, SrcPtrInfo, AAInfo, BatchAA);
9926 }
9927
9930
9931 // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc
9932 // memcpy is not guaranteed to be safe. libc memcpys aren't required to
9933 // respect volatile, so they may do things like read or write memory
9934 // beyond the given memory regions. But fixing this isn't easy, and most
9935 // people don't care.
9936
9937 // Emit a library call.
9940 Args.emplace_back(Dst, PtrTy);
9941 Args.emplace_back(Src, PtrTy);
9942 Args.emplace_back(Size, getDataLayout().getIntPtrType(*getContext()));
9943 // FIXME: pass in SDLoc
9945 bool IsTailCall = false;
9946 RTLIB::LibcallImpl MemCpyImpl = TLI->getMemcpyImpl();
9947
// An explicit override wins; otherwise allow the returns-first-arg tail-call
// form only when the chosen libcall really is libc memcpy (which returns its
// first argument).
9948 if (OverrideTailCall.has_value()) {
9949 IsTailCall = *OverrideTailCall;
9950 } else {
9951 bool LowersToMemcpy = MemCpyImpl == RTLIB::impl_memcpy;
9952 IsTailCall = isInTailCallPositionWrapper(CI, this, LowersToMemcpy);
9953 }
9954
9955 CLI.setDebugLoc(dl)
9956 .setChain(Chain)
9957 .setLibCallee(
9958 Libcalls->getLibcallImplCallingConv(MemCpyImpl),
9959 Dst.getValueType().getTypeForEVT(*getContext()),
9960 getExternalSymbol(MemCpyImpl, TLI->getPointerTy(getDataLayout())),
9961 std::move(Args))
9963 .setTailCall(IsTailCall);
9964
// Only the output chain is returned; the call's value result is unused here.
9965 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
9966 return CallResult.second;
9967}
9968
9970 SDValue Dst, SDValue Src, SDValue Size,
9971 Type *SizeTy, unsigned ElemSz,
9972 bool isTailCall,
9973 MachinePointerInfo DstPtrInfo,
9974 MachinePointerInfo SrcPtrInfo) {
9975 // Emit a library call.
9978 Args.emplace_back(Dst, ArgTy);
9979 Args.emplace_back(Src, ArgTy);
9980 Args.emplace_back(Size, SizeTy);
9981
// The element-wise atomic memcpy libcall variant is selected by element size;
// if the runtime provides no implementation, lowering cannot proceed.
9982 RTLIB::Libcall LibraryCall =
9984 RTLIB::LibcallImpl LibcallImpl = Libcalls->getLibcallImpl(LibraryCall);
9985 if (LibcallImpl == RTLIB::Unsupported)
9986 report_fatal_error("Unsupported element size");
9987
9989 CLI.setDebugLoc(dl)
9990 .setChain(Chain)
9991 .setLibCallee(
9992 Libcalls->getLibcallImplCallingConv(LibcallImpl),
9994 getExternalSymbol(LibcallImpl, TLI->getPointerTy(getDataLayout())),
9995 std::move(Args))
9997 .setTailCall(isTailCall);
9998
9999 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
10000 return CallResult.second;
10001}
10002
10004 SDValue Src, SDValue Size, Align Alignment,
10005 bool isVol, const CallInst *CI,
10006 std::optional<bool> OverrideTailCall,
10007 MachinePointerInfo DstPtrInfo,
10008 MachinePointerInfo SrcPtrInfo,
10009 const AAMDNodes &AAInfo,
10010 BatchAAResults *BatchAA) {
10011 // Check to see if we should lower the memmove to loads and stores first.
10012 // For cases within the target-specified limits, this is the best choice.
10014 if (ConstantSize) {
10015 // Memmove with size zero? Just return the original chain.
10016 if (ConstantSize->isZero())
10017 return Chain;
10018
10020 *this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), Alignment,
10021 isVol, false, DstPtrInfo, SrcPtrInfo, AAInfo);
10022 if (Result.getNode())
10023 return Result;
10024 }
10025
10026 // Then check to see if we should lower the memmove with target-specific
10027 // code. If the target chooses to do this, this is the next best.
10028 if (TSI) {
10029 SDValue Result =
10030 TSI->EmitTargetCodeForMemmove(*this, dl, Chain, Dst, Src, Size,
10031 Alignment, isVol, DstPtrInfo, SrcPtrInfo);
10032 if (Result.getNode())
10033 return Result;
10034 }
10035
10038
10039 // FIXME: If the memmove is volatile, lowering it to plain libc memmove may
10040 // not be safe. See memcpy above for more details.
10041
10042 // Emit a library call.
10045 Args.emplace_back(Dst, PtrTy);
10046 Args.emplace_back(Src, PtrTy);
10047 Args.emplace_back(Size, getDataLayout().getIntPtrType(*getContext()));
10048 // FIXME: pass in SDLoc
10050
10051 RTLIB::LibcallImpl MemmoveImpl = Libcalls->getLibcallImpl(RTLIB::MEMMOVE);
10052
// An explicit override wins; otherwise allow the returns-first-arg tail-call
// form only when the libcall genuinely lowers to libc memmove.
10053 bool IsTailCall = false;
10054 if (OverrideTailCall.has_value()) {
10055 IsTailCall = *OverrideTailCall;
10056 } else {
10057 bool LowersToMemmove = MemmoveImpl == RTLIB::impl_memmove;
10058 IsTailCall = isInTailCallPositionWrapper(CI, this, LowersToMemmove);
10059 }
10060
10061 CLI.setDebugLoc(dl)
10062 .setChain(Chain)
10063 .setLibCallee(
10064 Libcalls->getLibcallImplCallingConv(MemmoveImpl),
10065 Dst.getValueType().getTypeForEVT(*getContext()),
10066 getExternalSymbol(MemmoveImpl, TLI->getPointerTy(getDataLayout())),
10067 std::move(Args))
10069 .setTailCall(IsTailCall);
10070
// Only the output chain is returned; the call's value result is unused here.
10071 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
10072 return CallResult.second;
10073}
10074
10076 SDValue Dst, SDValue Src, SDValue Size,
10077 Type *SizeTy, unsigned ElemSz,
10078 bool isTailCall,
10079 MachinePointerInfo DstPtrInfo,
10080 MachinePointerInfo SrcPtrInfo) {
10081 // Emit a library call.
10083 Type *IntPtrTy = getDataLayout().getIntPtrType(*getContext());
10084 Args.emplace_back(Dst, IntPtrTy);
10085 Args.emplace_back(Src, IntPtrTy);
10086 Args.emplace_back(Size, SizeTy);
10087
// The element-wise atomic memmove libcall variant is selected by element
// size; an unsupported element size is a fatal error.
10088 RTLIB::Libcall LibraryCall =
10090 RTLIB::LibcallImpl LibcallImpl = Libcalls->getLibcallImpl(LibraryCall);
10091 if (LibcallImpl == RTLIB::Unsupported)
10092 report_fatal_error("Unsupported element size");
10093
10095 CLI.setDebugLoc(dl)
10096 .setChain(Chain)
10097 .setLibCallee(
10098 Libcalls->getLibcallImplCallingConv(LibcallImpl),
10100 getExternalSymbol(LibcallImpl, TLI->getPointerTy(getDataLayout())),
10101 std::move(Args))
10103 .setTailCall(isTailCall);
10104
10105 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
10106 return CallResult.second;
10107}
10108
10110 SDValue Src, SDValue Size, Align Alignment,
10111 bool isVol, bool AlwaysInline,
10112 const CallInst *CI,
10113 MachinePointerInfo DstPtrInfo,
10114 const AAMDNodes &AAInfo) {
10115 // Check to see if we should lower the memset to stores first.
10116 // For cases within the target-specified limits, this is the best choice.
10118 if (ConstantSize) {
10119 // Memset with size zero? Just return the original chain.
10120 if (ConstantSize->isZero())
10121 return Chain;
10122
10123 SDValue Result = getMemsetStores(*this, dl, Chain, Dst, Src,
10124 ConstantSize->getZExtValue(), Alignment,
10125 isVol, false, DstPtrInfo, AAInfo);
10126
10127 if (Result.getNode())
10128 return Result;
10129 }
10130
10131 // Then check to see if we should lower the memset with target-specific
10132 // code. If the target chooses to do this, this is the next best.
10133 if (TSI) {
10134 SDValue Result = TSI->EmitTargetCodeForMemset(
10135 *this, dl, Chain, Dst, Src, Size, Alignment, isVol, AlwaysInline, DstPtrInfo);
10136 if (Result.getNode())
10137 return Result;
10138 }
10139
10140 // If we really need inline code and the target declined to provide it,
10141 // use a (potentially long) sequence of loads and stores.
10142 if (AlwaysInline) {
10143 assert(ConstantSize && "AlwaysInline requires a constant size!");
10144 SDValue Result = getMemsetStores(*this, dl, Chain, Dst, Src,
10145 ConstantSize->getZExtValue(), Alignment,
10146 isVol, true, DstPtrInfo, AAInfo);
10147 assert(Result &&
10148 "getMemsetStores must return a valid sequence when AlwaysInline");
10149 return Result;
10150 }
10151
10153
10154 // Emit a library call.
10155 auto &Ctx = *getContext();
10156 const auto& DL = getDataLayout();
10157
10159 // FIXME: pass in SDLoc
10160 CLI.setDebugLoc(dl).setChain(Chain);
10161
10162 RTLIB::LibcallImpl BzeroImpl = Libcalls->getLibcallImpl(RTLIB::BZERO);
10163 bool UseBZero = BzeroImpl != RTLIB::Unsupported && isNullConstant(Src);
10164
10165 // If zeroing out and bzero is present, use it.
10166 if (UseBZero) {
// bzero takes only (ptr, size) and returns void.
10168 Args.emplace_back(Dst, PointerType::getUnqual(Ctx));
10169 Args.emplace_back(Size, DL.getIntPtrType(Ctx));
10170 CLI.setLibCallee(
10171 Libcalls->getLibcallImplCallingConv(BzeroImpl), Type::getVoidTy(Ctx),
10172 getExternalSymbol(BzeroImpl, TLI->getPointerTy(DL)), std::move(Args));
10173 } else {
10174 RTLIB::LibcallImpl MemsetImpl = Libcalls->getLibcallImpl(RTLIB::MEMSET);
10175
10177 Args.emplace_back(Dst, PointerType::getUnqual(Ctx));
10178 Args.emplace_back(Src, Src.getValueType().getTypeForEVT(Ctx));
10179 Args.emplace_back(Size, DL.getIntPtrType(Ctx));
10180 CLI.setLibCallee(Libcalls->getLibcallImplCallingConv(MemsetImpl),
10181 Dst.getValueType().getTypeForEVT(Ctx),
10182 getExternalSymbol(MemsetImpl, TLI->getPointerTy(DL)),
10183 std::move(Args));
10184 }
10185
// Note: the memset libcall impl is queried again here, independently of which
// branch was taken above, solely to decide tail-call eligibility below.
10186 RTLIB::LibcallImpl MemsetImpl = Libcalls->getLibcallImpl(RTLIB::MEMSET);
10187 bool LowersToMemset = MemsetImpl == RTLIB::impl_memset;
10188
10189 // If we're going to use bzero, make sure not to tail call unless the
10190 // subsequent return doesn't need a value, as bzero doesn't return the first
10191 // arg unlike memset.
10192 bool ReturnsFirstArg = CI && funcReturnsFirstArgOfCall(*CI) && !UseBZero;
10193 bool IsTailCall =
10194 CI && CI->isTailCall() &&
10195 isInTailCallPosition(*CI, getTarget(), ReturnsFirstArg && LowersToMemset);
10196 CLI.setDiscardResult().setTailCall(IsTailCall);
10197
10198 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
10199 return CallResult.second;
10200}
10201
10204 Type *SizeTy, unsigned ElemSz,
10205 bool isTailCall,
10206 MachinePointerInfo DstPtrInfo) {
10207 // Emit a library call.
10209 Args.emplace_back(Dst, getDataLayout().getIntPtrType(*getContext()));
// The fill value is passed to the runtime as an i8, matching memset's API.
10210 Args.emplace_back(Value, Type::getInt8Ty(*getContext()));
10211 Args.emplace_back(Size, SizeTy);
10212
// The element-wise atomic memset libcall variant is selected by element size;
// an unsupported element size is a fatal error.
10213 RTLIB::Libcall LibraryCall =
10215 RTLIB::LibcallImpl LibcallImpl = Libcalls->getLibcallImpl(LibraryCall);
10216 if (LibcallImpl == RTLIB::Unsupported)
10217 report_fatal_error("Unsupported element size");
10218
10220 CLI.setDebugLoc(dl)
10221 .setChain(Chain)
10222 .setLibCallee(
10223 Libcalls->getLibcallImplCallingConv(LibcallImpl),
10225 getExternalSymbol(LibcallImpl, TLI->getPointerTy(getDataLayout())),
10226 std::move(Args))
10228 .setTailCall(isTailCall);
10229
10230 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
10231 return CallResult.second;
10232}
10233
10234SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
10236 MachineMemOperand *MMO,
10237 ISD::LoadExtType ExtType) {
// Try to CSE with an existing identical atomic node; the memory VT, subclass
// data, address space and MMO flags all participate in the FoldingSet profile.
10239 AddNodeIDNode(ID, Opcode, VTList, Ops);
10240 ID.AddInteger(MemVT.getRawBits());
10241 ID.AddInteger(getSyntheticNodeSubclassData<AtomicSDNode>(
10242 dl.getIROrder(), Opcode, VTList, MemVT, MMO, ExtType));
10243 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
10244 ID.AddInteger(MMO->getFlags());
10245 void* IP = nullptr;
10246 if (auto *E = cast_or_null<AtomicSDNode>(FindNodeOrInsertPos(ID, dl, IP))) {
// Reuse the existing node, but tighten its alignment and range metadata with
// what this request's MMO knows.
10247 E->refineAlignment(MMO);
10248 E->refineRanges(MMO);
10249 return SDValue(E, 0);
10250 }
10251
10252 auto *N = newSDNode<AtomicSDNode>(dl.getIROrder(), dl.getDebugLoc(), Opcode,
10253 VTList, MemVT, MMO, ExtType);
10254 createOperands(N, Ops);
10255
10256 CSEMap.InsertNode(N, IP);
10257 InsertNode(N);
10258 SDValue V(N, 0);
10259 NewSDValueDbgMsg(V, "Creating new node: ", this);
10260 return V;
10261}
10262
10264 EVT MemVT, SDVTList VTs, SDValue Chain,
10265 SDValue Ptr, SDValue Cmp, SDValue Swp,
10266 MachineMemOperand *MMO) {
10267 assert(Opcode == ISD::ATOMIC_CMP_SWAP ||
10269 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
10270
// Operand order: chain, address, expected value, replacement value.
10271 SDValue Ops[] = {Chain, Ptr, Cmp, Swp};
10272 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
10273}
10274
10275SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
10276 SDValue Chain, SDValue Ptr, SDValue Val,
10277 MachineMemOperand *MMO) {
10278 assert((Opcode == ISD::ATOMIC_LOAD_ADD || Opcode == ISD::ATOMIC_LOAD_SUB ||
10279 Opcode == ISD::ATOMIC_LOAD_AND || Opcode == ISD::ATOMIC_LOAD_CLR ||
10280 Opcode == ISD::ATOMIC_LOAD_OR || Opcode == ISD::ATOMIC_LOAD_XOR ||
10281 Opcode == ISD::ATOMIC_LOAD_NAND || Opcode == ISD::ATOMIC_LOAD_MIN ||
10282 Opcode == ISD::ATOMIC_LOAD_MAX || Opcode == ISD::ATOMIC_LOAD_UMIN ||
10283 Opcode == ISD::ATOMIC_LOAD_UMAX || Opcode == ISD::ATOMIC_LOAD_FADD ||
10284 Opcode == ISD::ATOMIC_LOAD_FSUB || Opcode == ISD::ATOMIC_LOAD_FMAX ||
10285 Opcode == ISD::ATOMIC_LOAD_FMIN ||
10286 Opcode == ISD::ATOMIC_LOAD_FMINIMUM ||
10287 Opcode == ISD::ATOMIC_LOAD_FMAXIMUM ||
10288 Opcode == ISD::ATOMIC_LOAD_UINC_WRAP ||
10289 Opcode == ISD::ATOMIC_LOAD_UDEC_WRAP ||
10290 Opcode == ISD::ATOMIC_LOAD_USUB_COND ||
10291 Opcode == ISD::ATOMIC_LOAD_USUB_SAT || Opcode == ISD::ATOMIC_SWAP ||
10292 Opcode == ISD::ATOMIC_STORE) &&
10293 "Invalid Atomic Op");
10294
10295 EVT VT = Val.getValueType();
10296
10297 SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other) :
10298 getVTList(VT, MVT::Other);
10299 SDValue Ops[] = {Chain, Ptr, Val};
10300 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
10301}
10302
10304 EVT MemVT, EVT VT, SDValue Chain,
10305 SDValue Ptr, MachineMemOperand *MMO) {
// An atomic load yields the loaded value plus an output chain.
10306 SDVTList VTs = getVTList(VT, MVT::Other);
10307 SDValue Ops[] = {Chain, Ptr};
10308 return getAtomic(ISD::ATOMIC_LOAD, dl, MemVT, VTs, Ops, MMO, ExtType);
10309}
10310
10311/// getMergeValues - Create a MERGE_VALUES node from the given operands.
// A single operand needs no wrapping node and is returned directly.
10313 if (Ops.size() == 1)
10314 return Ops[0];
10315
// The result VT list mirrors the operand types, one result per operand.
10317 VTs.reserve(Ops.size());
10318 for (const SDValue &Op : Ops)
10319 VTs.push_back(Op.getValueType());
10320 return getNode(ISD::MERGE_VALUES, dl, getVTList(VTs), Ops);
10321}
10322
10324 unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
10325 EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment,
10327 const AAMDNodes &AAInfo) {
// A zero-byte access is handled specially before building a memory operand.
10328 if (Size.hasValue() && !Size.getValue())
10330
// Build a MachineMemOperand from the pointer info and defer to the MMO-based
// overload.
10332 MachineMemOperand *MMO =
10333 MF.getMachineMemOperand(PtrInfo, Flags, Size, Alignment, AAInfo);
10334
10335 return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO);
10336}
10337
10339 SDVTList VTList,
10340 ArrayRef<SDValue> Ops, EVT MemVT,
10341 MachineMemOperand *MMO) {
// Forward a single MMO as a one-element list to the array overload.
10342 return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, ArrayRef(MMO));
10343}
10344
10346 SDVTList VTList,
10347 ArrayRef<SDValue> Ops, EVT MemVT,
// Create (or CSE) a memory-intrinsic node carrying one or more MMOs. Only
// intrinsic, prefetch, and target memory opcodes are legal here.
10349 assert(!MMOs.empty() && "Must have at least one MMO");
10350 assert(
10351 (Opcode == ISD::INTRINSIC_VOID || Opcode == ISD::INTRINSIC_W_CHAIN ||
10352 Opcode == ISD::PREFETCH ||
10353 (Opcode <= (unsigned)std::numeric_limits<int>::max() &&
10354 Opcode >= ISD::BUILTIN_OP_END && TSI->isTargetMemoryOpcode(Opcode))) &&
10355 "Opcode is not a memory-accessing opcode!");
10356
// A single MMO is stored directly; multiple MMOs are copied into a
// count-prefixed array on the DAG's allocator.
10358 if (MMOs.size() == 1) {
10359 MemRefs = MMOs[0];
10360 } else {
10361 // Allocate: [size_t count][MMO*][MMO*]...
10362 size_t AllocSize =
10363 sizeof(size_t) + MMOs.size() * sizeof(MachineMemOperand *);
10364 void *Buffer = Allocator.Allocate(AllocSize, alignof(size_t));
10365 size_t *CountPtr = static_cast<size_t *>(Buffer);
10366 *CountPtr = MMOs.size();
10367 MachineMemOperand **Array =
10368 reinterpret_cast<MachineMemOperand **>(CountPtr + 1);
10369 llvm::copy(MMOs, Array);
10370 MemRefs = Array;
10371 }
10372
10373 // Memoize the node unless it returns a glue result.
10375 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
10377 AddNodeIDNode(ID, Opcode, VTList, Ops);
10378 ID.AddInteger(getSyntheticNodeSubclassData<MemIntrinsicSDNode>(
10379 Opcode, dl.getIROrder(), VTList, MemVT, MemRefs));
10380 ID.AddInteger(MemVT.getRawBits());
10381 for (const MachineMemOperand *MMO : MMOs) {
10382 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
10383 ID.AddInteger(MMO->getFlags());
10384 }
10385 void *IP = nullptr;
10386 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
// Reuse the existing node, merging in this request's alignment knowledge.
10387 cast<MemIntrinsicSDNode>(E)->refineAlignment(MMOs);
10388 return SDValue(E, 0);
10389 }
10390
10391 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
10392 VTList, MemVT, MemRefs);
10393 createOperands(N, Ops);
10394 CSEMap.InsertNode(N, IP);
10395 } else {
10396 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
10397 VTList, MemVT, MemRefs);
10398 createOperands(N, Ops);
10399 }
10400 InsertNode(N);
10401 SDValue V(N, 0);
10402 NewSDValueDbgMsg(V, "Creating new node: ", this);
10403 return V;
10404}
10405
10407 SDValue Chain, int FrameIndex) {
10408 const unsigned Opcode = IsStart ? ISD::LIFETIME_START : ISD::LIFETIME_END;
10409 const auto VTs = getVTList(MVT::Other);
10410 SDValue Ops[2] = {
10411 Chain,
10412 getFrameIndex(FrameIndex,
10413 getTargetLoweringInfo().getFrameIndexTy(getDataLayout()),
10414 true)};
10415
// CSE on (opcode, operands, frame index) so repeated lifetime markers for the
// same object collapse to a single node.
10417 AddNodeIDNode(ID, Opcode, VTs, Ops);
10418 ID.AddInteger(FrameIndex);
10419 void *IP = nullptr;
10420 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
10421 return SDValue(E, 0);
10422
10423 LifetimeSDNode *N =
10424 newSDNode<LifetimeSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), VTs);
10425 createOperands(N, Ops);
10426 CSEMap.InsertNode(N, IP);
10427 InsertNode(N);
10428 SDValue V(N, 0);
10429 NewSDValueDbgMsg(V, "Creating new node: ", this);
10430 return V;
10431}
10432
10434 uint64_t Guid, uint64_t Index,
10435 uint32_t Attr) {
10436 const unsigned Opcode = ISD::PSEUDO_PROBE;
10437 const auto VTs = getVTList(MVT::Other);
10438 SDValue Ops[] = {Chain};
// CSE on Guid and Index. Note that Attr is not folded into the profile, so
// probes differing only in Attr would be unified here.
10440 AddNodeIDNode(ID, Opcode, VTs, Ops);
10441 ID.AddInteger(Guid);
10442 ID.AddInteger(Index);
10443 void *IP = nullptr;
10444 if (SDNode *E = FindNodeOrInsertPos(ID, Dl, IP))
10445 return SDValue(E, 0);
10446
10447 auto *N = newSDNode<PseudoProbeSDNode>(
10448 Opcode, Dl.getIROrder(), Dl.getDebugLoc(), VTs, Guid, Index, Attr);
10449 createOperands(N, Ops);
10450 CSEMap.InsertNode(N, IP);
10451 InsertNode(N);
10452 SDValue V(N, 0);
10453 NewSDValueDbgMsg(V, "Creating new node: ", this);
10454 return V;
10455}
10456
10457/// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
10458/// MachinePointerInfo record from it. This is particularly useful because the
10459/// code generator has many cases where it doesn't bother passing in a
10460/// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
10462 SelectionDAG &DAG, SDValue Ptr,
10463 int64_t Offset = 0) {
10464 // If this is FI+Offset, we can model it.
10465 if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr))
10467 FI->getIndex(), Offset);
10468
10469 // If this is (FI+Offset1)+Offset2, we can model it.
// Bail out (returning the caller's Info unchanged) unless Ptr is an ADD of a
// frame index and a constant.
10470 if (Ptr.getOpcode() != ISD::ADD ||
10473 return Info;
10474
// Fold the constant addend into the frame-index pointer info.
10475 int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
10477 DAG.getMachineFunction(), FI,
10478 Offset + cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue());
10479}
10480
10481/// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
10482/// MachinePointerInfo record from it. This is particularly useful because the
10483/// code generator has many cases where it doesn't bother passing in a
10484/// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
10486 SelectionDAG &DAG, SDValue Ptr,
10487 SDValue OffsetOp) {
10488 // If the 'Offset' value isn't a constant, we can't handle this.
10490 return InferPointerInfo(Info, DAG, Ptr, OffsetNode->getSExtValue());
// An undef offset contributes nothing; treat it as offset zero.
10491 if (OffsetOp.isUndef())
10492 return InferPointerInfo(Info, DAG, Ptr);
10493 return Info;
10494}
10495
10497 EVT VT, const SDLoc &dl, SDValue Chain,
10498 SDValue Ptr, SDValue Offset,
10499 MachinePointerInfo PtrInfo, EVT MemVT,
10500 Align Alignment,
10501 MachineMemOperand::Flags MMOFlags,
10502 const AAMDNodes &AAInfo, const MDNode *Ranges) {
10503 assert(Chain.getValueType() == MVT::Other &&
10504 "Invalid chain type");
10505
// Force the load flag and forbid a simultaneous store flag on the MMO.
10506 MMOFlags |= MachineMemOperand::MOLoad;
10507 assert((MMOFlags & MachineMemOperand::MOStore) == 0);
10508 // If we don't have a PtrInfo, infer the trivial frame index case to simplify
10509 // clients.
10510 if (PtrInfo.V.isNull())
10511 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr, Offset);
10512
// Build the MachineMemOperand and dispatch to the MMO-based overload.
10513 TypeSize Size = MemVT.getStoreSize();
10515 MachineMemOperand *MMO = MF.getMachineMemOperand(PtrInfo, MMOFlags, Size,
10516 Alignment, AAInfo, Ranges);
10517 return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO);
10518}
10519
10521 EVT VT, const SDLoc &dl, SDValue Chain,
10522 SDValue Ptr, SDValue Offset, EVT MemVT,
10523 MachineMemOperand *MMO) {
// Normalize the extension kind, validate the request, then create or CSE the
// LoadSDNode.
10524 if (VT == MemVT) {
10525 ExtType = ISD::NON_EXTLOAD;
10526 } else if (ExtType == ISD::NON_EXTLOAD) {
10527 assert(VT == MemVT && "Non-extending load from different memory type!");
10528 } else {
10529 // Extending load.
10530 assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) &&
10531 "Should only be an extending load, not truncating!");
10532 assert(VT.isInteger() == MemVT.isInteger() &&
10533 "Cannot convert from FP to Int or Int -> FP!");
10534 assert(VT.isVector() == MemVT.isVector() &&
10535 "Cannot use an ext load to convert to or from a vector!");
10536 assert((!VT.isVector() ||
10538 "Cannot use an ext load to change the number of vector elements!");
10539 }
10540
10541 assert((!MMO->getRanges() ||
10543 ->getBitWidth() == MemVT.getScalarSizeInBits() &&
10544 MemVT.isInteger())) &&
10545 "Range metadata and load type must match!");
10546
10547 bool Indexed = AM != ISD::UNINDEXED;
10548 assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!");
10549
// Indexed loads additionally produce the updated base pointer as a result.
10550 SDVTList VTs = Indexed ?
10551 getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
10552 SDValue Ops[] = { Chain, Ptr, Offset };
10554 AddNodeIDNode(ID, ISD::LOAD, VTs, Ops);
10555 ID.AddInteger(MemVT.getRawBits());
10556 ID.AddInteger(getSyntheticNodeSubclassData<LoadSDNode>(
10557 dl.getIROrder(), VTs, AM, ExtType, MemVT, MMO));
10558 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
10559 ID.AddInteger(MMO->getFlags());
10560 void *IP = nullptr;
10561 if (auto *E = cast_or_null<LoadSDNode>(FindNodeOrInsertPos(ID, dl, IP))) {
// Reuse the equivalent node; merge in this MMO's alignment and range info.
10562 E->refineAlignment(MMO);
10563 E->refineRanges(MMO);
10564 return SDValue(E, 0);
10565 }
10566 auto *N = newSDNode<LoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
10567 ExtType, MemVT, MMO);
10568 createOperands(N, Ops);
10569
10570 CSEMap.InsertNode(N, IP);
10571 InsertNode(N);
10572 SDValue V(N, 0);
10573 NewSDValueDbgMsg(V, "Creating new node: ", this);
10574 return V;
10575}
10576
10578 SDValue Ptr, MachinePointerInfo PtrInfo,
10579 MaybeAlign Alignment,
10580 MachineMemOperand::Flags MMOFlags,
10581 const AAMDNodes &AAInfo, const MDNode *Ranges) {
// Simple unindexed, non-extending load: the offset operand is undef.
10583 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
10584 PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges);
10585}
10586
10588 SDValue Ptr, MachineMemOperand *MMO) {
// MMO-based simple load: unindexed, non-extending, offset operand is undef.
10590 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
10591 VT, MMO);
10592}
10593
10595 EVT VT, SDValue Chain, SDValue Ptr,
10596 MachinePointerInfo PtrInfo, EVT MemVT,
10597 MaybeAlign Alignment,
10598 MachineMemOperand::Flags MMOFlags,
10599 const AAMDNodes &AAInfo) {
10601 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, PtrInfo,
10602 MemVT, Alignment, MMOFlags, AAInfo);
10603}
10604
10606 EVT VT, SDValue Chain, SDValue Ptr, EVT MemVT,
10607 MachineMemOperand *MMO) {
10609 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
10610 MemVT, MMO);
10611}
10612
10616 LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
10617 assert(LD->getOffset().isUndef() && "Load is already a indexed load!");
10618 // Don't propagate the invariant or dereferenceable flags.
10619 auto MMOFlags =
10620 LD->getMemOperand()->getFlags() &
10622 return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
10623 LD->getChain(), Base, Offset, LD->getPointerInfo(),
10624 LD->getMemoryVT(), LD->getAlign(), MMOFlags, LD->getAAInfo());
10625}
10626
// getStore (PtrInfo form): builds a MachineMemOperand (inferring PtrInfo for
// the trivial frame-index case when it is null) and forwards to the MMO form.
// NOTE(review): lines 10626-10627 (signature start) and 10640-10641 (likely
// the MachineFunction/Size setup) are missing from this extraction.
10628 SDValue Ptr, MachinePointerInfo PtrInfo,
10629 Align Alignment,
10630 MachineMemOperand::Flags MMOFlags,
10631 const AAMDNodes &AAInfo) {
10632 assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
10633
// A store MMO must be MOStore and must not also claim MOLoad.
10634 MMOFlags |= MachineMemOperand::MOStore;
10635 assert((MMOFlags & MachineMemOperand::MOLoad) == 0);
10636
10637 if (PtrInfo.V.isNull())
10638 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr);
10639
10642 MachineMemOperand *MMO =
10643 MF.getMachineMemOperand(PtrInfo, MMOFlags, Size, Alignment, AAInfo);
10644 return getStore(Chain, dl, Val, Ptr, MMO);
10645}
10646
// getStore (MMO form): non-truncating unindexed store; forwards to the full
// getStore with an undef offset and SVT == Val's type.
// NOTE(review): lines 10647, 10649 and 10651 missing from this extraction.
10648 SDValue Ptr, MachineMemOperand *MMO) {
10650 return getStore(Chain, dl, Val, Ptr, Undef, Val.getValueType(), MMO,
10652}
10653
// getStore (full form): builds a (possibly indexed and/or truncating)
// StoreSDNode, CSE'ing through the FoldingSet.
// NOTE(review): several lines were dropped by the extraction (10653-10654
// signature start, 10665 first half of an assert, 10671 element-count
// comparison, 10680-10681 FoldingSetNodeID setup) -- verify upstream.
10655 SDValue Ptr, SDValue Offset, EVT SVT,
10657 bool IsTruncating) {
10658 assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
10659 EVT VT = Val.getValueType();
// Normalize: a "truncating" store to the same type is not truncating.
10660 if (VT == SVT) {
10661 IsTruncating = false;
10662 } else if (!IsTruncating) {
// Reached only when VT != SVT, so this assert always fires -- it is an
// intentional "unreachable" guard against mismatched non-truncating stores.
10663 assert(VT == SVT && "No-truncating store from different memory type!");
10664 } else {
10666 "Should only be a truncating store, not extending!");
10667 assert(VT.isInteger() == SVT.isInteger() && "Can't do FP-INT conversion!");
10668 assert(VT.isVector() == SVT.isVector() &&
10669 "Cannot use trunc store to convert to or from a vector!");
10670 assert((!VT.isVector() ||
10672 "Cannot use trunc store to change the number of vector elements!");
10673 }
10674
// Indexed stores additionally produce the updated pointer value.
10675 bool Indexed = AM != ISD::UNINDEXED;
10676 assert((Indexed || Offset.isUndef()) && "Unindexed store with an offset!");
10677 SDVTList VTs = Indexed ? getVTList(Ptr.getValueType(), MVT::Other)
10678 : getVTList(MVT::Other);
10679 SDValue Ops[] = {Chain, Val, Ptr, Offset};
// FoldingSet key: opcode/operands plus memory VT, subclass data, address
// space and MMO flags, so distinct memory semantics never CSE together.
10682 ID.AddInteger(SVT.getRawBits());
10683 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
10684 dl.getIROrder(), VTs, AM, IsTruncating, SVT, MMO));
10685 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
10686 ID.AddInteger(MMO->getFlags());
10687 void *IP = nullptr;
10688 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
// CSE hit: keep the existing node but refine its alignment from this MMO.
10689 cast<StoreSDNode>(E)->refineAlignment(MMO);
10690 return SDValue(E, 0);
10691 }
10692 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
10693 IsTruncating, SVT, MMO);
10694 createOperands(N, Ops);
10695
10696 CSEMap.InsertNode(N, IP);
10697 InsertNode(N);
10698 SDValue V(N, 0);
10699 NewSDValueDbgMsg(V, "Creating new node: ", this);
10700 return V;
10701}
10702
// getTruncStore (PtrInfo form): builds the MMO (inferring PtrInfo when null)
// and forwards to the MMO-based getTruncStore.
// NOTE(review): lines 10702-10703 (signature start) and 10717 missing here.
10704 SDValue Ptr, MachinePointerInfo PtrInfo,
10705 EVT SVT, Align Alignment,
10706 MachineMemOperand::Flags MMOFlags,
10707 const AAMDNodes &AAInfo) {
10708 assert(Chain.getValueType() == MVT::Other &&
10709 "Invalid chain type");
10710
10711 MMOFlags |= MachineMemOperand::MOStore;
10712 assert((MMOFlags & MachineMemOperand::MOLoad) == 0);
10713
10714 if (PtrInfo.V.isNull())
10715 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr);
10716
10718 MachineMemOperand *MMO = MF.getMachineMemOperand(
10719 PtrInfo, MMOFlags, SVT.getStoreSize(), Alignment, AAInfo);
10720 return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO);
10721}
10722
// getTruncStore (MMO form): unindexed truncating store; forwards to the full
// getStore with IsTruncating = true.
// NOTE(review): lines 10723 and 10726 missing from this extraction.
10724 SDValue Ptr, EVT SVT,
10725 MachineMemOperand *MMO) {
10727 return getStore(Chain, dl, Val, Ptr, Undef, SVT, MMO, ISD::UNINDEXED, true);
10728}
10729
// getIndexedStore: rebuilds an existing unindexed StoreSDNode as a pre/post-
// indexed store with the given Base/Offset and addressing mode.
// NOTE(review): lines 10730-10732 (signature) missing from this extraction.
10733 StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
10734 assert(ST->getOffset().isUndef() && "Store is already a indexed store!");
10735 return getStore(ST->getChain(), dl, ST->getValue(), Base, Offset,
10736 ST->getMemoryVT(), ST->getMemOperand(), AM,
10737 ST->isTruncatingStore());
10738}
10739
// getLoadVP (PtrInfo form): builds the MachineMemOperand for a VP (vector-
// predicated) load and forwards to the MMO-based getLoadVP.
// NOTE(review): line 10740 (signature start) and 10754 are missing here.
10741 ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &dl,
10742 SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Mask, SDValue EVL,
10743 MachinePointerInfo PtrInfo, EVT MemVT, Align Alignment,
10744 MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo,
10745 const MDNode *Ranges, bool IsExpanding) {
10746 MMOFlags |= MachineMemOperand::MOLoad;
10747 assert((MMOFlags & MachineMemOperand::MOStore) == 0);
10748 // If we don't have a PtrInfo, infer the trivial frame index case to simplify
10749 // clients.
10750 if (PtrInfo.V.isNull())
10751 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr, Offset);
10752
10753 TypeSize Size = MemVT.getStoreSize();
10755 MachineMemOperand *MMO = MF.getMachineMemOperand(PtrInfo, MMOFlags, Size,
10756 Alignment, AAInfo, Ranges);
10757 return getLoadVP(AM, ExtType, VT, dl, Chain, Ptr, Offset, Mask, EVL, MemVT,
10758 MMO, IsExpanding);
10759}
10760
// getLoadVP (core): builds a (possibly indexed/extending/expanding) VP_LOAD
// node carrying a mask and an explicit vector length (EVL), CSE'd through
// the FoldingSet.
// NOTE(review): lines 10760-10761 (signature start) and 10778
// (`FoldingSetNodeID ID;`) are missing from this extraction.
10762 ISD::LoadExtType ExtType, EVT VT,
10763 const SDLoc &dl, SDValue Chain, SDValue Ptr,
10764 SDValue Offset, SDValue Mask, SDValue EVL,
10765 EVT MemVT, MachineMemOperand *MMO,
10766 bool IsExpanding) {
10767 assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
// The mask must have one lane per result element.
10768 assert(Mask.getValueType().getVectorElementCount() ==
10769 VT.getVectorElementCount() &&
10770 "Vector width mismatch between mask and data");
10771
10772 bool Indexed = AM != ISD::UNINDEXED;
10773 assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!");
10774
// Indexed loads also yield the updated pointer as a second result.
10775 SDVTList VTs = Indexed ? getVTList(VT, Ptr.getValueType(), MVT::Other)
10776 : getVTList(VT, MVT::Other);
10777 SDValue Ops[] = {Chain, Ptr, Offset, Mask, EVL};
10779 AddNodeIDNode(ID, ISD::VP_LOAD, VTs, Ops);
10780 ID.AddInteger(MemVT.getRawBits());
10781 ID.AddInteger(getSyntheticNodeSubclassData<VPLoadSDNode>(
10782 dl.getIROrder(), VTs, AM, ExtType, IsExpanding, MemVT, MMO));
10783 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
10784 ID.AddInteger(MMO->getFlags());
10785 void *IP = nullptr;
10786 if (auto *E = cast_or_null<VPLoadSDNode>(FindNodeOrInsertPos(ID, dl, IP))) {
// CSE hit: refine alignment and range metadata from the new MMO.
10787 E->refineAlignment(MMO);
10788 E->refineRanges(MMO);
10789 return SDValue(E, 0);
10790 }
10791 auto *N = newSDNode<VPLoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
10792 ExtType, IsExpanding, MemVT, MMO);
10793 createOperands(N, Ops);
10794
10795 CSEMap.InsertNode(N, IP);
10796 InsertNode(N);
10797 SDValue V(N, 0);
10798 NewSDValueDbgMsg(V, "Creating new node: ", this);
10799 return V;
10800}
10801
// getLoadVP convenience overload: unindexed, non-extending, PtrInfo form.
// NOTE(review): lines 10802 (signature start) and 10809 (`Undef`) missing.
10803 SDValue Ptr, SDValue Mask, SDValue EVL,
10804 MachinePointerInfo PtrInfo,
10805 MaybeAlign Alignment,
10806 MachineMemOperand::Flags MMOFlags,
10807 const AAMDNodes &AAInfo, const MDNode *Ranges,
10808 bool IsExpanding) {
10810 return getLoadVP(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
10811 Mask, EVL, PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges,
10812 IsExpanding);
10813}
10814
// getLoadVP convenience overload: unindexed, non-extending, MMO form.
// NOTE(review): lines 10815 and 10818 missing from this extraction.
10816 SDValue Ptr, SDValue Mask, SDValue EVL,
10817 MachineMemOperand *MMO, bool IsExpanding) {
10819 return getLoadVP(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
10820 Mask, EVL, VT, MMO, IsExpanding);
10821}
10822
// getExtLoadVP: unindexed extending VP load, PtrInfo form (no range metadata).
// NOTE(review): lines 10823 and 10830 missing from this extraction.
10824 EVT VT, SDValue Chain, SDValue Ptr,
10825 SDValue Mask, SDValue EVL,
10826 MachinePointerInfo PtrInfo, EVT MemVT,
10827 MaybeAlign Alignment,
10828 MachineMemOperand::Flags MMOFlags,
10829 const AAMDNodes &AAInfo, bool IsExpanding) {
10831 return getLoadVP(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, Mask,
10832 EVL, PtrInfo, MemVT, Alignment, MMOFlags, AAInfo, nullptr,
10833 IsExpanding);
10834}
10835
// getExtLoadVP: unindexed extending VP load, MMO form.
// NOTE(review): lines 10836 and 10840 missing from this extraction.
10837 EVT VT, SDValue Chain, SDValue Ptr,
10838 SDValue Mask, SDValue EVL, EVT MemVT,
10839 MachineMemOperand *MMO, bool IsExpanding) {
10841 return getLoadVP(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, Mask,
10842 EVL, MemVT, MMO, IsExpanding);
10843}
10844
// getIndexedLoadVP: rebuilds an unindexed VP load as an indexed one.
// NOTE(review): lines 10845-10847 (signature) and 10853 (the flag mask for
// the invariant/dereferenceable strip) are missing from this extraction.
10848 auto *LD = cast<VPLoadSDNode>(OrigLoad);
10849 assert(LD->getOffset().isUndef() && "Load is already a indexed load!");
10850 // Don't propagate the invariant or dereferenceable flags.
10851 auto MMOFlags =
10852 LD->getMemOperand()->getFlags() &
10854 return getLoadVP(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
10855 LD->getChain(), Base, Offset, LD->getMask(),
10856 LD->getVectorLength(), LD->getPointerInfo(),
10857 LD->getMemoryVT(), LD->getAlign(), MMOFlags, LD->getAAInfo(),
10858 nullptr, LD->isExpandingLoad());
10859}
10860
// getStoreVP: builds a (possibly indexed/truncating/compressing) VP_STORE
// node with mask and explicit vector length, CSE'd through the FoldingSet.
// NOTE(review): lines 10860-10861 (signature start), 10868 (second half of
// the mask-width assert) and 10876 (`FoldingSetNodeID ID;`) are missing.
10862 SDValue Ptr, SDValue Offset, SDValue Mask,
10863 SDValue EVL, EVT MemVT, MachineMemOperand *MMO,
10864 ISD::MemIndexedMode AM, bool IsTruncating,
10865 bool IsCompressing) {
10866 assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
10867 assert(Mask.getValueType().getVectorElementCount() ==
10869 "Vector width mismatch between mask and data");
10870
10871 bool Indexed = AM != ISD::UNINDEXED;
10872 assert((Indexed || Offset.isUndef()) && "Unindexed vp_store with an offset!");
// Indexed stores additionally produce the updated pointer value.
10873 SDVTList VTs = Indexed ? getVTList(Ptr.getValueType(), MVT::Other)
10874 : getVTList(MVT::Other);
10875 SDValue Ops[] = {Chain, Val, Ptr, Offset, Mask, EVL};
10877 AddNodeIDNode(ID, ISD::VP_STORE, VTs, Ops);
10878 ID.AddInteger(MemVT.getRawBits());
10879 ID.AddInteger(getSyntheticNodeSubclassData<VPStoreSDNode>(
10880 dl.getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO));
10881 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
10882 ID.AddInteger(MMO->getFlags());
10883 void *IP = nullptr;
10884 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
10885 cast<VPStoreSDNode>(E)->refineAlignment(MMO);
10886 return SDValue(E, 0);
10887 }
10888 auto *N = newSDNode<VPStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
10889 IsTruncating, IsCompressing, MemVT, MMO);
10890 createOperands(N, Ops);
10891
10892 CSEMap.InsertNode(N, IP);
10893 InsertNode(N);
10894 SDValue V(N, 0);
10895 NewSDValueDbgMsg(V, "Creating new node: ", this);
10896 return V;
10897}
10898
// getTruncStoreVP (PtrInfo form): builds the MMO (inferring PtrInfo when
// null) and forwards to the MMO-based getTruncStoreVP.
// NOTE(review): lines 10898-10899 (signature start) and 10914 are missing.
10900 SDValue Val, SDValue Ptr, SDValue Mask,
10901 SDValue EVL, MachinePointerInfo PtrInfo,
10902 EVT SVT, Align Alignment,
10903 MachineMemOperand::Flags MMOFlags,
10904 const AAMDNodes &AAInfo,
10905 bool IsCompressing) {
10906 assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
10907
10908 MMOFlags |= MachineMemOperand::MOStore;
10909 assert((MMOFlags & MachineMemOperand::MOLoad) == 0);
10910
10911 if (PtrInfo.V.isNull())
10912 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr);
10913
10915 MachineMemOperand *MMO = MF.getMachineMemOperand(
10916 PtrInfo, MMOFlags, SVT.getStoreSize(), Alignment, AAInfo);
10917 return getTruncStoreVP(Chain, dl, Val, Ptr, Mask, EVL, SVT, MMO,
10918 IsCompressing);
10919}
10920
// getTruncStoreVP (MMO form): truncating VP store. Degenerates to a plain
// getStoreVP when no truncation is actually required (VT == SVT); otherwise
// builds a truncating, unindexed VP_STORE directly.
// NOTE(review): several lines were dropped by the extraction (10920-10921
// signature start, 10934 first half of an assert, 10940 element-count
// comparison, 10944 `Undef`, 10946 `FoldingSetNodeID ID;`).
10922 SDValue Val, SDValue Ptr, SDValue Mask,
10923 SDValue EVL, EVT SVT,
10924 MachineMemOperand *MMO,
10925 bool IsCompressing) {
10926 EVT VT = Val.getValueType();
10927
10928 assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
10929 if (VT == SVT)
10930 return getStoreVP(Chain, dl, Val, Ptr, getUNDEF(Ptr.getValueType()), Mask,
10931 EVL, VT, MMO, ISD::UNINDEXED,
10932 /*IsTruncating*/ false, IsCompressing);
10933
10935 "Should only be a truncating store, not extending!");
10936 assert(VT.isInteger() == SVT.isInteger() && "Can't do FP-INT conversion!");
10937 assert(VT.isVector() == SVT.isVector() &&
10938 "Cannot use trunc store to convert to or from a vector!");
10939 assert((!VT.isVector() ||
10941 "Cannot use trunc store to change the number of vector elements!");
10942
10943 SDVTList VTs = getVTList(MVT::Other);
10945 SDValue Ops[] = {Chain, Val, Ptr, Undef, Mask, EVL};
10947 AddNodeIDNode(ID, ISD::VP_STORE, VTs, Ops);
10948 ID.AddInteger(SVT.getRawBits());
10949 ID.AddInteger(getSyntheticNodeSubclassData<VPStoreSDNode>(
10950 dl.getIROrder(), VTs, ISD::UNINDEXED, true, IsCompressing, SVT, MMO));
10951 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
10952 ID.AddInteger(MMO->getFlags());
10953 void *IP = nullptr;
10954 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
10955 cast<VPStoreSDNode>(E)->refineAlignment(MMO);
10956 return SDValue(E, 0);
10957 }
10958 auto *N =
10959 newSDNode<VPStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
10960 ISD::UNINDEXED, true, IsCompressing, SVT, MMO);
10961 createOperands(N, Ops);
10962
10963 CSEMap.InsertNode(N, IP);
10964 InsertNode(N);
10965 SDValue V(N, 0);
10966 NewSDValueDbgMsg(V, "Creating new node: ", this);
10967 return V;
10968}
10969
// getIndexedStoreVP: rebuilds an unindexed VP store as an indexed one with
// the given Base/Offset; reuses the original node's subclass data and MMO.
// NOTE(review): lines 10969-10972 (signature) and 10978
// (`FoldingSetNodeID ID;`) are missing from this extraction.
10973 auto *ST = cast<VPStoreSDNode>(OrigStore);
10974 assert(ST->getOffset().isUndef() && "Store is already an indexed store!");
// Indexed form returns the updated base pointer alongside the chain.
10975 SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
10976 SDValue Ops[] = {ST->getChain(), ST->getValue(), Base,
10977 Offset, ST->getMask(), ST->getVectorLength()};
10979 AddNodeIDNode(ID, ISD::VP_STORE, VTs, Ops);
10980 ID.AddInteger(ST->getMemoryVT().getRawBits());
10981 ID.AddInteger(ST->getRawSubclassData());
10982 ID.AddInteger(ST->getPointerInfo().getAddrSpace());
10983 ID.AddInteger(ST->getMemOperand()->getFlags());
10984 void *IP = nullptr;
10985 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
10986 return SDValue(E, 0);
10987
10988 auto *N = newSDNode<VPStoreSDNode>(
10989 dl.getIROrder(), dl.getDebugLoc(), VTs, AM, ST->isTruncatingStore(),
10990 ST->isCompressingStore(), ST->getMemoryVT(), ST->getMemOperand());
10991 createOperands(N, Ops);
10992
10993 CSEMap.InsertNode(N, IP);
10994 InsertNode(N);
10995 SDValue V(N, 0);
10996 NewSDValueDbgMsg(V, "Creating new node: ", this);
10997 return V;
10998}
10999
// getStridedLoadVP (core): builds an EXPERIMENTAL_VP_STRIDED_LOAD node.
// NOTE(review): line 11000 (signature start) and 11010
// (`FoldingSetNodeID ID;`) are missing from this extraction. Also note two
// asymmetries vs. the other load builders that look worth double-checking
// upstream: the ID hashes VT rather than MemVT, and MMO->getFlags() is not
// added to the FoldingSet key.
11001 ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &DL,
11002 SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask,
11003 SDValue EVL, EVT MemVT, MachineMemOperand *MMO, bool IsExpanding) {
11004 bool Indexed = AM != ISD::UNINDEXED;
11005 assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!");
11006
11007 SDValue Ops[] = {Chain, Ptr, Offset, Stride, Mask, EVL};
11008 SDVTList VTs = Indexed ? getVTList(VT, Ptr.getValueType(), MVT::Other)
11009 : getVTList(VT, MVT::Other);
11011 AddNodeIDNode(ID, ISD::EXPERIMENTAL_VP_STRIDED_LOAD, VTs, Ops);
11012 ID.AddInteger(VT.getRawBits());
11013 ID.AddInteger(getSyntheticNodeSubclassData<VPStridedLoadSDNode>(
11014 DL.getIROrder(), VTs, AM, ExtType, IsExpanding, MemVT, MMO));
11015 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
11016
11017 void *IP = nullptr;
11018 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
11019 cast<VPStridedLoadSDNode>(E)->refineAlignment(MMO);
11020 return SDValue(E, 0);
11021 }
11022
11023 auto *N =
11024 newSDNode<VPStridedLoadSDNode>(DL.getIROrder(), DL.getDebugLoc(), VTs, AM,
11025 ExtType, IsExpanding, MemVT, MMO);
11026 createOperands(N, Ops);
11027 CSEMap.InsertNode(N, IP);
11028 InsertNode(N);
11029 SDValue V(N, 0);
11030 NewSDValueDbgMsg(V, "Creating new node: ", this);
11031 return V;
11032}
11033
// getStridedLoadVP convenience: unindexed, non-extending form.
// NOTE(review): lines 11034 and 11039 missing from this extraction.
11035 SDValue Ptr, SDValue Stride,
11036 SDValue Mask, SDValue EVL,
11037 MachineMemOperand *MMO,
11038 bool IsExpanding) {
11040 return getStridedLoadVP(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, DL, Chain, Ptr,
11041 Undef, Stride, Mask, EVL, VT, MMO, IsExpanding);
11042}
11043
// getExtStridedLoadVP: unindexed extending strided VP load.
// NOTE(review): lines 11044 and 11048 missing from this extraction.
11045 ISD::LoadExtType ExtType, const SDLoc &DL, EVT VT, SDValue Chain,
11046 SDValue Ptr, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT,
11047 MachineMemOperand *MMO, bool IsExpanding) {
11049 return getStridedLoadVP(ISD::UNINDEXED, ExtType, VT, DL, Chain, Ptr, Undef,
11050 Stride, Mask, EVL, MemVT, MMO, IsExpanding);
11051}
11052
// getStridedStoreVP: builds an EXPERIMENTAL_VP_STRIDED_STORE node.
// NOTE(review): lines 11052-11053 (signature start), 11058 (likely the AM
// parameter) and 11066 (`FoldingSetNodeID ID;`) are missing from this
// extraction. As with the strided load, MMO->getFlags() is not part of the
// FoldingSet key -- verify against upstream.
11054 SDValue Val, SDValue Ptr,
11055 SDValue Offset, SDValue Stride,
11056 SDValue Mask, SDValue EVL, EVT MemVT,
11057 MachineMemOperand *MMO,
11059 bool IsTruncating, bool IsCompressing) {
11060 assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
11061 bool Indexed = AM != ISD::UNINDEXED;
11062 assert((Indexed || Offset.isUndef()) && "Unindexed vp_store with an offset!");
11063 SDVTList VTs = Indexed ? getVTList(Ptr.getValueType(), MVT::Other)
11064 : getVTList(MVT::Other);
11065 SDValue Ops[] = {Chain, Val, Ptr, Offset, Stride, Mask, EVL};
11067 AddNodeIDNode(ID, ISD::EXPERIMENTAL_VP_STRIDED_STORE, VTs, Ops);
11068 ID.AddInteger(MemVT.getRawBits());
11069 ID.AddInteger(getSyntheticNodeSubclassData<VPStridedStoreSDNode>(
11070 DL.getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO));
11071 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
11072 void *IP = nullptr;
11073 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
11074 cast<VPStridedStoreSDNode>(E)->refineAlignment(MMO);
11075 return SDValue(E, 0);
11076 }
11077 auto *N = newSDNode<VPStridedStoreSDNode>(DL.getIROrder(), DL.getDebugLoc(),
11078 VTs, AM, IsTruncating,
11079 IsCompressing, MemVT, MMO);
11080 createOperands(N, Ops);
11081
11082 CSEMap.InsertNode(N, IP);
11083 InsertNode(N);
11084 SDValue V(N, 0);
11085 NewSDValueDbgMsg(V, "Creating new node: ", this);
11086 return V;
11087}
11088
// getTruncStridedStoreVP: truncating strided VP store. Degenerates to a
// plain getStridedStoreVP when VT == SVT; otherwise builds a truncating
// unindexed strided store directly.
// NOTE(review): dropped lines in this extraction: 11088-11089 (signature
// start), 11103 (first half of an assert), 11109 (element-count comparison),
// 11113 (`Undef`), 11115 (`FoldingSetNodeID ID;`).
11090 SDValue Val, SDValue Ptr,
11091 SDValue Stride, SDValue Mask,
11092 SDValue EVL, EVT SVT,
11093 MachineMemOperand *MMO,
11094 bool IsCompressing) {
11095 EVT VT = Val.getValueType();
11096
11097 assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
11098 if (VT == SVT)
11099 return getStridedStoreVP(Chain, DL, Val, Ptr, getUNDEF(Ptr.getValueType()),
11100 Stride, Mask, EVL, VT, MMO, ISD::UNINDEXED,
11101 /*IsTruncating*/ false, IsCompressing);
11102
11104 "Should only be a truncating store, not extending!");
11105 assert(VT.isInteger() == SVT.isInteger() && "Can't do FP-INT conversion!");
11106 assert(VT.isVector() == SVT.isVector() &&
11107 "Cannot use trunc store to convert to or from a vector!");
11108 assert((!VT.isVector() ||
11110 "Cannot use trunc store to change the number of vector elements!");
11111
11112 SDVTList VTs = getVTList(MVT::Other);
11114 SDValue Ops[] = {Chain, Val, Ptr, Undef, Stride, Mask, EVL};
11116 AddNodeIDNode(ID, ISD::EXPERIMENTAL_VP_STRIDED_STORE, VTs, Ops);
11117 ID.AddInteger(SVT.getRawBits());
11118 ID.AddInteger(getSyntheticNodeSubclassData<VPStridedStoreSDNode>(
11119 DL.getIROrder(), VTs, ISD::UNINDEXED, true, IsCompressing, SVT, MMO));
11120 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
11121 void *IP = nullptr;
11122 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
11123 cast<VPStridedStoreSDNode>(E)->refineAlignment(MMO);
11124 return SDValue(E, 0);
11125 }
11126 auto *N = newSDNode<VPStridedStoreSDNode>(DL.getIROrder(), DL.getDebugLoc(),
11127 VTs, ISD::UNINDEXED, true,
11128 IsCompressing, SVT, MMO);
11129 createOperands(N, Ops);
11130
11131 CSEMap.InsertNode(N, IP);
11132 InsertNode(N);
11133 SDValue V(N, 0);
11134 NewSDValueDbgMsg(V, "Creating new node: ", this);
11135 return V;
11136}
11137
// getGatherVP: builds a VP_GATHER node (6 operands: presumably chain, base,
// index, scale, mask, EVL -- TODO confirm operand order against the
// VPGatherSDNode accessors). Post-construction asserts validate the
// mask/index/data widths and that Scale is a constant power of 2.
// NOTE(review): lines 11138-11139 (signature start), 11143
// (`FoldingSetNodeID ID;`) and 11166 (start of the ElementCount comparison
// assert) are missing from this extraction.
11140 ISD::MemIndexType IndexType) {
11141 assert(Ops.size() == 6 && "Incompatible number of operands");
11142
11144 AddNodeIDNode(ID, ISD::VP_GATHER, VTs, Ops);
11145 ID.AddInteger(VT.getRawBits());
11146 ID.AddInteger(getSyntheticNodeSubclassData<VPGatherSDNode>(
11147 dl.getIROrder(), VTs, VT, MMO, IndexType));
11148 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
11149 ID.AddInteger(MMO->getFlags());
11150 void *IP = nullptr;
11151 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
11152 cast<VPGatherSDNode>(E)->refineAlignment(MMO);
11153 return SDValue(E, 0);
11154 }
11155
11156 auto *N = newSDNode<VPGatherSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
11157 VT, MMO, IndexType);
11158 createOperands(N, Ops);
11159
11160 assert(N->getMask().getValueType().getVectorElementCount() ==
11161 N->getValueType(0).getVectorElementCount() &&
11162 "Vector width mismatch between mask and data");
11163 assert(N->getIndex().getValueType().getVectorElementCount().isScalable() ==
11164 N->getValueType(0).getVectorElementCount().isScalable() &&
11165 "Scalable flags of index and data do not match");
11167 N->getIndex().getValueType().getVectorElementCount(),
11168 N->getValueType(0).getVectorElementCount()) &&
11169 "Vector width mismatch between index and data");
11170 assert(isa<ConstantSDNode>(N->getScale()) &&
11171 N->getScale()->getAsAPIntVal().isPowerOf2() &&
11172 "Scale should be a constant power of 2");
11173
11174 CSEMap.InsertNode(N, IP);
11175 InsertNode(N);
11176 SDValue V(N, 0);
11177 NewSDValueDbgMsg(V, "Creating new node: ", this);
11178 return V;
11179}
11180
// getScatterVP: builds a VP_SCATTER node (7 operands). Mirrors getGatherVP
// but validates mask/index against the stored value instead of the result.
// NOTE(review): lines 11180-11182 (signature start), 11187
// (`FoldingSetNodeID ID;`) and 11210 (start of the ElementCount comparison
// assert) are missing from this extraction.
11183 MachineMemOperand *MMO,
11184 ISD::MemIndexType IndexType) {
11185 assert(Ops.size() == 7 && "Incompatible number of operands");
11186
11188 AddNodeIDNode(ID, ISD::VP_SCATTER, VTs, Ops);
11189 ID.AddInteger(VT.getRawBits());
11190 ID.AddInteger(getSyntheticNodeSubclassData<VPScatterSDNode>(
11191 dl.getIROrder(), VTs, VT, MMO, IndexType));
11192 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
11193 ID.AddInteger(MMO->getFlags());
11194 void *IP = nullptr;
11195 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
11196 cast<VPScatterSDNode>(E)->refineAlignment(MMO);
11197 return SDValue(E, 0);
11198 }
11199 auto *N = newSDNode<VPScatterSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
11200 VT, MMO, IndexType);
11201 createOperands(N, Ops);
11202
11203 assert(N->getMask().getValueType().getVectorElementCount() ==
11204 N->getValue().getValueType().getVectorElementCount() &&
11205 "Vector width mismatch between mask and data");
11206 assert(
11207 N->getIndex().getValueType().getVectorElementCount().isScalable() ==
11208 N->getValue().getValueType().getVectorElementCount().isScalable() &&
11209 "Scalable flags of index and data do not match");
11211 N->getIndex().getValueType().getVectorElementCount(),
11212 N->getValue().getValueType().getVectorElementCount()) &&
11213 "Vector width mismatch between index and data");
11214 assert(isa<ConstantSDNode>(N->getScale()) &&
11215 N->getScale()->getAsAPIntVal().isPowerOf2() &&
11216 "Scale should be a constant power of 2");
11217
11218 CSEMap.InsertNode(N, IP);
11219 InsertNode(N);
11220 SDValue V(N, 0);
11221 NewSDValueDbgMsg(V, "Creating new node: ", this);
11222 return V;
11223}
11224
// getMaskedLoad: builds a (possibly indexed/extending/expanding) MLOAD node
// with mask and pass-through value, CSE'd through the FoldingSet.
// NOTE(review): lines 11224-11226 / 11229 (signature) and 11237-11238
// (FoldingSetNodeID setup) are missing from this extraction.
11227 SDValue PassThru, EVT MemVT,
11228 MachineMemOperand *MMO,
11230 ISD::LoadExtType ExtTy, bool isExpanding) {
11231 bool Indexed = AM != ISD::UNINDEXED;
11232 assert((Indexed || Offset.isUndef()) &&
11233 "Unindexed masked load with an offset!");
// Indexed form additionally returns the updated base pointer.
11234 SDVTList VTs = Indexed ? getVTList(VT, Base.getValueType(), MVT::Other)
11235 : getVTList(VT, MVT::Other);
11236 SDValue Ops[] = {Chain, Base, Offset, Mask, PassThru};
11239 ID.AddInteger(MemVT.getRawBits());
11240 ID.AddInteger(getSyntheticNodeSubclassData<MaskedLoadSDNode>(
11241 dl.getIROrder(), VTs, AM, ExtTy, isExpanding, MemVT, MMO));
11242 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
11243 ID.AddInteger(MMO->getFlags());
11244 void *IP = nullptr;
11245 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
11246 cast<MaskedLoadSDNode>(E)->refineAlignment(MMO);
11247 return SDValue(E, 0);
11248 }
11249 auto *N = newSDNode<MaskedLoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
11250 AM, ExtTy, isExpanding, MemVT, MMO);
11251 createOperands(N, Ops);
11252
11253 CSEMap.InsertNode(N, IP);
11254 InsertNode(N);
11255 SDValue V(N, 0);
11256 NewSDValueDbgMsg(V, "Creating new node: ", this);
11257 return V;
11258}
11259
// getIndexedMaskedLoad: rebuilds an unindexed masked load as an indexed one.
// NOTE(review): lines 11260-11263 (signature and the cast producing `LD`)
// are missing from this extraction.
11264 assert(LD->getOffset().isUndef() && "Masked load is already a indexed load!");
11265 return getMaskedLoad(OrigLoad.getValueType(), dl, LD->getChain(), Base,
11266 Offset, LD->getMask(), LD->getPassThru(),
11267 LD->getMemoryVT(), LD->getMemOperand(), AM,
11268 LD->getExtensionType(), LD->isExpandingLoad());
11269}
11270
// getMaskedStore: builds a (possibly indexed/truncating/compressing) MSTORE
// node, CSE'd through the FoldingSet.
// NOTE(review): lines 11270-11272 (signature start) and 11285-11286
// (FoldingSetNodeID setup) are missing from this extraction.
11273 SDValue Mask, EVT MemVT,
11274 MachineMemOperand *MMO,
11275 ISD::MemIndexedMode AM, bool IsTruncating,
11276 bool IsCompressing) {
11277 assert(Chain.getValueType() == MVT::Other &&
11278 "Invalid chain type");
11279 bool Indexed = AM != ISD::UNINDEXED;
11280 assert((Indexed || Offset.isUndef()) &&
11281 "Unindexed masked store with an offset!");
// Indexed form additionally returns the updated base pointer.
11282 SDVTList VTs = Indexed ? getVTList(Base.getValueType(), MVT::Other)
11283 : getVTList(MVT::Other);
11284 SDValue Ops[] = {Chain, Val, Base, Offset, Mask};
11287 ID.AddInteger(MemVT.getRawBits());
11288 ID.AddInteger(getSyntheticNodeSubclassData<MaskedStoreSDNode>(
11289 dl.getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO));
11290 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
11291 ID.AddInteger(MMO->getFlags());
11292 void *IP = nullptr;
11293 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
11294 cast<MaskedStoreSDNode>(E)->refineAlignment(MMO);
11295 return SDValue(E, 0);
11296 }
11297 auto *N =
11298 newSDNode<MaskedStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
11299 IsTruncating, IsCompressing, MemVT, MMO);
11300 createOperands(N, Ops);
11301
11302 CSEMap.InsertNode(N, IP);
11303 InsertNode(N);
11304 SDValue V(N, 0);
11305 NewSDValueDbgMsg(V, "Creating new node: ", this);
11306 return V;
11307}
11308
// getIndexedMaskedStore: rebuilds an unindexed masked store as an indexed one.
// NOTE(review): lines 11309-11312 (signature and the cast producing `ST`)
// are missing from this extraction.
11313 assert(ST->getOffset().isUndef() &&
11314 "Masked store is already a indexed store!");
11315 return getMaskedStore(ST->getChain(), dl, ST->getValue(), Base, Offset,
11316 ST->getMask(), ST->getMemoryVT(), ST->getMemOperand(),
11317 AM, ST->isTruncatingStore(), ST->isCompressingStore());
11318}
11319
// getMaskedGather: builds an MGATHER node (6 operands). Post-construction
// asserts validate pass-through type, mask/index/data widths, and that
// Scale is a constant power of 2.
// NOTE(review): lines 11319-11321 (signature start), 11327-11328
// (FoldingSetNodeID setup) and 11352 (start of the ElementCount comparison
// assert) are missing from this extraction.
11322 MachineMemOperand *MMO,
11323 ISD::MemIndexType IndexType,
11324 ISD::LoadExtType ExtTy) {
11325 assert(Ops.size() == 6 && "Incompatible number of operands");
11326
11329 ID.AddInteger(MemVT.getRawBits());
11330 ID.AddInteger(getSyntheticNodeSubclassData<MaskedGatherSDNode>(
11331 dl.getIROrder(), VTs, MemVT, MMO, IndexType, ExtTy));
11332 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
11333 ID.AddInteger(MMO->getFlags());
11334 void *IP = nullptr;
11335 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
11336 cast<MaskedGatherSDNode>(E)->refineAlignment(MMO);
11337 return SDValue(E, 0);
11338 }
11339
11340 auto *N = newSDNode<MaskedGatherSDNode>(dl.getIROrder(), dl.getDebugLoc(),
11341 VTs, MemVT, MMO, IndexType, ExtTy);
11342 createOperands(N, Ops);
11343
11344 assert(N->getPassThru().getValueType() == N->getValueType(0) &&
11345 "Incompatible type of the PassThru value in MaskedGatherSDNode");
11346 assert(N->getMask().getValueType().getVectorElementCount() ==
11347 N->getValueType(0).getVectorElementCount() &&
11348 "Vector width mismatch between mask and data");
11349 assert(N->getIndex().getValueType().getVectorElementCount().isScalable() ==
11350 N->getValueType(0).getVectorElementCount().isScalable() &&
11351 "Scalable flags of index and data do not match");
11353 N->getIndex().getValueType().getVectorElementCount(),
11354 N->getValueType(0).getVectorElementCount()) &&
11355 "Vector width mismatch between index and data");
11356 assert(isa<ConstantSDNode>(N->getScale()) &&
11357 N->getScale()->getAsAPIntVal().isPowerOf2() &&
11358 "Scale should be a constant power of 2");
11359
11360 CSEMap.InsertNode(N, IP);
11361 InsertNode(N);
11362 SDValue V(N, 0);
11363 NewSDValueDbgMsg(V, "Creating new node: ", this);
11364 return V;
11365}
11366
// getMaskedScatter: builds an MSCATTER node (6 operands). Mirrors
// getMaskedGather but validates mask/index against the stored value.
// NOTE(review): lines 11366-11368 (signature start), 11374-11375
// (FoldingSetNodeID setup) and 11398 (start of the ElementCount comparison
// assert) are missing from this extraction.
11369 MachineMemOperand *MMO,
11370 ISD::MemIndexType IndexType,
11371 bool IsTrunc) {
11372 assert(Ops.size() == 6 && "Incompatible number of operands");
11373
11376 ID.AddInteger(MemVT.getRawBits());
11377 ID.AddInteger(getSyntheticNodeSubclassData<MaskedScatterSDNode>(
11378 dl.getIROrder(), VTs, MemVT, MMO, IndexType, IsTrunc));
11379 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
11380 ID.AddInteger(MMO->getFlags());
11381 void *IP = nullptr;
11382 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
11383 cast<MaskedScatterSDNode>(E)->refineAlignment(MMO);
11384 return SDValue(E, 0);
11385 }
11386
11387 auto *N = newSDNode<MaskedScatterSDNode>(dl.getIROrder(), dl.getDebugLoc(),
11388 VTs, MemVT, MMO, IndexType, IsTrunc);
11389 createOperands(N, Ops);
11390
11391 assert(N->getMask().getValueType().getVectorElementCount() ==
11392 N->getValue().getValueType().getVectorElementCount() &&
11393 "Vector width mismatch between mask and data");
11394 assert(
11395 N->getIndex().getValueType().getVectorElementCount().isScalable() ==
11396 N->getValue().getValueType().getVectorElementCount().isScalable() &&
11397 "Scalable flags of index and data do not match");
11399 N->getIndex().getValueType().getVectorElementCount(),
11400 N->getValue().getValueType().getVectorElementCount()) &&
11401 "Vector width mismatch between index and data");
11402 assert(isa<ConstantSDNode>(N->getScale()) &&
11403 N->getScale()->getAsAPIntVal().isPowerOf2() &&
11404 "Scale should be a constant power of 2");
11405
11406 CSEMap.InsertNode(N, IP);
11407 InsertNode(N);
11408 SDValue V(N, 0);
11409 NewSDValueDbgMsg(V, "Creating new node: ", this);
11410 return V;
11411}
11412
11414 const SDLoc &dl, ArrayRef<SDValue> Ops,
11415 MachineMemOperand *MMO,
11416 ISD::MemIndexType IndexType) {
11417 assert(Ops.size() == 7 && "Incompatible number of operands");
11418
11421 ID.AddInteger(MemVT.getRawBits());
11422 ID.AddInteger(getSyntheticNodeSubclassData<MaskedHistogramSDNode>(
11423 dl.getIROrder(), VTs, MemVT, MMO, IndexType));
11424 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
11425 ID.AddInteger(MMO->getFlags());
11426 void *IP = nullptr;
11427 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
11428 cast<MaskedGatherSDNode>(E)->refineAlignment(MMO);
11429 return SDValue(E, 0);
11430 }
11431
11432 auto *N = newSDNode<MaskedHistogramSDNode>(dl.getIROrder(), dl.getDebugLoc(),
11433 VTs, MemVT, MMO, IndexType);
11434 createOperands(N, Ops);
11435
11436 assert(N->getMask().getValueType().getVectorElementCount() ==
11437 N->getIndex().getValueType().getVectorElementCount() &&
11438 "Vector width mismatch between mask and data");
11439 assert(isa<ConstantSDNode>(N->getScale()) &&
11440 N->getScale()->getAsAPIntVal().isPowerOf2() &&
11441 "Scale should be a constant power of 2");
11442 assert(N->getInc().getValueType().isInteger() && "Non integer update value");
11443
11444 CSEMap.InsertNode(N, IP);
11445 InsertNode(N);
11446 SDValue V(N, 0);
11447 NewSDValueDbgMsg(V, "Creating new node: ", this);
11448 return V;
11449}
11450
// Builds a VP_LOAD_FF (fault-first VP load) node: results are the loaded
// vector, the actually-completed vector length (EVL's type), and the chain.
// NOTE(review): lines 11450-11451 (signature start) and 11456
// (`FoldingSetNodeID ID;`) are missing from this extraction.
11452 SDValue Ptr, SDValue Mask, SDValue EVL,
11453 MachineMemOperand *MMO) {
11454 SDVTList VTs = getVTList(VT, EVL.getValueType(), MVT::Other);
11455 SDValue Ops[] = {Chain, Ptr, Mask, EVL};
11457 AddNodeIDNode(ID, ISD::VP_LOAD_FF, VTs, Ops);
11458 ID.AddInteger(VT.getRawBits());
11459 ID.AddInteger(getSyntheticNodeSubclassData<VPLoadFFSDNode>(DL.getIROrder(),
11460 VTs, VT, MMO));
11461 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
11462 ID.AddInteger(MMO->getFlags());
11463 void *IP = nullptr;
11464 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
11465 cast<VPLoadFFSDNode>(E)->refineAlignment(MMO);
11466 return SDValue(E, 0);
11467 }
11468 auto *N = newSDNode<VPLoadFFSDNode>(DL.getIROrder(), DL.getDebugLoc(), VTs,
11469 VT, MMO);
11470 createOperands(N, Ops);
11471
11472 CSEMap.InsertNode(N, IP);
11473 InsertNode(N);
11474 SDValue V(N, 0);
11475 NewSDValueDbgMsg(V, "Creating new node: ", this);
11476 return V;
11477}
11478
// getGetFPEnv: builds a GET_FPENV_MEM node (stores the FP environment to
// memory at Ptr); chain-only result.
// NOTE(review): lines 11478-11479 (signature start) and 11484-11485
// (FoldingSetNodeID setup) are missing from this extraction.
11480 EVT MemVT, MachineMemOperand *MMO) {
11481 assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
11482 SDVTList VTs = getVTList(MVT::Other);
11483 SDValue Ops[] = {Chain, Ptr};
11486 ID.AddInteger(MemVT.getRawBits());
11487 ID.AddInteger(getSyntheticNodeSubclassData<FPStateAccessSDNode>(
11488 ISD::GET_FPENV_MEM, dl.getIROrder(), VTs, MemVT, MMO));
11489 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
11490 ID.AddInteger(MMO->getFlags());
11491 void *IP = nullptr;
11492 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
11493 return SDValue(E, 0);
11494
11495 auto *N = newSDNode<FPStateAccessSDNode>(ISD::GET_FPENV_MEM, dl.getIROrder(),
11496 dl.getDebugLoc(), VTs, MemVT, MMO);
11497 createOperands(N, Ops);
11498
11499 CSEMap.InsertNode(N, IP);
11500 InsertNode(N);
11501 SDValue V(N, 0);
11502 NewSDValueDbgMsg(V, "Creating new node: ", this);
11503 return V;
11504}
11505
// getSetFPEnv: builds a SET_FPENV_MEM node (loads the FP environment from
// memory at Ptr); exact mirror of getGetFPEnv with the opposite opcode.
// NOTE(review): lines 11506 (signature start) and 11511-11512
// (FoldingSetNodeID setup) are missing from this extraction.
11507 EVT MemVT, MachineMemOperand *MMO) {
11508 assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
11509 SDVTList VTs = getVTList(MVT::Other);
11510 SDValue Ops[] = {Chain, Ptr};
11513 ID.AddInteger(MemVT.getRawBits());
11514 ID.AddInteger(getSyntheticNodeSubclassData<FPStateAccessSDNode>(
11515 ISD::SET_FPENV_MEM, dl.getIROrder(), VTs, MemVT, MMO));
11516 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
11517 ID.AddInteger(MMO->getFlags());
11518 void *IP = nullptr;
11519 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
11520 return SDValue(E, 0);
11521
11522 auto *N = newSDNode<FPStateAccessSDNode>(ISD::SET_FPENV_MEM, dl.getIROrder(),
11523 dl.getDebugLoc(), VTs, MemVT, MMO);
11524 createOperands(N, Ops);
11525
11526 CSEMap.InsertNode(N, IP);
11527 InsertNode(N);
11528 SDValue V(N, 0);
11529 NewSDValueDbgMsg(V, "Creating new node: ", this);
11530 return V;
11531}
11532
11534 // select undef, T, F --> T (if T is a constant), otherwise F
11535 // select, ?, undef, F --> F
11536 // select, ?, T, undef --> T
11537 if (Cond.isUndef())
11538 return isConstantValueOfAnyType(T) ? T : F;
11539 if (T.isUndef())
11541 if (F.isUndef())
11543
11544 // select true, T, F --> T
11545 // select false, T, F --> F
11546 if (auto C = isBoolConstant(Cond))
11547 return *C ? T : F;
11548
11549 // select ?, T, T --> T
11550 if (T == F)
11551 return T;
11552
11553 return SDValue();
11554}
11555
11557 // shift undef, Y --> 0 (can always assume that the undef value is 0)
11558 if (X.isUndef())
11559 return getConstant(0, SDLoc(X.getNode()), X.getValueType());
11560 // shift X, undef --> undef (because it may shift by the bitwidth)
11561 if (Y.isUndef())
11562 return getUNDEF(X.getValueType());
11563
11564 // shift 0, Y --> 0
11565 // shift X, 0 --> X
11567 return X;
11568
11569 // shift X, C >= bitwidth(X) --> undef
11570 // All vector elements must be too big (or undef) to avoid partial undefs.
11571 auto isShiftTooBig = [X](ConstantSDNode *Val) {
11572 return !Val || Val->getAPIntValue().uge(X.getScalarValueSizeInBits());
11573 };
11574 if (ISD::matchUnaryPredicate(Y, isShiftTooBig, true))
11575 return getUNDEF(X.getValueType());
11576
11577 // shift i1/vXi1 X, Y --> X (any non-zero shift amount is undefined).
11578 if (X.getValueType().getScalarType() == MVT::i1)
11579 return X;
11580
11581 return SDValue();
11582}
11583
11585 SDNodeFlags Flags) {
11586 // If this operation has 'nnan' or 'ninf' and at least 1 disallowed operand
11587 // (an undef operand can be chosen to be Nan/Inf), then the result of this
11588 // operation is poison. That result can be relaxed to undef.
11589 ConstantFPSDNode *XC = isConstOrConstSplatFP(X, /* AllowUndefs */ true);
11590 ConstantFPSDNode *YC = isConstOrConstSplatFP(Y, /* AllowUndefs */ true);
11591 bool HasNan = (XC && XC->getValueAPF().isNaN()) ||
11592 (YC && YC->getValueAPF().isNaN());
11593 bool HasInf = (XC && XC->getValueAPF().isInfinity()) ||
11594 (YC && YC->getValueAPF().isInfinity());
11595
11596 if (Flags.hasNoNaNs() && (HasNan || X.isUndef() || Y.isUndef()))
11597 return getUNDEF(X.getValueType());
11598
11599 if (Flags.hasNoInfs() && (HasInf || X.isUndef() || Y.isUndef()))
11600 return getUNDEF(X.getValueType());
11601
11602 if (!YC)
11603 return SDValue();
11604
11605 // X + -0.0 --> X
11606 if (Opcode == ISD::FADD)
11607 if (YC->getValueAPF().isNegZero())
11608 return X;
11609
11610 // X - +0.0 --> X
11611 if (Opcode == ISD::FSUB)
11612 if (YC->getValueAPF().isPosZero())
11613 return X;
11614
11615 // X * 1.0 --> X
11616 // X / 1.0 --> X
11617 if (Opcode == ISD::FMUL || Opcode == ISD::FDIV)
11618 if (YC->getValueAPF().isExactlyValue(1.0))
11619 return X;
11620
11621 // X * 0.0 --> 0.0
11622 if (Opcode == ISD::FMUL && Flags.hasNoNaNs() && Flags.hasNoSignedZeros())
11623 if (YC->getValueAPF().isZero())
11624 return getConstantFP(0.0, SDLoc(Y), Y.getValueType());
11625
11626 return SDValue();
11627}
11628
11630 SDValue Ptr, SDValue SV, unsigned Align) {
11631 SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, dl, MVT::i32) };
11632 return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops);
11633}
11634
11635SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
11637 switch (Ops.size()) {
11638 case 0: return getNode(Opcode, DL, VT);
11639 case 1: return getNode(Opcode, DL, VT, Ops[0].get());
11640 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
11641 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
11642 default: break;
11643 }
11644
11645 // Copy from an SDUse array into an SDValue array for use with
11646 // the regular getNode logic.
11648 return getNode(Opcode, DL, VT, NewOps);
11649}
11650
11651SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
11653 SDNodeFlags Flags;
11654 if (Inserter)
11655 Flags = Inserter->getFlags();
11656 return getNode(Opcode, DL, VT, Ops, Flags);
11657}
11658
11659SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
11660 ArrayRef<SDValue> Ops, const SDNodeFlags Flags) {
11661 unsigned NumOps = Ops.size();
11662 switch (NumOps) {
11663 case 0: return getNode(Opcode, DL, VT);
11664 case 1: return getNode(Opcode, DL, VT, Ops[0], Flags);
11665 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Flags);
11666 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2], Flags);
11667 default: break;
11668 }
11669
11670#ifndef NDEBUG
11671 for (const auto &Op : Ops)
11672 assert(Op.getOpcode() != ISD::DELETED_NODE &&
11673 "Operand is DELETED_NODE!");
11674#endif
11675
11676 switch (Opcode) {
11677 default: break;
11678 case ISD::BUILD_VECTOR:
11679 // Attempt to simplify BUILD_VECTOR.
11680 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
11681 return V;
11682 break;
11684 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this))
11685 return V;
11686 break;
11687 case ISD::SELECT_CC:
11688 assert(NumOps == 5 && "SELECT_CC takes 5 operands!");
11689 assert(Ops[0].getValueType() == Ops[1].getValueType() &&
11690 "LHS and RHS of condition must have same type!");
11691 assert(Ops[2].getValueType() == Ops[3].getValueType() &&
11692 "True and False arms of SelectCC must have same type!");
11693 assert(Ops[2].getValueType() == VT &&
11694 "select_cc node must be of same type as true and false value!");
11695 assert((!Ops[0].getValueType().isVector() ||
11696 Ops[0].getValueType().getVectorElementCount() ==
11697 VT.getVectorElementCount()) &&
11698 "Expected select_cc with vector result to have the same sized "
11699 "comparison type!");
11700 break;
11701 case ISD::BR_CC:
11702 assert(NumOps == 5 && "BR_CC takes 5 operands!");
11703 assert(Ops[2].getValueType() == Ops[3].getValueType() &&
11704 "LHS/RHS of comparison should match types!");
11705 break;
11706 case ISD::VP_ADD:
11707 case ISD::VP_SUB:
11708 // If it is VP_ADD/VP_SUB mask operation then turn it to VP_XOR
11709 if (VT.getScalarType() == MVT::i1)
11710 Opcode = ISD::VP_XOR;
11711 break;
11712 case ISD::VP_MUL:
11713 // If it is VP_MUL mask operation then turn it to VP_AND
11714 if (VT.getScalarType() == MVT::i1)
11715 Opcode = ISD::VP_AND;
11716 break;
11717 case ISD::VP_REDUCE_MUL:
11718 // If it is VP_REDUCE_MUL mask operation then turn it to VP_REDUCE_AND
11719 if (VT == MVT::i1)
11720 Opcode = ISD::VP_REDUCE_AND;
11721 break;
11722 case ISD::VP_REDUCE_ADD:
11723 // If it is VP_REDUCE_ADD mask operation then turn it to VP_REDUCE_XOR
11724 if (VT == MVT::i1)
11725 Opcode = ISD::VP_REDUCE_XOR;
11726 break;
11727 case ISD::VP_REDUCE_SMAX:
11728 case ISD::VP_REDUCE_UMIN:
11729 // If it is VP_REDUCE_SMAX/VP_REDUCE_UMIN mask operation then turn it to
11730 // VP_REDUCE_AND.
11731 if (VT == MVT::i1)
11732 Opcode = ISD::VP_REDUCE_AND;
11733 break;
11734 case ISD::VP_REDUCE_SMIN:
11735 case ISD::VP_REDUCE_UMAX:
11736 // If it is VP_REDUCE_SMIN/VP_REDUCE_UMAX mask operation then turn it to
11737 // VP_REDUCE_OR.
11738 if (VT == MVT::i1)
11739 Opcode = ISD::VP_REDUCE_OR;
11740 break;
11741 }
11742
11743 // Memoize nodes.
11744 SDNode *N;
11745 SDVTList VTs = getVTList(VT);
11746
11747 if (VT != MVT::Glue) {
11749 AddNodeIDNode(ID, Opcode, VTs, Ops);
11750 void *IP = nullptr;
11751
11752 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
11753 E->intersectFlagsWith(Flags);
11754 return SDValue(E, 0);
11755 }
11756
11757 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
11758 createOperands(N, Ops);
11759
11760 CSEMap.InsertNode(N, IP);
11761 } else {
11762 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
11763 createOperands(N, Ops);
11764 }
11765
11766 N->setFlags(Flags);
11767 InsertNode(N);
11768 SDValue V(N, 0);
11769 NewSDValueDbgMsg(V, "Creating new node: ", this);
11770 return V;
11771}
11772
11773SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
11774 ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops) {
11775 SDNodeFlags Flags;
11776 if (Inserter)
11777 Flags = Inserter->getFlags();
11778 return getNode(Opcode, DL, getVTList(ResultTys), Ops, Flags);
11779}
11780
11781SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
11783 const SDNodeFlags Flags) {
11784 return getNode(Opcode, DL, getVTList(ResultTys), Ops, Flags);
11785}
11786
11787SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
11789 SDNodeFlags Flags;
11790 if (Inserter)
11791 Flags = Inserter->getFlags();
11792 return getNode(Opcode, DL, VTList, Ops, Flags);
11793}
11794
11795SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
11796 ArrayRef<SDValue> Ops, const SDNodeFlags Flags) {
11797 if (VTList.NumVTs == 1)
11798 return getNode(Opcode, DL, VTList.VTs[0], Ops, Flags);
11799
11800#ifndef NDEBUG
11801 for (const auto &Op : Ops)
11802 assert(Op.getOpcode() != ISD::DELETED_NODE &&
11803 "Operand is DELETED_NODE!");
11804#endif
11805
11806 switch (Opcode) {
11807 case ISD::SADDO:
11808 case ISD::UADDO:
11809 case ISD::SSUBO:
11810 case ISD::USUBO: {
11811 assert(VTList.NumVTs == 2 && Ops.size() == 2 &&
11812 "Invalid add/sub overflow op!");
11813 assert(VTList.VTs[0].isInteger() && VTList.VTs[1].isInteger() &&
11814 Ops[0].getValueType() == Ops[1].getValueType() &&
11815 Ops[0].getValueType() == VTList.VTs[0] &&
11816 "Binary operator types must match!");
11817 SDValue N1 = Ops[0], N2 = Ops[1];
11818 canonicalizeCommutativeBinop(Opcode, N1, N2);
11819
11820 // (X +- 0) -> X with zero-overflow.
11821 ConstantSDNode *N2CV = isConstOrConstSplat(N2, /*AllowUndefs*/ false,
11822 /*AllowTruncation*/ true);
11823 if (N2CV && N2CV->isZero()) {
11824 SDValue ZeroOverFlow = getConstant(0, DL, VTList.VTs[1]);
11825 return getNode(ISD::MERGE_VALUES, DL, VTList, {N1, ZeroOverFlow}, Flags);
11826 }
11827
11828 if (VTList.VTs[0].getScalarType() == MVT::i1 &&
11829 VTList.VTs[1].getScalarType() == MVT::i1) {
11830 SDValue F1 = getFreeze(N1);
11831 SDValue F2 = getFreeze(N2);
11832 // {vXi1,vXi1} (u/s)addo(vXi1 x, vXi1y) -> {xor(x,y),and(x,y)}
11833 if (Opcode == ISD::UADDO || Opcode == ISD::SADDO)
11834 return getNode(ISD::MERGE_VALUES, DL, VTList,
11835 {getNode(ISD::XOR, DL, VTList.VTs[0], F1, F2),
11836 getNode(ISD::AND, DL, VTList.VTs[1], F1, F2)},
11837 Flags);
11838 // {vXi1,vXi1} (u/s)subo(vXi1 x, vXi1y) -> {xor(x,y),and(~x,y)}
11839 if (Opcode == ISD::USUBO || Opcode == ISD::SSUBO) {
11840 SDValue NotF1 = getNOT(DL, F1, VTList.VTs[0]);
11841 return getNode(ISD::MERGE_VALUES, DL, VTList,
11842 {getNode(ISD::XOR, DL, VTList.VTs[0], F1, F2),
11843 getNode(ISD::AND, DL, VTList.VTs[1], NotF1, F2)},
11844 Flags);
11845 }
11846 }
11847 break;
11848 }
11849 case ISD::SADDO_CARRY:
11850 case ISD::UADDO_CARRY:
11851 case ISD::SSUBO_CARRY:
11852 case ISD::USUBO_CARRY:
11853 assert(VTList.NumVTs == 2 && Ops.size() == 3 &&
11854 "Invalid add/sub overflow op!");
11855 assert(VTList.VTs[0].isInteger() && VTList.VTs[1].isInteger() &&
11856 Ops[0].getValueType() == Ops[1].getValueType() &&
11857 Ops[0].getValueType() == VTList.VTs[0] &&
11858 Ops[2].getValueType() == VTList.VTs[1] &&
11859 "Binary operator types must match!");
11860 break;
11861 case ISD::SMUL_LOHI:
11862 case ISD::UMUL_LOHI: {
11863 assert(VTList.NumVTs == 2 && Ops.size() == 2 && "Invalid mul lo/hi op!");
11864 assert(VTList.VTs[0].isInteger() && VTList.VTs[0] == VTList.VTs[1] &&
11865 VTList.VTs[0] == Ops[0].getValueType() &&
11866 VTList.VTs[0] == Ops[1].getValueType() &&
11867 "Binary operator types must match!");
11868 // Constant fold.
11871 if (LHS && RHS) {
11872 unsigned Width = VTList.VTs[0].getScalarSizeInBits();
11873 unsigned OutWidth = Width * 2;
11874 APInt Val = LHS->getAPIntValue();
11875 APInt Mul = RHS->getAPIntValue();
11876 if (Opcode == ISD::SMUL_LOHI) {
11877 Val = Val.sext(OutWidth);
11878 Mul = Mul.sext(OutWidth);
11879 } else {
11880 Val = Val.zext(OutWidth);
11881 Mul = Mul.zext(OutWidth);
11882 }
11883 Val *= Mul;
11884
11885 SDValue Hi =
11886 getConstant(Val.extractBits(Width, Width), DL, VTList.VTs[0]);
11887 SDValue Lo = getConstant(Val.trunc(Width), DL, VTList.VTs[0]);
11888 return getNode(ISD::MERGE_VALUES, DL, VTList, {Lo, Hi}, Flags);
11889 }
11890 break;
11891 }
11892 case ISD::FFREXP: {
11893 assert(VTList.NumVTs == 2 && Ops.size() == 1 && "Invalid ffrexp op!");
11894 assert(VTList.VTs[0].isFloatingPoint() && VTList.VTs[1].isInteger() &&
11895 VTList.VTs[0] == Ops[0].getValueType() && "frexp type mismatch");
11896
11898 int FrexpExp;
11899 APFloat FrexpMant =
11900 frexp(C->getValueAPF(), FrexpExp, APFloat::rmNearestTiesToEven);
11901 SDValue Result0 = getConstantFP(FrexpMant, DL, VTList.VTs[0]);
11902 SDValue Result1 = getSignedConstant(FrexpMant.isFinite() ? FrexpExp : 0,
11903 DL, VTList.VTs[1]);
11904 return getNode(ISD::MERGE_VALUES, DL, VTList, {Result0, Result1}, Flags);
11905 }
11906
11907 break;
11908 }
11910 assert(VTList.NumVTs == 2 && Ops.size() == 2 &&
11911 "Invalid STRICT_FP_EXTEND!");
11912 assert(VTList.VTs[0].isFloatingPoint() &&
11913 Ops[1].getValueType().isFloatingPoint() && "Invalid FP cast!");
11914 assert(VTList.VTs[0].isVector() == Ops[1].getValueType().isVector() &&
11915 "STRICT_FP_EXTEND result type should be vector iff the operand "
11916 "type is vector!");
11917 assert((!VTList.VTs[0].isVector() ||
11918 VTList.VTs[0].getVectorElementCount() ==
11919 Ops[1].getValueType().getVectorElementCount()) &&
11920 "Vector element count mismatch!");
11921 assert(Ops[1].getValueType().bitsLT(VTList.VTs[0]) &&
11922 "Invalid fpext node, dst <= src!");
11923 break;
11925 assert(VTList.NumVTs == 2 && Ops.size() == 3 && "Invalid STRICT_FP_ROUND!");
11926 assert(VTList.VTs[0].isVector() == Ops[1].getValueType().isVector() &&
11927 "STRICT_FP_ROUND result type should be vector iff the operand "
11928 "type is vector!");
11929 assert((!VTList.VTs[0].isVector() ||
11930 VTList.VTs[0].getVectorElementCount() ==
11931 Ops[1].getValueType().getVectorElementCount()) &&
11932 "Vector element count mismatch!");
11933 assert(VTList.VTs[0].isFloatingPoint() &&
11934 Ops[1].getValueType().isFloatingPoint() &&
11935 VTList.VTs[0].bitsLT(Ops[1].getValueType()) &&
11936 Ops[2].getOpcode() == ISD::TargetConstant &&
11937 (Ops[2]->getAsZExtVal() == 0 || Ops[2]->getAsZExtVal() == 1) &&
11938 "Invalid STRICT_FP_ROUND!");
11939 break;
11940 }
11941
11942 // Memoize the node unless it returns a glue result.
11943 SDNode *N;
11944 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
11946 AddNodeIDNode(ID, Opcode, VTList, Ops);
11947 void *IP = nullptr;
11948 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
11949 E->intersectFlagsWith(Flags);
11950 return SDValue(E, 0);
11951 }
11952
11953 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
11954 createOperands(N, Ops);
11955 CSEMap.InsertNode(N, IP);
11956 } else {
11957 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
11958 createOperands(N, Ops);
11959 }
11960
11961 N->setFlags(Flags);
11962 InsertNode(N);
11963 SDValue V(N, 0);
11964 NewSDValueDbgMsg(V, "Creating new node: ", this);
11965 return V;
11966}
11967
11968SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
11969 SDVTList VTList) {
11970 return getNode(Opcode, DL, VTList, ArrayRef<SDValue>());
11971}
11972
11973SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
11974 SDValue N1) {
11975 SDValue Ops[] = { N1 };
11976 return getNode(Opcode, DL, VTList, Ops);
11977}
11978
11979SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
11980 SDValue N1, SDValue N2) {
11981 SDValue Ops[] = { N1, N2 };
11982 return getNode(Opcode, DL, VTList, Ops);
11983}
11984
11985SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
11986 SDValue N1, SDValue N2, SDValue N3) {
11987 SDValue Ops[] = { N1, N2, N3 };
11988 return getNode(Opcode, DL, VTList, Ops);
11989}
11990
11991SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
11992 SDValue N1, SDValue N2, SDValue N3, SDValue N4) {
11993 SDValue Ops[] = { N1, N2, N3, N4 };
11994 return getNode(Opcode, DL, VTList, Ops);
11995}
11996
11997SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
11998 SDValue N1, SDValue N2, SDValue N3, SDValue N4,
11999 SDValue N5) {
12000 SDValue Ops[] = { N1, N2, N3, N4, N5 };
12001 return getNode(Opcode, DL, VTList, Ops);
12002}
12003
12005 if (!VT.isExtended())
12006 return makeVTList(SDNode::getValueTypeList(VT.getSimpleVT()), 1);
12007
12008 return makeVTList(&(*EVTs.insert(VT).first), 1);
12009}
12010
12013 ID.AddInteger(2U);
12014 ID.AddInteger(VT1.getRawBits());
12015 ID.AddInteger(VT2.getRawBits());
12016
12017 void *IP = nullptr;
12018 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
12019 if (!Result) {
12020 EVT *Array = Allocator.Allocate<EVT>(2);
12021 Array[0] = VT1;
12022 Array[1] = VT2;
12023 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2);
12024 VTListMap.InsertNode(Result, IP);
12025 }
12026 return Result->getSDVTList();
12027}
12028
12031 ID.AddInteger(3U);
12032 ID.AddInteger(VT1.getRawBits());
12033 ID.AddInteger(VT2.getRawBits());
12034 ID.AddInteger(VT3.getRawBits());
12035
12036 void *IP = nullptr;
12037 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
12038 if (!Result) {
12039 EVT *Array = Allocator.Allocate<EVT>(3);
12040 Array[0] = VT1;
12041 Array[1] = VT2;
12042 Array[2] = VT3;
12043 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3);
12044 VTListMap.InsertNode(Result, IP);
12045 }
12046 return Result->getSDVTList();
12047}
12048
12051 ID.AddInteger(4U);
12052 ID.AddInteger(VT1.getRawBits());
12053 ID.AddInteger(VT2.getRawBits());
12054 ID.AddInteger(VT3.getRawBits());
12055 ID.AddInteger(VT4.getRawBits());
12056
12057 void *IP = nullptr;
12058 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
12059 if (!Result) {
12060 EVT *Array = Allocator.Allocate<EVT>(4);
12061 Array[0] = VT1;
12062 Array[1] = VT2;
12063 Array[2] = VT3;
12064 Array[3] = VT4;
12065 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4);
12066 VTListMap.InsertNode(Result, IP);
12067 }
12068 return Result->getSDVTList();
12069}
12070
12072 unsigned NumVTs = VTs.size();
12074 ID.AddInteger(NumVTs);
12075 for (unsigned index = 0; index < NumVTs; index++) {
12076 ID.AddInteger(VTs[index].getRawBits());
12077 }
12078
12079 void *IP = nullptr;
12080 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
12081 if (!Result) {
12082 EVT *Array = Allocator.Allocate<EVT>(NumVTs);
12083 llvm::copy(VTs, Array);
12084 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs);
12085 VTListMap.InsertNode(Result, IP);
12086 }
12087 return Result->getSDVTList();
12088}
12089
12090
12091/// UpdateNodeOperands - *Mutate* the specified node in-place to have the
12092/// specified operands. If the resultant node already exists in the DAG,
12093/// this does not modify the specified node, instead it returns the node that
12094/// already exists. If the resultant node does not exist in the DAG, the
12095/// input node is returned. As a degenerate case, if you specify the same
12096/// input operands as the node already has, the input node is returned.
12098 assert(N->getNumOperands() == 1 && "Update with wrong number of operands");
12099
12100 // Check to see if there is no change.
12101 if (Op == N->getOperand(0)) return N;
12102
12103 // See if the modified node already exists.
12104 void *InsertPos = nullptr;
12105 if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos))
12106 return Existing;
12107
12108 // Nope it doesn't. Remove the node from its current place in the maps.
12109 if (InsertPos)
12110 if (!RemoveNodeFromCSEMaps(N))
12111 InsertPos = nullptr;
12112
12113 // Now we update the operands.
12114 N->OperandList[0].set(Op);
12115
12117 // If this gets put into a CSE map, add it.
12118 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
12119 return N;
12120}
12121
12123 assert(N->getNumOperands() == 2 && "Update with wrong number of operands");
12124
12125 // Check to see if there is no change.
12126 if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1))
12127 return N; // No operands changed, just return the input node.
12128
12129 // See if the modified node already exists.
12130 void *InsertPos = nullptr;
12131 if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos))
12132 return Existing;
12133
12134 // Nope it doesn't. Remove the node from its current place in the maps.
12135 if (InsertPos)
12136 if (!RemoveNodeFromCSEMaps(N))
12137 InsertPos = nullptr;
12138
12139 // Now we update the operands.
12140 if (N->OperandList[0] != Op1)
12141 N->OperandList[0].set(Op1);
12142 if (N->OperandList[1] != Op2)
12143 N->OperandList[1].set(Op2);
12144
12146 // If this gets put into a CSE map, add it.
12147 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
12148 return N;
12149}
12150
12153 SDValue Ops[] = { Op1, Op2, Op3 };
12154 return UpdateNodeOperands(N, Ops);
12155}
12156
12159 SDValue Op3, SDValue Op4) {
12160 SDValue Ops[] = { Op1, Op2, Op3, Op4 };
12161 return UpdateNodeOperands(N, Ops);
12162}
12163
12166 SDValue Op3, SDValue Op4, SDValue Op5) {
12167 SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
12168 return UpdateNodeOperands(N, Ops);
12169}
12170
12173 unsigned NumOps = Ops.size();
12174 assert(N->getNumOperands() == NumOps &&
12175 "Update with wrong number of operands");
12176
12177 // If no operands changed just return the input node.
12178 if (std::equal(Ops.begin(), Ops.end(), N->op_begin()))
12179 return N;
12180
12181 // See if the modified node already exists.
12182 void *InsertPos = nullptr;
12183 if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, InsertPos))
12184 return Existing;
12185
12186 // Nope it doesn't. Remove the node from its current place in the maps.
12187 if (InsertPos)
12188 if (!RemoveNodeFromCSEMaps(N))
12189 InsertPos = nullptr;
12190
12191 // Now we update the operands.
12192 for (unsigned i = 0; i != NumOps; ++i)
12193 if (N->OperandList[i] != Ops[i])
12194 N->OperandList[i].set(Ops[i]);
12195
12197 // If this gets put into a CSE map, add it.
12198 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
12199 return N;
12200}
12201
12202/// DropOperands - Release the operands and set this node to have
12203/// zero operands.
12205 // Unlike the code in MorphNodeTo that does this, we don't need to
12206 // watch for dead nodes here.
12207 for (op_iterator I = op_begin(), E = op_end(); I != E; ) {
12208 SDUse &Use = *I++;
12209 Use.set(SDValue());
12210 }
12211}
12212
12214 ArrayRef<MachineMemOperand *> NewMemRefs) {
12215 if (NewMemRefs.empty()) {
12216 N->clearMemRefs();
12217 return;
12218 }
12219
12220 // Check if we can avoid allocating by storing a single reference directly.
12221 if (NewMemRefs.size() == 1) {
12222 N->MemRefs = NewMemRefs[0];
12223 N->NumMemRefs = 1;
12224 return;
12225 }
12226
12227 MachineMemOperand **MemRefsBuffer =
12228 Allocator.template Allocate<MachineMemOperand *>(NewMemRefs.size());
12229 llvm::copy(NewMemRefs, MemRefsBuffer);
12230 N->MemRefs = MemRefsBuffer;
12231 N->NumMemRefs = static_cast<int>(NewMemRefs.size());
12232}
12233
12234/// SelectNodeTo - These are wrappers around MorphNodeTo that accept a
12235/// machine opcode.
12236///
12238 EVT VT) {
12239 SDVTList VTs = getVTList(VT);
12240 return SelectNodeTo(N, MachineOpc, VTs, {});
12241}
12242
12244 EVT VT, SDValue Op1) {
12245 SDVTList VTs = getVTList(VT);
12246 SDValue Ops[] = { Op1 };
12247 return SelectNodeTo(N, MachineOpc, VTs, Ops);
12248}
12249
12251 EVT VT, SDValue Op1,
12252 SDValue Op2) {
12253 SDVTList VTs = getVTList(VT);
12254 SDValue Ops[] = { Op1, Op2 };
12255 return SelectNodeTo(N, MachineOpc, VTs, Ops);
12256}
12257
12259 EVT VT, SDValue Op1,
12260 SDValue Op2, SDValue Op3) {
12261 SDVTList VTs = getVTList(VT);
12262 SDValue Ops[] = { Op1, Op2, Op3 };
12263 return SelectNodeTo(N, MachineOpc, VTs, Ops);
12264}
12265
12268 SDVTList VTs = getVTList(VT);
12269 return SelectNodeTo(N, MachineOpc, VTs, Ops);
12270}
12271
12273 EVT VT1, EVT VT2, ArrayRef<SDValue> Ops) {
12274 SDVTList VTs = getVTList(VT1, VT2);
12275 return SelectNodeTo(N, MachineOpc, VTs, Ops);
12276}
12277
12279 EVT VT1, EVT VT2) {
12280 SDVTList VTs = getVTList(VT1, VT2);
12281 return SelectNodeTo(N, MachineOpc, VTs, {});
12282}
12283
12285 EVT VT1, EVT VT2, EVT VT3,
12287 SDVTList VTs = getVTList(VT1, VT2, VT3);
12288 return SelectNodeTo(N, MachineOpc, VTs, Ops);
12289}
12290
12292 EVT VT1, EVT VT2,
12293 SDValue Op1, SDValue Op2) {
12294 SDVTList VTs = getVTList(VT1, VT2);
12295 SDValue Ops[] = { Op1, Op2 };
12296 return SelectNodeTo(N, MachineOpc, VTs, Ops);
12297}
12298
12301 SDNode *New = MorphNodeTo(N, ~MachineOpc, VTs, Ops);
12302 // Reset the NodeID to -1.
12303 New->setNodeId(-1);
12304 if (New != N) {
12305 ReplaceAllUsesWith(N, New);
12307 }
12308 return New;
12309}
12310
12311/// UpdateSDLocOnMergeSDNode - If the opt level is -O0 then it throws away
12312/// the line number information on the merged node since it is not possible to
12313/// preserve the information that operation is associated with multiple lines.
12314/// This will make the debugger working better at -O0, were there is a higher
12315/// probability having other instructions associated with that line.
12316///
12317/// For IROrder, we keep the smaller of the two
12318SDNode *SelectionDAG::UpdateSDLocOnMergeSDNode(SDNode *N, const SDLoc &OLoc) {
12319 DebugLoc NLoc = N->getDebugLoc();
12320 if (NLoc && OptLevel == CodeGenOptLevel::None && OLoc.getDebugLoc() != NLoc) {
12321 N->setDebugLoc(DebugLoc());
12322 }
12323 unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder());
12324 N->setIROrder(Order);
12325 return N;
12326}
12327
12328/// MorphNodeTo - This *mutates* the specified node to have the specified
12329/// return type, opcode, and operands.
12330///
12331/// Note that MorphNodeTo returns the resultant node. If there is already a
12332/// node of the specified opcode and operands, it returns that node instead of
12333/// the current one. Note that the SDLoc need not be the same.
12334///
12335/// Using MorphNodeTo is faster than creating a new node and swapping it in
12336/// with ReplaceAllUsesWith both because it often avoids allocating a new
12337/// node, and because it doesn't require CSE recalculation for any of
12338/// the node's users.
12339///
12340/// However, note that MorphNodeTo recursively deletes dead nodes from the DAG.
12341/// As a consequence it isn't appropriate to use from within the DAG combiner or
12342/// the legalizer which maintain worklists that would need to be updated when
12343/// deleting things.
12346 // If an identical node already exists, use it.
12347 void *IP = nullptr;
12348 if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) {
12350 AddNodeIDNode(ID, Opc, VTs, Ops);
12351 if (SDNode *ON = FindNodeOrInsertPos(ID, SDLoc(N), IP))
12352 return UpdateSDLocOnMergeSDNode(ON, SDLoc(N));
12353 }
12354
12355 if (!RemoveNodeFromCSEMaps(N))
12356 IP = nullptr;
12357
12358 // Start the morphing.
12359 N->NodeType = Opc;
12360 N->ValueList = VTs.VTs;
12361 N->NumValues = VTs.NumVTs;
12362
12363 // Clear the operands list, updating used nodes to remove this from their
12364 // use list. Keep track of any operands that become dead as a result.
12365 SmallPtrSet<SDNode*, 16> DeadNodeSet;
12366 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
12367 SDUse &Use = *I++;
12368 SDNode *Used = Use.getNode();
12369 Use.set(SDValue());
12370 if (Used->use_empty())
12371 DeadNodeSet.insert(Used);
12372 }
12373
12374 // For MachineNode, initialize the memory references information.
12376 MN->clearMemRefs();
12377
12378 // Swap for an appropriately sized array from the recycler.
12379 removeOperands(N);
12380 createOperands(N, Ops);
12381
12382 // Delete any nodes that are still dead after adding the uses for the
12383 // new operands.
12384 if (!DeadNodeSet.empty()) {
12385 SmallVector<SDNode *, 16> DeadNodes;
12386 for (SDNode *N : DeadNodeSet)
12387 if (N->use_empty())
12388 DeadNodes.push_back(N);
12389 RemoveDeadNodes(DeadNodes);
12390 }
12391
12392 if (IP)
12393 CSEMap.InsertNode(N, IP); // Memoize the new node.
12394 return N;
12395}
12396
12398 unsigned OrigOpc = Node->getOpcode();
12399 unsigned NewOpc;
12400 switch (OrigOpc) {
12401 default:
12402 llvm_unreachable("mutateStrictFPToFP called with unexpected opcode!");
12403#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
12404 case ISD::STRICT_##DAGN: NewOpc = ISD::DAGN; break;
12405#define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
12406 case ISD::STRICT_##DAGN: NewOpc = ISD::SETCC; break;
12407#include "llvm/IR/ConstrainedOps.def"
12408 }
12409
12410 assert(Node->getNumValues() == 2 && "Unexpected number of results!");
12411
12412 // We're taking this node out of the chain, so we need to re-link things.
12413 SDValue InputChain = Node->getOperand(0);
12414 SDValue OutputChain = SDValue(Node, 1);
12415 ReplaceAllUsesOfValueWith(OutputChain, InputChain);
12416
12418 for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i)
12419 Ops.push_back(Node->getOperand(i));
12420
12421 SDVTList VTs = getVTList(Node->getValueType(0));
12422 SDNode *Res = MorphNodeTo(Node, NewOpc, VTs, Ops);
12423
12424 // MorphNodeTo can operate in two ways: if an existing node with the
12425 // specified operands exists, it can just return it. Otherwise, it
12426 // updates the node in place to have the requested operands.
12427 if (Res == Node) {
12428 // If we updated the node in place, reset the node ID. To the isel,
12429 // this should be just like a newly allocated machine node.
12430 Res->setNodeId(-1);
12431 } else {
12434 }
12435
12436 return Res;
12437}
12438
12439/// getMachineNode - These are used for target selectors to create a new node
12440/// with specified return type(s), MachineInstr opcode, and operands.
12441///
12442/// Note that getMachineNode returns the resultant node. If there is already a
12443/// node of the specified opcode and operands, it returns that node instead of
12444/// the current one.
12446 EVT VT) {
12447 SDVTList VTs = getVTList(VT);
12448 return getMachineNode(Opcode, dl, VTs, {});
12449}
12450
12452 EVT VT, SDValue Op1) {
12453 SDVTList VTs = getVTList(VT);
12454 SDValue Ops[] = { Op1 };
12455 return getMachineNode(Opcode, dl, VTs, Ops);
12456}
12457
12459 EVT VT, SDValue Op1, SDValue Op2) {
12460 SDVTList VTs = getVTList(VT);
12461 SDValue Ops[] = { Op1, Op2 };
12462 return getMachineNode(Opcode, dl, VTs, Ops);
12463}
12464
12466 EVT VT, SDValue Op1, SDValue Op2,
12467 SDValue Op3) {
12468 SDVTList VTs = getVTList(VT);
12469 SDValue Ops[] = { Op1, Op2, Op3 };
12470 return getMachineNode(Opcode, dl, VTs, Ops);
12471}
12472
12475 SDVTList VTs = getVTList(VT);
12476 return getMachineNode(Opcode, dl, VTs, Ops);
12477}
12478
12480 EVT VT1, EVT VT2, SDValue Op1,
12481 SDValue Op2) {
12482 SDVTList VTs = getVTList(VT1, VT2);
12483 SDValue Ops[] = { Op1, Op2 };
12484 return getMachineNode(Opcode, dl, VTs, Ops);
12485}
12486
12488 EVT VT1, EVT VT2, SDValue Op1,
12489 SDValue Op2, SDValue Op3) {
12490 SDVTList VTs = getVTList(VT1, VT2);
12491 SDValue Ops[] = { Op1, Op2, Op3 };
12492 return getMachineNode(Opcode, dl, VTs, Ops);
12493}
12494
12496 EVT VT1, EVT VT2,
12498 SDVTList VTs = getVTList(VT1, VT2);
12499 return getMachineNode(Opcode, dl, VTs, Ops);
12500}
12501
12503 EVT VT1, EVT VT2, EVT VT3,
12504 SDValue Op1, SDValue Op2) {
12505 SDVTList VTs = getVTList(VT1, VT2, VT3);
12506 SDValue Ops[] = { Op1, Op2 };
12507 return getMachineNode(Opcode, dl, VTs, Ops);
12508}
12509
12511 EVT VT1, EVT VT2, EVT VT3,
12512 SDValue Op1, SDValue Op2,
12513 SDValue Op3) {
12514 SDVTList VTs = getVTList(VT1, VT2, VT3);
12515 SDValue Ops[] = { Op1, Op2, Op3 };
12516 return getMachineNode(Opcode, dl, VTs, Ops);
12517}
12518
12520 EVT VT1, EVT VT2, EVT VT3,
12522 SDVTList VTs = getVTList(VT1, VT2, VT3);
12523 return getMachineNode(Opcode, dl, VTs, Ops);
12524}
12525
12527 ArrayRef<EVT> ResultTys,
12529 SDVTList VTs = getVTList(ResultTys);
12530 return getMachineNode(Opcode, dl, VTs, Ops);
12531}
12532
12534 SDVTList VTs,
12536 bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue;
12538 void *IP = nullptr;
12539
12540 if (DoCSE) {
12542 AddNodeIDNode(ID, ~Opcode, VTs, Ops);
12543 IP = nullptr;
12544 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
12545 return cast<MachineSDNode>(UpdateSDLocOnMergeSDNode(E, DL));
12546 }
12547 }
12548
12549 // Allocate a new MachineSDNode.
12550 N = newSDNode<MachineSDNode>(~Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
12551 createOperands(N, Ops);
12552
12553 if (DoCSE)
12554 CSEMap.InsertNode(N, IP);
12555
12556 InsertNode(N);
12557 NewSDValueDbgMsg(SDValue(N, 0), "Creating new machine node: ", this);
12558 return N;
12559}
12560
12561/// getTargetExtractSubreg - A convenience function for creating
12562/// TargetOpcode::EXTRACT_SUBREG nodes.
12564 SDValue Operand) {
12565 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
12566 SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
12567 VT, Operand, SRIdxVal);
12568 return SDValue(Subreg, 0);
12569}
12570
12571/// getTargetInsertSubreg - A convenience function for creating
12572/// TargetOpcode::INSERT_SUBREG nodes.
12574 SDValue Operand, SDValue Subreg) {
12575 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
12576 SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
12577 VT, Operand, Subreg, SRIdxVal);
12578 return SDValue(Result, 0);
12579}
12580
12581/// getNodeIfExists - Get the specified node if it's already available, or
12582/// else return NULL.
12585 bool AllowCommute) {
12586 SDNodeFlags Flags;
12587 if (Inserter)
12588 Flags = Inserter->getFlags();
12589 return getNodeIfExists(Opcode, VTList, Ops, Flags, AllowCommute);
12590}
12591
12594 const SDNodeFlags Flags,
12595 bool AllowCommute) {
12596 if (VTList.VTs[VTList.NumVTs - 1] == MVT::Glue)
12597 return nullptr;
12598
12599 auto Lookup = [&](ArrayRef<SDValue> LookupOps) -> SDNode * {
12601 AddNodeIDNode(ID, Opcode, VTList, LookupOps);
12602 void *IP = nullptr;
12603 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) {
12604 E->intersectFlagsWith(Flags);
12605 return E;
12606 }
12607 return nullptr;
12608 };
12609
12610 if (SDNode *Existing = Lookup(Ops))
12611 return Existing;
12612
12613 if (AllowCommute && TLI->isCommutativeBinOp(Opcode))
12614 return Lookup({Ops[1], Ops[0]});
12615
12616 return nullptr;
12617}
12618
12619/// doesNodeExist - Check if a node exists without modifying its flags.
12620bool SelectionDAG::doesNodeExist(unsigned Opcode, SDVTList VTList,
12622 if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) {
12624 AddNodeIDNode(ID, Opcode, VTList, Ops);
12625 void *IP = nullptr;
12626 if (FindNodeOrInsertPos(ID, SDLoc(), IP))
12627 return true;
12628 }
12629 return false;
12630}
12631
12632/// getDbgValue - Creates a SDDbgValue node.
12633///
12634/// SDNode
12636 SDNode *N, unsigned R, bool IsIndirect,
12637 const DebugLoc &DL, unsigned O) {
12638 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
12639 "Expected inlined-at fields to agree");
12640 return new (DbgInfo->getAlloc())
12641 SDDbgValue(DbgInfo->getAlloc(), Var, Expr, SDDbgOperand::fromNode(N, R),
12642 {}, IsIndirect, DL, O,
12643 /*IsVariadic=*/false);
12644}
12645
12646/// Constant
12648 DIExpression *Expr,
12649 const Value *C,
12650 const DebugLoc &DL, unsigned O) {
12651 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
12652 "Expected inlined-at fields to agree");
12653 return new (DbgInfo->getAlloc())
12654 SDDbgValue(DbgInfo->getAlloc(), Var, Expr, SDDbgOperand::fromConst(C), {},
12655 /*IsIndirect=*/false, DL, O,
12656 /*IsVariadic=*/false);
12657}
12658
12659/// FrameIndex
12661 DIExpression *Expr, unsigned FI,
12662 bool IsIndirect,
12663 const DebugLoc &DL,
12664 unsigned O) {
12665 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
12666 "Expected inlined-at fields to agree");
12667 return getFrameIndexDbgValue(Var, Expr, FI, {}, IsIndirect, DL, O);
12668}
12669
12670/// FrameIndex with dependencies
12672 DIExpression *Expr, unsigned FI,
12673 ArrayRef<SDNode *> Dependencies,
12674 bool IsIndirect,
12675 const DebugLoc &DL,
12676 unsigned O) {
12677 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
12678 "Expected inlined-at fields to agree");
12679 return new (DbgInfo->getAlloc())
12680 SDDbgValue(DbgInfo->getAlloc(), Var, Expr, SDDbgOperand::fromFrameIdx(FI),
12681 Dependencies, IsIndirect, DL, O,
12682 /*IsVariadic=*/false);
12683}
12684
12685/// VReg
12687 Register VReg, bool IsIndirect,
12688 const DebugLoc &DL, unsigned O) {
12689 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
12690 "Expected inlined-at fields to agree");
12691 return new (DbgInfo->getAlloc())
12692 SDDbgValue(DbgInfo->getAlloc(), Var, Expr, SDDbgOperand::fromVReg(VReg),
12693 {}, IsIndirect, DL, O,
12694 /*IsVariadic=*/false);
12695}
12696
12699 ArrayRef<SDNode *> Dependencies,
12700 bool IsIndirect, const DebugLoc &DL,
12701 unsigned O, bool IsVariadic) {
12702 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
12703 "Expected inlined-at fields to agree");
12704 return new (DbgInfo->getAlloc())
12705 SDDbgValue(DbgInfo->getAlloc(), Var, Expr, Locs, Dependencies, IsIndirect,
12706 DL, O, IsVariadic);
12707}
12708
12710 unsigned OffsetInBits, unsigned SizeInBits,
12711 bool InvalidateDbg) {
12712 SDNode *FromNode = From.getNode();
12713 SDNode *ToNode = To.getNode();
12714 assert(FromNode && ToNode && "Can't modify dbg values");
12715
12716 // PR35338
12717 // TODO: assert(From != To && "Redundant dbg value transfer");
12718 // TODO: assert(FromNode != ToNode && "Intranode dbg value transfer");
12719 if (From == To || FromNode == ToNode)
12720 return;
12721
12722 if (!FromNode->getHasDebugValue())
12723 return;
12724
12725 SDDbgOperand FromLocOp =
12726 SDDbgOperand::fromNode(From.getNode(), From.getResNo());
12728
12730 for (SDDbgValue *Dbg : GetDbgValues(FromNode)) {
12731 if (Dbg->isInvalidated())
12732 continue;
12733
12734 // TODO: assert(!Dbg->isInvalidated() && "Transfer of invalid dbg value");
12735
12736 // Create a new location ops vector that is equal to the old vector, but
12737 // with each instance of FromLocOp replaced with ToLocOp.
12738 bool Changed = false;
12739 auto NewLocOps = Dbg->copyLocationOps();
12740 std::replace_if(
12741 NewLocOps.begin(), NewLocOps.end(),
12742 [&Changed, FromLocOp](const SDDbgOperand &Op) {
12743 bool Match = Op == FromLocOp;
12744 Changed |= Match;
12745 return Match;
12746 },
12747 ToLocOp);
12748 // Ignore this SDDbgValue if we didn't find a matching location.
12749 if (!Changed)
12750 continue;
12751
12752 DIVariable *Var = Dbg->getVariable();
12753 auto *Expr = Dbg->getExpression();
12754 // If a fragment is requested, update the expression.
12755 if (SizeInBits) {
12756 // When splitting a larger (e.g., sign-extended) value whose
12757 // lower bits are described with an SDDbgValue, do not attempt
12758 // to transfer the SDDbgValue to the upper bits.
12759 if (auto FI = Expr->getFragmentInfo())
12760 if (OffsetInBits + SizeInBits > FI->SizeInBits)
12761 continue;
12762 auto Fragment = DIExpression::createFragmentExpression(Expr, OffsetInBits,
12763 SizeInBits);
12764 if (!Fragment)
12765 continue;
12766 Expr = *Fragment;
12767 }
12768
12769 auto AdditionalDependencies = Dbg->getAdditionalDependencies();
12770 // Clone the SDDbgValue and move it to To.
12771 SDDbgValue *Clone = getDbgValueList(
12772 Var, Expr, NewLocOps, AdditionalDependencies, Dbg->isIndirect(),
12773 Dbg->getDebugLoc(), std::max(ToNode->getIROrder(), Dbg->getOrder()),
12774 Dbg->isVariadic());
12775 ClonedDVs.push_back(Clone);
12776
12777 if (InvalidateDbg) {
12778 // Invalidate value and indicate the SDDbgValue should not be emitted.
12779 Dbg->setIsInvalidated();
12780 Dbg->setIsEmitted();
12781 }
12782 }
12783
12784 for (SDDbgValue *Dbg : ClonedDVs) {
12785 assert(is_contained(Dbg->getSDNodes(), ToNode) &&
12786 "Transferred DbgValues should depend on the new SDNode");
12787 AddDbgValue(Dbg, false);
12788 }
12789}
12790
12792 if (!N.getHasDebugValue())
12793 return;
12794
12795 auto GetLocationOperand = [](SDNode *Node, unsigned ResNo) {
12796 if (auto *FISDN = dyn_cast<FrameIndexSDNode>(Node))
12797 return SDDbgOperand::fromFrameIdx(FISDN->getIndex());
12798 return SDDbgOperand::fromNode(Node, ResNo);
12799 };
12800
12802 for (auto *DV : GetDbgValues(&N)) {
12803 if (DV->isInvalidated())
12804 continue;
12805 switch (N.getOpcode()) {
12806 default:
12807 break;
12808 case ISD::ADD: {
12809 SDValue N0 = N.getOperand(0);
12810 SDValue N1 = N.getOperand(1);
12811 if (!isa<ConstantSDNode>(N0)) {
12812 bool RHSConstant = isa<ConstantSDNode>(N1);
12814 if (RHSConstant)
12815 Offset = N.getConstantOperandVal(1);
12816 // We are not allowed to turn indirect debug values variadic, so
12817 // don't salvage those.
12818 if (!RHSConstant && DV->isIndirect())
12819 continue;
12820
12821 // Rewrite an ADD constant node into a DIExpression. Since we are
12822 // performing arithmetic to compute the variable's *value* in the
12823 // DIExpression, we need to mark the expression with a
12824 // DW_OP_stack_value.
12825 auto *DIExpr = DV->getExpression();
12826 auto NewLocOps = DV->copyLocationOps();
12827 bool Changed = false;
12828 size_t OrigLocOpsSize = NewLocOps.size();
12829 for (size_t i = 0; i < OrigLocOpsSize; ++i) {
12830 // We're not given a ResNo to compare against because the whole
12831 // node is going away. We know that any ISD::ADD only has one
12832 // result, so we can assume any node match is using the result.
12833 if (NewLocOps[i].getKind() != SDDbgOperand::SDNODE ||
12834 NewLocOps[i].getSDNode() != &N)
12835 continue;
12836 NewLocOps[i] = GetLocationOperand(N0.getNode(), N0.getResNo());
12837 if (RHSConstant) {
12840 DIExpr = DIExpression::appendOpsToArg(DIExpr, ExprOps, i, true);
12841 } else {
12842 // Convert to a variadic expression (if not already).
12843 // convertToVariadicExpression() returns a const pointer, so we use
12844 // a temporary const variable here.
12845 const auto *TmpDIExpr =
12849 ExprOps.push_back(NewLocOps.size());
12850 ExprOps.push_back(dwarf::DW_OP_plus);
12851 SDDbgOperand RHS =
12853 NewLocOps.push_back(RHS);
12854 DIExpr = DIExpression::appendOpsToArg(TmpDIExpr, ExprOps, i, true);
12855 }
12856 Changed = true;
12857 }
12858 (void)Changed;
12859 assert(Changed && "Salvage target doesn't use N");
12860
12861 bool IsVariadic =
12862 DV->isVariadic() || OrigLocOpsSize != NewLocOps.size();
12863
12864 auto AdditionalDependencies = DV->getAdditionalDependencies();
12865 SDDbgValue *Clone = getDbgValueList(
12866 DV->getVariable(), DIExpr, NewLocOps, AdditionalDependencies,
12867 DV->isIndirect(), DV->getDebugLoc(), DV->getOrder(), IsVariadic);
12868 ClonedDVs.push_back(Clone);
12869 DV->setIsInvalidated();
12870 DV->setIsEmitted();
12871 LLVM_DEBUG(dbgs() << "SALVAGE: Rewriting";
12872 N0.getNode()->dumprFull(this);
12873 dbgs() << " into " << *DIExpr << '\n');
12874 }
12875 break;
12876 }
12877 case ISD::TRUNCATE: {
12878 SDValue N0 = N.getOperand(0);
12879 TypeSize FromSize = N0.getValueSizeInBits();
12880 TypeSize ToSize = N.getValueSizeInBits(0);
12881
12882 DIExpression *DbgExpression = DV->getExpression();
12883 auto ExtOps = DIExpression::getExtOps(FromSize, ToSize, false);
12884 auto NewLocOps = DV->copyLocationOps();
12885 bool Changed = false;
12886 for (size_t i = 0; i < NewLocOps.size(); ++i) {
12887 if (NewLocOps[i].getKind() != SDDbgOperand::SDNODE ||
12888 NewLocOps[i].getSDNode() != &N)
12889 continue;
12890
12891 NewLocOps[i] = GetLocationOperand(N0.getNode(), N0.getResNo());
12892 DbgExpression = DIExpression::appendOpsToArg(DbgExpression, ExtOps, i);
12893 Changed = true;
12894 }
12895 assert(Changed && "Salvage target doesn't use N");
12896 (void)Changed;
12897
12898 SDDbgValue *Clone =
12899 getDbgValueList(DV->getVariable(), DbgExpression, NewLocOps,
12900 DV->getAdditionalDependencies(), DV->isIndirect(),
12901 DV->getDebugLoc(), DV->getOrder(), DV->isVariadic());
12902
12903 ClonedDVs.push_back(Clone);
12904 DV->setIsInvalidated();
12905 DV->setIsEmitted();
12906 LLVM_DEBUG(dbgs() << "SALVAGE: Rewriting"; N0.getNode()->dumprFull(this);
12907 dbgs() << " into " << *DbgExpression << '\n');
12908 break;
12909 }
12910 }
12911 }
12912
12913 for (SDDbgValue *Dbg : ClonedDVs) {
12914 assert((!Dbg->getSDNodes().empty() ||
12915 llvm::any_of(Dbg->getLocationOps(),
12916 [&](const SDDbgOperand &Op) {
12917 return Op.getKind() == SDDbgOperand::FRAMEIX;
12918 })) &&
12919 "Salvaged DbgValue should depend on a new SDNode");
12920 AddDbgValue(Dbg, false);
12921 }
12922}
12923
12924/// Creates a SDDbgLabel node.
12926 const DebugLoc &DL, unsigned O) {
12927 assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(DL) &&
12928 "Expected inlined-at fields to agree");
12929 return new (DbgInfo->getAlloc()) SDDbgLabel(Label, DL, O);
12930}
12931
12932namespace {
12933
12934/// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node
12935/// pointed to by a use iterator is deleted, increment the use iterator
12936/// so that it doesn't dangle.
12937///
12938class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener {
12941
12942 void NodeDeleted(SDNode *N, SDNode *E) override {
12943 // Increment the iterator as needed.
12944 while (UI != UE && N == UI->getUser())
12945 ++UI;
12946 }
12947
12948public:
12949 RAUWUpdateListener(SelectionDAG &d,
12952 : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
12953};
12954
12955} // end anonymous namespace
12956
12957/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
12958/// This can cause recursive merging of nodes in the DAG.
12959///
12960/// This version assumes From has a single result value.
12961///
12963 SDNode *From = FromN.getNode();
12964 assert(From->getNumValues() == 1 && FromN.getResNo() == 0 &&
12965 "Cannot replace with this method!");
12966 assert(From != To.getNode() && "Cannot replace uses of with self");
12967
12968 // Preserve Debug Values
12969 transferDbgValues(FromN, To);
12970 // Preserve extra info.
12971 copyExtraInfo(From, To.getNode());
12972
12973 // Iterate over all the existing uses of From. New uses will be added
12974 // to the beginning of the use list, which we avoid visiting.
12975 // This specifically avoids visiting uses of From that arise while the
12976 // replacement is happening, because any such uses would be the result
12977 // of CSE: If an existing node looks like From after one of its operands
12978 // is replaced by To, we don't want to replace of all its users with To
12979 // too. See PR3018 for more info.
12980 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
12981 RAUWUpdateListener Listener(*this, UI, UE);
12982 while (UI != UE) {
12983 SDNode *User = UI->getUser();
12984
12985 // This node is about to morph, remove its old self from the CSE maps.
12986 RemoveNodeFromCSEMaps(User);
12987
12988 // A user can appear in a use list multiple times, and when this
12989 // happens the uses are usually next to each other in the list.
12990 // To help reduce the number of CSE recomputations, process all
12991 // the uses of this user that we can find this way.
12992 do {
12993 SDUse &Use = *UI;
12994 ++UI;
12995 Use.set(To);
12996 if (To->isDivergent() != From->isDivergent())
12998 } while (UI != UE && UI->getUser() == User);
12999 // Now that we have modified User, add it back to the CSE maps. If it
13000 // already exists there, recursively merge the results together.
13001 AddModifiedNodeToCSEMaps(User);
13002 }
13003
13004 // If we just RAUW'd the root, take note.
13005 if (FromN == getRoot())
13006 setRoot(To);
13007}
13008
13009/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
13010/// This can cause recursive merging of nodes in the DAG.
13011///
13012/// This version assumes that for each value of From, there is a
13013/// corresponding value in To in the same position with the same type.
13014///
13016#ifndef NDEBUG
13017 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
13018 assert((!From->hasAnyUseOfValue(i) ||
13019 From->getValueType(i) == To->getValueType(i)) &&
13020 "Cannot use this version of ReplaceAllUsesWith!");
13021#endif
13022
13023 // Handle the trivial case.
13024 if (From == To)
13025 return;
13026
13027 // Preserve Debug Info. Only do this if there's a use.
13028 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
13029 if (From->hasAnyUseOfValue(i)) {
13030 assert((i < To->getNumValues()) && "Invalid To location");
13031 transferDbgValues(SDValue(From, i), SDValue(To, i));
13032 }
13033 // Preserve extra info.
13034 copyExtraInfo(From, To);
13035
13036 // Iterate over just the existing users of From. See the comments in
13037 // the ReplaceAllUsesWith above.
13038 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
13039 RAUWUpdateListener Listener(*this, UI, UE);
13040 while (UI != UE) {
13041 SDNode *User = UI->getUser();
13042
13043 // This node is about to morph, remove its old self from the CSE maps.
13044 RemoveNodeFromCSEMaps(User);
13045
13046 // A user can appear in a use list multiple times, and when this
13047 // happens the uses are usually next to each other in the list.
13048 // To help reduce the number of CSE recomputations, process all
13049 // the uses of this user that we can find this way.
13050 do {
13051 SDUse &Use = *UI;
13052 ++UI;
13053 Use.setNode(To);
13054 if (To->isDivergent() != From->isDivergent())
13056 } while (UI != UE && UI->getUser() == User);
13057
13058 // Now that we have modified User, add it back to the CSE maps. If it
13059 // already exists there, recursively merge the results together.
13060 AddModifiedNodeToCSEMaps(User);
13061 }
13062
13063 // If we just RAUW'd the root, take note.
13064 if (From == getRoot().getNode())
13065 setRoot(SDValue(To, getRoot().getResNo()));
13066}
13067
13068/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
13069/// This can cause recursive merging of nodes in the DAG.
13070///
13071/// This version can replace From with any result values. To must match the
13072/// number and types of values returned by From.
13074 if (From->getNumValues() == 1) // Handle the simple case efficiently.
13075 return ReplaceAllUsesWith(SDValue(From, 0), To[0]);
13076
13077 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i) {
13078 // Preserve Debug Info.
13079 transferDbgValues(SDValue(From, i), To[i]);
13080 // Preserve extra info.
13081 copyExtraInfo(From, To[i].getNode());
13082 }
13083
13084 // Iterate over just the existing users of From. See the comments in
13085 // the ReplaceAllUsesWith above.
13086 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
13087 RAUWUpdateListener Listener(*this, UI, UE);
13088 while (UI != UE) {
13089 SDNode *User = UI->getUser();
13090
13091 // This node is about to morph, remove its old self from the CSE maps.
13092 RemoveNodeFromCSEMaps(User);
13093
13094 // A user can appear in a use list multiple times, and when this happens the
13095 // uses are usually next to each other in the list. To help reduce the
13096 // number of CSE and divergence recomputations, process all the uses of this
13097 // user that we can find this way.
13098 bool To_IsDivergent = false;
13099 do {
13100 SDUse &Use = *UI;
13101 const SDValue &ToOp = To[Use.getResNo()];
13102 ++UI;
13103 Use.set(ToOp);
13104 if (ToOp.getValueType() != MVT::Other)
13105 To_IsDivergent |= ToOp->isDivergent();
13106 } while (UI != UE && UI->getUser() == User);
13107
13108 if (To_IsDivergent != From->isDivergent())
13110
13111 // Now that we have modified User, add it back to the CSE maps. If it
13112 // already exists there, recursively merge the results together.
13113 AddModifiedNodeToCSEMaps(User);
13114 }
13115
13116 // If we just RAUW'd the root, take note.
13117 if (From == getRoot().getNode())
13118 setRoot(SDValue(To[getRoot().getResNo()]));
13119}
13120
13121/// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
13122/// uses of other values produced by From.getNode() alone. The Deleted
13123/// vector is handled the same way as for ReplaceAllUsesWith.
13125 // Handle the really simple, really trivial case efficiently.
13126 if (From == To) return;
13127
13128 // Handle the simple, trivial, case efficiently.
13129 if (From.getNode()->getNumValues() == 1) {
13130 ReplaceAllUsesWith(From, To);
13131 return;
13132 }
13133
13134 // Preserve Debug Info.
13135 transferDbgValues(From, To);
13136 copyExtraInfo(From.getNode(), To.getNode());
13137
13138 // Iterate over just the existing users of From. See the comments in
13139 // the ReplaceAllUsesWith above.
13140 SDNode::use_iterator UI = From.getNode()->use_begin(),
13141 UE = From.getNode()->use_end();
13142 RAUWUpdateListener Listener(*this, UI, UE);
13143 while (UI != UE) {
13144 SDNode *User = UI->getUser();
13145 bool UserRemovedFromCSEMaps = false;
13146
13147 // A user can appear in a use list multiple times, and when this
13148 // happens the uses are usually next to each other in the list.
13149 // To help reduce the number of CSE recomputations, process all
13150 // the uses of this user that we can find this way.
13151 do {
13152 SDUse &Use = *UI;
13153
13154 // Skip uses of different values from the same node.
13155 if (Use.getResNo() != From.getResNo()) {
13156 ++UI;
13157 continue;
13158 }
13159
13160 // If this node hasn't been modified yet, it's still in the CSE maps,
13161 // so remove its old self from the CSE maps.
13162 if (!UserRemovedFromCSEMaps) {
13163 RemoveNodeFromCSEMaps(User);
13164 UserRemovedFromCSEMaps = true;
13165 }
13166
13167 ++UI;
13168 Use.set(To);
13169 if (To->isDivergent() != From->isDivergent())
13171 } while (UI != UE && UI->getUser() == User);
13172 // We are iterating over all uses of the From node, so if a use
13173 // doesn't use the specific value, no changes are made.
13174 if (!UserRemovedFromCSEMaps)
13175 continue;
13176
13177 // Now that we have modified User, add it back to the CSE maps. If it
13178 // already exists there, recursively merge the results together.
13179 AddModifiedNodeToCSEMaps(User);
13180 }
13181
13182 // If we just RAUW'd the root, take note.
13183 if (From == getRoot())
13184 setRoot(To);
13185}
13186
13187namespace {
13188
13189/// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith
13190/// to record information about a use.
struct UseMemo {
  SDNode *User;   // The node that owns the recorded use.
  unsigned Index; // Index into the parallel From/To value arrays.
  SDUse *Use;     // The specific operand slot that refers to the From value.
};
13196
13197/// operator< - Sort Memos by User.
13198bool operator<(const UseMemo &L, const UseMemo &R) {
13199 return (intptr_t)L.User < (intptr_t)R.User;
13200}
13201
13202/// RAUOVWUpdateListener - Helper for ReplaceAllUsesOfValuesWith - When the node
13203/// pointed to by a UseMemo is deleted, set the User to nullptr to indicate that
13204/// the node already has been taken care of recursively.
class RAUOVWUpdateListener : public SelectionDAG::DAGUpdateListener {
  SmallVectorImpl<UseMemo> &Uses;

  // If a recorded user is deleted mid-replacement (e.g. by recursive CSE
  // merging), null out its UseMemo entries so the replacement loop knows
  // that user has already been handled and skips it.
  void NodeDeleted(SDNode *N, SDNode *E) override {
    for (UseMemo &Memo : Uses)
      if (Memo.User == N)
        Memo.User = nullptr;
  }

public:
  RAUOVWUpdateListener(SelectionDAG &d, SmallVectorImpl<UseMemo> &uses)
      : SelectionDAG::DAGUpdateListener(d), Uses(uses) {}
};
13218
13219} // end anonymous namespace
13220
13221/// Return true if a glue output should propagate divergence information.
13223 switch (Node->getOpcode()) {
13224 case ISD::CopyFromReg:
13225 case ISD::CopyToReg:
13226 return false;
13227 default:
13228 return true;
13229 }
13230
13231 llvm_unreachable("covered opcode switch");
13232}
13233
13235 if (TLI->isSDNodeAlwaysUniform(N)) {
13236 assert(!TLI->isSDNodeSourceOfDivergence(N, FLI, UA) &&
13237 "Conflicting divergence information!");
13238 return false;
13239 }
13240 if (TLI->isSDNodeSourceOfDivergence(N, FLI, UA))
13241 return true;
13242 for (const auto &Op : N->ops()) {
13243 EVT VT = Op.getValueType();
13244
13245 // Skip Chain. It does not carry divergence.
13246 if (VT != MVT::Other && Op.getNode()->isDivergent() &&
13247 (VT != MVT::Glue || gluePropagatesDivergence(Op.getNode())))
13248 return true;
13249 }
13250 return false;
13251}
13252
13254 SmallVector<SDNode *, 16> Worklist(1, N);
13255 do {
13256 N = Worklist.pop_back_val();
13257 bool IsDivergent = calculateDivergence(N);
13258 if (N->SDNodeBits.IsDivergent != IsDivergent) {
13259 N->SDNodeBits.IsDivergent = IsDivergent;
13260 llvm::append_range(Worklist, N->users());
13261 }
13262 } while (!Worklist.empty());
13263}
13264
13265void SelectionDAG::CreateTopologicalOrder(std::vector<SDNode *> &Order) {
13267 Order.reserve(AllNodes.size());
13268 for (auto &N : allnodes()) {
13269 unsigned NOps = N.getNumOperands();
13270 Degree[&N] = NOps;
13271 if (0 == NOps)
13272 Order.push_back(&N);
13273 }
13274 for (size_t I = 0; I != Order.size(); ++I) {
13275 SDNode *N = Order[I];
13276 for (auto *U : N->users()) {
13277 unsigned &UnsortedOps = Degree[U];
13278 if (0 == --UnsortedOps)
13279 Order.push_back(U);
13280 }
13281 }
13282}
13283
13284#if !defined(NDEBUG) && LLVM_ENABLE_ABI_BREAKING_CHECKS
13285void SelectionDAG::VerifyDAGDivergence() {
13286 std::vector<SDNode *> TopoOrder;
13287 CreateTopologicalOrder(TopoOrder);
13288 for (auto *N : TopoOrder) {
13289 assert(calculateDivergence(N) == N->isDivergent() &&
13290 "Divergence bit inconsistency detected");
13291 }
13292}
13293#endif
13294
13295/// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
13296/// uses of other values produced by From.getNode() alone. The same value
13297/// may appear in both the From and To list. The Deleted vector is
13298/// handled the same way as for ReplaceAllUsesWith.
13300 const SDValue *To,
13301 unsigned Num){
13302 // Handle the simple, trivial case efficiently.
13303 if (Num == 1)
13304 return ReplaceAllUsesOfValueWith(*From, *To);
13305
13306 transferDbgValues(*From, *To);
13307 copyExtraInfo(From->getNode(), To->getNode());
13308
13309 // Read up all the uses and make records of them. This helps
13310 // processing new uses that are introduced during the
13311 // replacement process.
13313 for (unsigned i = 0; i != Num; ++i) {
13314 unsigned FromResNo = From[i].getResNo();
13315 SDNode *FromNode = From[i].getNode();
13316 for (SDUse &Use : FromNode->uses()) {
13317 if (Use.getResNo() == FromResNo) {
13318 UseMemo Memo = {Use.getUser(), i, &Use};
13319 Uses.push_back(Memo);
13320 }
13321 }
13322 }
13323
13324 // Sort the uses, so that all the uses from a given User are together.
13326 RAUOVWUpdateListener Listener(*this, Uses);
13327
13328 for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
13329 UseIndex != UseIndexEnd; ) {
13330 // We know that this user uses some value of From. If it is the right
13331 // value, update it.
13332 SDNode *User = Uses[UseIndex].User;
13333 // If the node has been deleted by recursive CSE updates when updating
13334 // another node, then just skip this entry.
13335 if (User == nullptr) {
13336 ++UseIndex;
13337 continue;
13338 }
13339
13340 // This node is about to morph, remove its old self from the CSE maps.
13341 RemoveNodeFromCSEMaps(User);
13342
13343 // The Uses array is sorted, so all the uses for a given User
13344 // are next to each other in the list.
13345 // To help reduce the number of CSE recomputations, process all
13346 // the uses of this user that we can find this way.
13347 do {
13348 unsigned i = Uses[UseIndex].Index;
13349 SDUse &Use = *Uses[UseIndex].Use;
13350 ++UseIndex;
13351
13352 Use.set(To[i]);
13353 } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);
13354
13355 // Now that we have modified User, add it back to the CSE maps. If it
13356 // already exists there, recursively merge the results together.
13357 AddModifiedNodeToCSEMaps(User);
13358 }
13359}
13360
13361/// AssignTopologicalOrder - Assign a unique node id for each node in the DAG
13362/// based on their topological order. It returns the maximum id and a vector
13363/// of the SDNodes* in assigned order by reference.
13365 unsigned DAGSize = 0;
13366
13367 // SortedPos tracks the progress of the algorithm. Nodes before it are
13368 // sorted, nodes after it are unsorted. When the algorithm completes
13369 // it is at the end of the list.
13370 allnodes_iterator SortedPos = allnodes_begin();
13371
13372 // Visit all the nodes. Move nodes with no operands to the front of
13373 // the list immediately. Annotate nodes that do have operands with their
13374 // operand count. Before we do this, the Node Id fields of the nodes
13375 // may contain arbitrary values. After, the Node Id fields for nodes
13376 // before SortedPos will contain the topological sort index, and the
13377 // Node Id fields for nodes At SortedPos and after will contain the
13378 // count of outstanding operands.
13380 checkForCycles(&N, this);
13381 unsigned Degree = N.getNumOperands();
13382 if (Degree == 0) {
13383 // A node with no uses, add it to the result array immediately.
13384 N.setNodeId(DAGSize++);
13385 allnodes_iterator Q(&N);
13386 if (Q != SortedPos)
13387 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
13388 assert(SortedPos != AllNodes.end() && "Overran node list");
13389 ++SortedPos;
13390 } else {
13391 // Temporarily use the Node Id as scratch space for the degree count.
13392 N.setNodeId(Degree);
13393 }
13394 }
13395
13396 // Visit all the nodes. As we iterate, move nodes into sorted order,
13397 // such that by the time the end is reached all nodes will be sorted.
13398 for (SDNode &Node : allnodes()) {
13399 SDNode *N = &Node;
13400 checkForCycles(N, this);
13401 // N is in sorted position, so all its uses have one less operand
13402 // that needs to be sorted.
13403 for (SDNode *P : N->users()) {
13404 unsigned Degree = P->getNodeId();
13405 assert(Degree != 0 && "Invalid node degree");
13406 --Degree;
13407 if (Degree == 0) {
 13408 // All of P's operands are sorted, so P may be sorted now.
13409 P->setNodeId(DAGSize++);
13410 if (P->getIterator() != SortedPos)
13411 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
13412 assert(SortedPos != AllNodes.end() && "Overran node list");
13413 ++SortedPos;
13414 } else {
13415 // Update P's outstanding operand count.
13416 P->setNodeId(Degree);
13417 }
13418 }
13419 if (Node.getIterator() == SortedPos) {
13420#ifndef NDEBUG
13422 SDNode *S = &*++I;
13423 dbgs() << "Overran sorted position:\n";
13424 S->dumprFull(this); dbgs() << "\n";
13425 dbgs() << "Checking if this is due to cycles\n";
13426 checkForCycles(this, true);
13427#endif
13428 llvm_unreachable(nullptr);
13429 }
13430 }
13431
13432 assert(SortedPos == AllNodes.end() &&
13433 "Topological sort incomplete!");
13434 assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
13435 "First node in topological sort is not the entry token!");
13436 assert(AllNodes.front().getNodeId() == 0 &&
13437 "First node in topological sort has non-zero id!");
13438 assert(AllNodes.front().getNumOperands() == 0 &&
13439 "First node in topological sort has operands!");
13440 assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
13441 "Last node in topologic sort has unexpected id!");
13442 assert(AllNodes.back().use_empty() &&
13443 "Last node in topologic sort has users!");
13444 assert(DAGSize == allnodes_size() && "Node count mismatch!");
13445 return DAGSize;
13446}
13447
13449 SmallVectorImpl<const SDNode *> &SortedNodes) const {
13450 SortedNodes.clear();
13451 // Node -> remaining number of outstanding operands.
13452 DenseMap<const SDNode *, unsigned> RemainingOperands;
13453
13454 // Put nodes without any operands into SortedNodes first.
13455 for (const SDNode &N : allnodes()) {
13456 checkForCycles(&N, this);
13457 unsigned NumOperands = N.getNumOperands();
13458 if (NumOperands == 0)
13459 SortedNodes.push_back(&N);
13460 else
13461 // Record their total number of outstanding operands.
13462 RemainingOperands[&N] = NumOperands;
13463 }
13464
13465 // A node is pushed into SortedNodes when all of its operands (predecessors in
13466 // the graph) are also in SortedNodes.
13467 for (unsigned i = 0U; i < SortedNodes.size(); ++i) {
13468 const SDNode *N = SortedNodes[i];
13469 for (const SDNode *U : N->users()) {
13470 // HandleSDNode is never part of a DAG and therefore has no entry in
13471 // RemainingOperands.
13472 if (U->getOpcode() == ISD::HANDLENODE)
13473 continue;
13474 unsigned &NumRemOperands = RemainingOperands[U];
13475 assert(NumRemOperands && "Invalid number of remaining operands");
13476 --NumRemOperands;
13477 if (!NumRemOperands)
13478 SortedNodes.push_back(U);
13479 }
13480 }
13481
13482 assert(SortedNodes.size() == AllNodes.size() && "Node count mismatch");
13483 assert(SortedNodes.front()->getOpcode() == ISD::EntryToken &&
13484 "First node in topological sort is not the entry token");
13485 assert(SortedNodes.front()->getNumOperands() == 0 &&
13486 "First node in topological sort has operands");
13487}
13488
13489/// AddDbgValue - Add a dbg_value SDNode. If SD is non-null that means the
13490/// value is produced by SD.
13491void SelectionDAG::AddDbgValue(SDDbgValue *DB, bool isParameter) {
13492 for (SDNode *SD : DB->getSDNodes()) {
13493 if (!SD)
13494 continue;
13495 assert(DbgInfo->getSDDbgValues(SD).empty() || SD->getHasDebugValue());
13496 SD->setHasDebugValue(true);
13497 }
13498 DbgInfo->add(DB, isParameter);
13499}
13500
13501void SelectionDAG::AddDbgLabel(SDDbgLabel *DB) { DbgInfo->add(DB); }
13502
13504 SDValue NewMemOpChain) {
13505 assert(isa<MemSDNode>(NewMemOpChain) && "Expected a memop node");
13506 assert(NewMemOpChain.getValueType() == MVT::Other && "Expected a token VT");
13507 // The new memory operation must have the same position as the old load in
13508 // terms of memory dependency. Create a TokenFactor for the old load and new
13509 // memory operation and update uses of the old load's output chain to use that
13510 // TokenFactor.
13511 if (OldChain == NewMemOpChain || OldChain.use_empty())
13512 return NewMemOpChain;
13513
13514 SDValue TokenFactor = getNode(ISD::TokenFactor, SDLoc(OldChain), MVT::Other,
13515 OldChain, NewMemOpChain);
13516 ReplaceAllUsesOfValueWith(OldChain, TokenFactor);
13517 UpdateNodeOperands(TokenFactor.getNode(), OldChain, NewMemOpChain);
13518 return TokenFactor;
13519}
13520
13522 SDValue NewMemOp) {
13523 assert(isa<MemSDNode>(NewMemOp.getNode()) && "Expected a memop node");
13524 SDValue OldChain = SDValue(OldLoad, 1);
13525 SDValue NewMemOpChain = NewMemOp.getValue(1);
13526 return makeEquivalentMemoryOrdering(OldChain, NewMemOpChain);
13527}
13528
13530 Function **OutFunction) {
13531 assert(isa<ExternalSymbolSDNode>(Op) && "Node should be an ExternalSymbol");
13532
13533 auto *Symbol = cast<ExternalSymbolSDNode>(Op)->getSymbol();
13534 auto *Module = MF->getFunction().getParent();
13535 auto *Function = Module->getFunction(Symbol);
13536
13537 if (OutFunction != nullptr)
13538 *OutFunction = Function;
13539
13540 if (Function != nullptr) {
13541 auto PtrTy = TLI->getPointerTy(getDataLayout(), Function->getAddressSpace());
13542 return getGlobalAddress(Function, SDLoc(Op), PtrTy);
13543 }
13544
13545 std::string ErrorStr;
13546 raw_string_ostream ErrorFormatter(ErrorStr);
13547 ErrorFormatter << "Undefined external symbol ";
13548 ErrorFormatter << '"' << Symbol << '"';
13549 report_fatal_error(Twine(ErrorStr));
13550}
13551
13552//===----------------------------------------------------------------------===//
13553// SDNode Class
13554//===----------------------------------------------------------------------===//
13555
13558 return Const != nullptr && Const->isZero();
13559}
13560
13562 return V.isUndef() || isNullConstant(V);
13563}
13564
13567 return Const != nullptr && Const->isZero() && !Const->isNegative();
13568}
13569
13572 return Const != nullptr && Const->isAllOnes();
13573}
13574
13577 return Const != nullptr && Const->isOne();
13578}
13579
13582 return Const != nullptr && Const->isMinSignedValue();
13583}
13584
13586 SDValue V, unsigned OperandNo,
13587 unsigned Depth) const {
13588 APInt DemandedElts = getDemandAllEltsMask(V);
13589 return isIdentityElement(Opcode, Flags, V, DemandedElts, OperandNo, Depth);
13590}
13591
13593 SDValue V, const APInt &DemandedElts,
13594 unsigned OperandNo, unsigned Depth) const {
13595 // NOTE: The cases should match with IR's ConstantExpr::getBinOpIdentity().
13596 // TODO: Target-specific opcodes could be added.
13597 if (V.getValueType().isInteger()) {
13598 KnownBits Known = computeKnownBits(V, DemandedElts, Depth);
13599 if (Known.isConstant()) {
13600 const APInt &Const = Known.getConstant();
13601 switch (Opcode) {
13602 case ISD::ADD:
13603 case ISD::OR:
13604 case ISD::XOR:
13605 case ISD::UMAX:
13606 return Const.isZero();
13607 case ISD::MUL:
13608 return Const.isOne();
13609 case ISD::AND:
13610 case ISD::UMIN:
13611 return Const.isAllOnes();
13612 case ISD::SMAX:
13613 return Const.isMinSignedValue();
13614 case ISD::SMIN:
13615 return Const.isMaxSignedValue();
13616 case ISD::SUB:
13617 case ISD::SHL:
13618 case ISD::SRA:
13619 case ISD::SRL:
13620 return OperandNo == 1 && Const.isZero();
13621 case ISD::UDIV:
13622 case ISD::SDIV:
13623 return OperandNo == 1 && Const.isOne();
13624 }
13625 }
13626 } else if (auto *ConstFP = isConstOrConstSplatFP(V, DemandedElts)) {
13627 switch (Opcode) {
13628 case ISD::FADD:
13629 return ConstFP->isZero() &&
13630 (Flags.hasNoSignedZeros() || ConstFP->isNegative());
13631 case ISD::FSUB:
13632 return OperandNo == 1 && ConstFP->isZero() &&
13633 (Flags.hasNoSignedZeros() || !ConstFP->isNegative());
13634 case ISD::FMUL:
13635 return ConstFP->isExactlyValue(1.0);
13636 case ISD::FDIV:
13637 return OperandNo == 1 && ConstFP->isExactlyValue(1.0);
13638 case ISD::FMINNUM:
13639 case ISD::FMAXNUM: {
13640 // Neutral element for fminnum is NaN, Inf or FLT_MAX, depending on FMF.
13641 EVT VT = V.getValueType();
13642 const fltSemantics &Semantics = VT.getFltSemantics();
13643 APFloat NeutralAF = !Flags.hasNoNaNs() ? APFloat::getQNaN(Semantics)
13644 : !Flags.hasNoInfs() ? APFloat::getInf(Semantics)
13645 : APFloat::getLargest(Semantics);
13646 if (Opcode == ISD::FMAXNUM)
13647 NeutralAF.changeSign();
13648
13649 return ConstFP->isExactlyValue(NeutralAF);
13650 }
13651 }
13652 }
13653 return false;
13654}
13655
13657 while (V.getOpcode() == ISD::BITCAST)
13658 V = V.getOperand(0);
13659 return V;
13660}
13661
13663 while (V.getOpcode() == ISD::BITCAST && V.getOperand(0).hasOneUse())
13664 V = V.getOperand(0);
13665 return V;
13666}
13667
13669 while (V.getOpcode() == ISD::EXTRACT_SUBVECTOR)
13670 V = V.getOperand(0);
13671 return V;
13672}
13673
13675 while (V.getOpcode() == ISD::INSERT_VECTOR_ELT) {
13676 SDValue InVec = V.getOperand(0);
13677 SDValue EltNo = V.getOperand(2);
13678 EVT VT = InVec.getValueType();
13679 auto *IndexC = dyn_cast<ConstantSDNode>(EltNo);
13680 if (IndexC && VT.isFixedLengthVector() &&
13681 IndexC->getAPIntValue().ult(VT.getVectorNumElements()) &&
13682 !DemandedElts[IndexC->getZExtValue()]) {
13683 V = InVec;
13684 continue;
13685 }
13686 break;
13687 }
13688 return V;
13689}
13690
13692 while (V.getOpcode() == ISD::TRUNCATE)
13693 V = V.getOperand(0);
13694 return V;
13695}
13696
13697bool llvm::isBitwiseNot(SDValue V, bool AllowUndefs) {
13698 if (V.getOpcode() != ISD::XOR)
13699 return false;
13700 V = peekThroughBitcasts(V.getOperand(1));
13701 unsigned NumBits = V.getScalarValueSizeInBits();
13702 ConstantSDNode *C =
13703 isConstOrConstSplat(V, AllowUndefs, /*AllowTruncation*/ true);
13704 return C && (C->getAPIntValue().countr_one() >= NumBits);
13705}
13706
13708 bool AllowTruncation) {
13709 APInt DemandedElts = getDemandAllEltsMask(N);
13710 return isConstOrConstSplat(N, DemandedElts, AllowUndefs, AllowTruncation);
13711}
13712
13714 bool AllowUndefs,
13715 bool AllowTruncation) {
13717 return CN;
13718
13719 // SplatVectors can truncate their operands. Ignore that case here unless
13720 // AllowTruncation is set.
13721 if (N->getOpcode() == ISD::SPLAT_VECTOR) {
13722 EVT VecEltVT = N->getValueType(0).getVectorElementType();
13723 if (auto *CN = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
13724 EVT CVT = CN->getValueType(0);
13725 assert(CVT.bitsGE(VecEltVT) && "Illegal splat_vector element extension");
13726 if (AllowTruncation || CVT == VecEltVT)
13727 return CN;
13728 }
13729 }
13730
13732 BitVector UndefElements;
13733 ConstantSDNode *CN = BV->getConstantSplatNode(DemandedElts, &UndefElements);
13734
13735 // BuildVectors can truncate their operands. Ignore that case here unless
13736 // AllowTruncation is set.
13737 // TODO: Look into whether we should allow UndefElements in non-DemandedElts
13738 if (CN && (UndefElements.none() || AllowUndefs)) {
13739 EVT CVT = CN->getValueType(0);
13740 EVT NSVT = N.getValueType().getScalarType();
13741 assert(CVT.bitsGE(NSVT) && "Illegal build vector element extension");
13742 if (AllowTruncation || (CVT == NSVT))
13743 return CN;
13744 }
13745 }
13746
13747 return nullptr;
13748}
13749
13751 APInt DemandedElts = getDemandAllEltsMask(N);
13752 return isConstOrConstSplatFP(N, DemandedElts, AllowUndefs);
13753}
13754
13756 const APInt &DemandedElts,
13757 bool AllowUndefs) {
13759 return CN;
13760
13762 BitVector UndefElements;
13763 ConstantFPSDNode *CN =
13764 BV->getConstantFPSplatNode(DemandedElts, &UndefElements);
13765 // TODO: Look into whether we should allow UndefElements in non-DemandedElts
13766 if (CN && (UndefElements.none() || AllowUndefs))
13767 return CN;
13768 }
13769
13770 if (N.getOpcode() == ISD::SPLAT_VECTOR)
13771 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N.getOperand(0)))
13772 return CN;
13773
13774 return nullptr;
13775}
13776
13777bool llvm::isNullOrNullSplat(SDValue N, bool AllowUndefs) {
13778 // TODO: may want to use peekThroughBitcast() here.
13779 ConstantSDNode *C =
13780 isConstOrConstSplat(N, AllowUndefs, /*AllowTruncation=*/true);
13781 return C && C->isZero();
13782}
13783
13784bool llvm::isOneOrOneSplat(SDValue N, bool AllowUndefs) {
13785 ConstantSDNode *C =
13786 isConstOrConstSplat(N, AllowUndefs, /*AllowTruncation*/ true);
13787 return C && C->isOne();
13788}
13789
13790bool llvm::isOneOrOneSplatFP(SDValue N, bool AllowUndefs) {
13791 ConstantFPSDNode *C = isConstOrConstSplatFP(N, AllowUndefs);
13792 return C && C->isExactlyValue(1.0);
13793}
13794
13795bool llvm::isAllOnesOrAllOnesSplat(SDValue N, bool AllowUndefs) {
13797 unsigned BitWidth = N.getScalarValueSizeInBits();
13798 ConstantSDNode *C = isConstOrConstSplat(N, AllowUndefs);
13799 return C && C->isAllOnes() && C->getValueSizeInBits(0) == BitWidth;
13800}
13801
13802bool llvm::isOnesOrOnesSplat(SDValue N, bool AllowUndefs) {
13803 ConstantSDNode *C = isConstOrConstSplat(N, AllowUndefs);
13804 return C && APInt::isSameValue(C->getAPIntValue(),
13805 APInt(C->getAPIntValue().getBitWidth(), 1));
13806}
13807
13808bool llvm::isZeroOrZeroSplat(SDValue N, bool AllowUndefs) {
13810 ConstantSDNode *C = isConstOrConstSplat(N, AllowUndefs, true);
13811 return C && C->isZero();
13812}
13813
13814bool llvm::isZeroOrZeroSplatFP(SDValue N, bool AllowUndefs) {
13815 ConstantFPSDNode *C = isConstOrConstSplatFP(N, AllowUndefs);
13816 return C && C->isZero();
13817}
13818
13822
13824 unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTs, EVT memvt,
13826 : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MemRefs(memrefs) {
13827 bool IsVolatile = false;
13828 bool IsNonTemporal = false;
13829 bool IsDereferenceable = true;
13830 bool IsInvariant = true;
13831 for (const MachineMemOperand *MMO : memoperands()) {
13832 IsVolatile |= MMO->isVolatile();
13833 IsNonTemporal |= MMO->isNonTemporal();
13834 IsDereferenceable &= MMO->isDereferenceable();
13835 IsInvariant &= MMO->isInvariant();
13836 }
13837 MemSDNodeBits.IsVolatile = IsVolatile;
13838 MemSDNodeBits.IsNonTemporal = IsNonTemporal;
13839 MemSDNodeBits.IsDereferenceable = IsDereferenceable;
13840 MemSDNodeBits.IsInvariant = IsInvariant;
13841
13842 // For the single-MMO case, we check here that the size of the memory operand
13843 // fits within the size of the MMO. This is because the MMO might indicate
13844 // only a possible address range instead of specifying the affected memory
13845 // addresses precisely.
13848 getMemOperand()->getSize().getValue())) &&
13849 "Size mismatch!");
13850}
13851
13852/// Profile - Gather unique data for the node.
13853///
13855 AddNodeIDNode(ID, this);
13856}
13857
13858namespace {
13859
13860 struct EVTArray {
13861 std::vector<EVT> VTs;
13862
13863 EVTArray() {
13864 VTs.reserve(MVT::VALUETYPE_SIZE);
13865 for (unsigned i = 0; i < MVT::VALUETYPE_SIZE; ++i)
13866 VTs.push_back(MVT((MVT::SimpleValueType)i));
13867 }
13868 };
13869
13870} // end anonymous namespace
13871
13872/// getValueTypeList - Return a pointer to the specified value type.
13873///
13874const EVT *SDNode::getValueTypeList(MVT VT) {
13875 static EVTArray SimpleVTArray;
13876
13877 assert(VT < MVT::VALUETYPE_SIZE && "Value type out of range!");
13878 return &SimpleVTArray.VTs[VT.SimpleTy];
13879}
13880
13881/// hasAnyUseOfValue - Return true if there are any use of the indicated
13882/// value. This method ignores uses of other values defined by this operation.
13883bool SDNode::hasAnyUseOfValue(unsigned Value) const {
13884 assert(Value < getNumValues() && "Bad value!");
13885
13886 for (SDUse &U : uses())
13887 if (U.getResNo() == Value)
13888 return true;
13889
13890 return false;
13891}
13892
13893/// isOnlyUserOf - Return true if this node is the only use of N.
13894bool SDNode::isOnlyUserOf(const SDNode *N) const {
13895 bool Seen = false;
13896 for (const SDNode *User : N->users()) {
13897 if (User == this)
13898 Seen = true;
13899 else
13900 return false;
13901 }
13902
13903 return Seen;
13904}
13905
13906/// Return true if the only users of N are contained in Nodes.
13908 bool Seen = false;
13909 for (const SDNode *User : N->users()) {
13910 if (llvm::is_contained(Nodes, User))
13911 Seen = true;
13912 else
13913 return false;
13914 }
13915
13916 return Seen;
13917}
13918
13919/// Return true if the referenced return value is an operand of N.
13920bool SDValue::isOperandOf(const SDNode *N) const {
13921 return is_contained(N->op_values(), *this);
13922}
13923
13924bool SDNode::isOperandOf(const SDNode *N) const {
13925 return any_of(N->op_values(),
13926 [this](SDValue Op) { return this == Op.getNode(); });
13927}
13928
13929/// reachesChainWithoutSideEffects - Return true if this operand (which must
13930/// be a chain) reaches the specified operand without crossing any
13931/// side-effecting instructions on any chain path. In practice, this looks
13932/// through token factors and non-volatile loads. In order to remain efficient,
13933/// this only looks a couple of nodes in, it does not do an exhaustive search.
13934///
13935/// Note that we only need to examine chains when we're searching for
13936/// side-effects; SelectionDAG requires that all side-effects are represented
13937/// by chains, even if another operand would force a specific ordering. This
13938/// constraint is necessary to allow transformations like splitting loads.
13940 unsigned Depth) const {
13941 if (*this == Dest) return true;
13942
13943 // Don't search too deeply, we just want to be able to see through
13944 // TokenFactor's etc.
13945 if (Depth == 0) return false;
13946
13947 // If this is a token factor, all inputs to the TF happen in parallel.
13948 if (getOpcode() == ISD::TokenFactor) {
13949 // First, try a shallow search.
13950 if (is_contained((*this)->ops(), Dest)) {
13951 // We found the chain we want as an operand of this TokenFactor.
13952 // Essentially, we reach the chain without side-effects if we could
13953 // serialize the TokenFactor into a simple chain of operations with
13954 // Dest as the last operation. This is automatically true if the
13955 // chain has one use: there are no other ordering constraints.
13956 // If the chain has more than one use, we give up: some other
13957 // use of Dest might force a side-effect between Dest and the current
13958 // node.
13959 if (Dest.hasOneUse())
13960 return true;
13961 }
13962 // Next, try a deep search: check whether every operand of the TokenFactor
13963 // reaches Dest.
13964 return llvm::all_of((*this)->ops(), [=](SDValue Op) {
13965 return Op.reachesChainWithoutSideEffects(Dest, Depth - 1);
13966 });
13967 }
13968
13969 // Loads don't have side effects, look through them.
13970 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) {
13971 if (Ld->isUnordered())
13972 return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1);
13973 }
13974 return false;
13975}
13976
13977bool SDNode::hasPredecessor(const SDNode *N) const {
13980 Worklist.push_back(this);
13981 return hasPredecessorHelper(N, Visited, Worklist);
13982}
13983
13985 this->Flags &= Flags;
13986}
13987
13988SDValue
13990 ArrayRef<ISD::NodeType> CandidateBinOps,
13991 bool AllowPartials) {
13992 // The pattern must end in an extract from index 0.
13993 if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
13994 !isNullConstant(Extract->getOperand(1)))
13995 return SDValue();
13996
13997 // Match against one of the candidate binary ops.
13998 SDValue Op = Extract->getOperand(0);
13999 if (llvm::none_of(CandidateBinOps, [Op](ISD::NodeType BinOp) {
14000 return Op.getOpcode() == unsigned(BinOp);
14001 }))
14002 return SDValue();
14003
14004 // Floating-point reductions may require relaxed constraints on the final step
14005 // of the reduction because they may reorder intermediate operations.
14006 unsigned CandidateBinOp = Op.getOpcode();
14007 if (Op.getValueType().isFloatingPoint()) {
14008 SDNodeFlags Flags = Op->getFlags();
14009 switch (CandidateBinOp) {
14010 case ISD::FADD:
14011 if (!Flags.hasNoSignedZeros() || !Flags.hasAllowReassociation())
14012 return SDValue();
14013 break;
14014 default:
14015 llvm_unreachable("Unhandled FP opcode for binop reduction");
14016 }
14017 }
14018
14019 // Matching failed - attempt to see if we did enough stages that a partial
14020 // reduction from a subvector is possible.
14021 auto PartialReduction = [&](SDValue Op, unsigned NumSubElts) {
14022 if (!AllowPartials || !Op)
14023 return SDValue();
14024 EVT OpVT = Op.getValueType();
14025 EVT OpSVT = OpVT.getScalarType();
14026 EVT SubVT = EVT::getVectorVT(*getContext(), OpSVT, NumSubElts);
14027 if (!TLI->isExtractSubvectorCheap(SubVT, OpVT, 0))
14028 return SDValue();
14029 BinOp = (ISD::NodeType)CandidateBinOp;
14030 return getExtractSubvector(SDLoc(Op), SubVT, Op, 0);
14031 };
14032
14033 // At each stage, we're looking for something that looks like:
14034 // %s = shufflevector <8 x i32> %op, <8 x i32> undef,
14035 // <8 x i32> <i32 2, i32 3, i32 undef, i32 undef,
14036 // i32 undef, i32 undef, i32 undef, i32 undef>
14037 // %a = binop <8 x i32> %op, %s
14038 // Where the mask changes according to the stage. E.g. for a 3-stage pyramid,
14039 // we expect something like:
14040 // <4,5,6,7,u,u,u,u>
14041 // <2,3,u,u,u,u,u,u>
14042 // <1,u,u,u,u,u,u,u>
14043 // While a partial reduction match would be:
14044 // <2,3,u,u,u,u,u,u>
14045 // <1,u,u,u,u,u,u,u>
14046 unsigned Stages = Log2_32(Op.getValueType().getVectorNumElements());
14047 SDValue PrevOp;
14048 for (unsigned i = 0; i < Stages; ++i) {
14049 unsigned MaskEnd = (1 << i);
14050
14051 if (Op.getOpcode() != CandidateBinOp)
14052 return PartialReduction(PrevOp, MaskEnd);
14053
14054 SDValue Op0 = Op.getOperand(0);
14055 SDValue Op1 = Op.getOperand(1);
14056
14058 if (Shuffle) {
14059 Op = Op1;
14060 } else {
14061 Shuffle = dyn_cast<ShuffleVectorSDNode>(Op1);
14062 Op = Op0;
14063 }
14064
14065 // The first operand of the shuffle should be the same as the other operand
14066 // of the binop.
14067 if (!Shuffle || Shuffle->getOperand(0) != Op)
14068 return PartialReduction(PrevOp, MaskEnd);
14069
14070 // Verify the shuffle has the expected (at this stage of the pyramid) mask.
14071 for (int Index = 0; Index < (int)MaskEnd; ++Index)
14072 if (Shuffle->getMaskElt(Index) != (int)(MaskEnd + Index))
14073 return PartialReduction(PrevOp, MaskEnd);
14074
14075 PrevOp = Op;
14076 }
14077
14078 // Handle subvector reductions, which tend to appear after the shuffle
14079 // reduction stages.
14080 while (Op.getOpcode() == CandidateBinOp) {
14081 unsigned NumElts = Op.getValueType().getVectorNumElements();
14082 SDValue Op0 = Op.getOperand(0);
14083 SDValue Op1 = Op.getOperand(1);
14084 if (Op0.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
14086 Op0.getOperand(0) != Op1.getOperand(0))
14087 break;
14088 SDValue Src = Op0.getOperand(0);
14089 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
14090 if (NumSrcElts != (2 * NumElts))
14091 break;
14092 if (!(Op0.getConstantOperandAPInt(1) == 0 &&
14093 Op1.getConstantOperandAPInt(1) == NumElts) &&
14094 !(Op1.getConstantOperandAPInt(1) == 0 &&
14095 Op0.getConstantOperandAPInt(1) == NumElts))
14096 break;
14097 Op = Src;
14098 }
14099
14100 BinOp = (ISD::NodeType)CandidateBinOp;
14101 return Op;
14102}
14103
14105 EVT VT = N->getValueType(0);
14106 EVT EltVT = VT.getVectorElementType();
14107 unsigned NE = VT.getVectorNumElements();
14108
14109 SDLoc dl(N);
14110
14111 // If ResNE is 0, fully unroll the vector op.
14112 if (ResNE == 0)
14113 ResNE = NE;
14114 else if (NE > ResNE)
14115 NE = ResNE;
14116
14117 if (N->getNumValues() == 2) {
14118 SmallVector<SDValue, 8> Scalars0, Scalars1;
14119 SmallVector<SDValue, 4> Operands(N->getNumOperands());
14120 EVT VT1 = N->getValueType(1);
14121 EVT EltVT1 = VT1.getVectorElementType();
14122
14123 unsigned i;
14124 for (i = 0; i != NE; ++i) {
14125 for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
14126 SDValue Operand = N->getOperand(j);
14127 EVT OperandVT = Operand.getValueType();
14128
14129 // A vector operand; extract a single element.
14130 EVT OperandEltVT = OperandVT.getVectorElementType();
14131 Operands[j] = getExtractVectorElt(dl, OperandEltVT, Operand, i);
14132 }
14133
14134 SDValue EltOp = getNode(N->getOpcode(), dl, {EltVT, EltVT1}, Operands);
14135 Scalars0.push_back(EltOp);
14136 Scalars1.push_back(EltOp.getValue(1));
14137 }
14138
14139 for (; i < ResNE; ++i) {
14140 Scalars0.push_back(getUNDEF(EltVT));
14141 Scalars1.push_back(getUNDEF(EltVT1));
14142 }
14143
14144 EVT VecVT = EVT::getVectorVT(*getContext(), EltVT, ResNE);
14145 EVT VecVT1 = EVT::getVectorVT(*getContext(), EltVT1, ResNE);
14146 SDValue Vec0 = getBuildVector(VecVT, dl, Scalars0);
14147 SDValue Vec1 = getBuildVector(VecVT1, dl, Scalars1);
14148 return getMergeValues({Vec0, Vec1}, dl);
14149 }
14150
14151 assert(N->getNumValues() == 1 &&
14152 "Can't unroll a vector with multiple results!");
14153
14155 SmallVector<SDValue, 4> Operands(N->getNumOperands());
14156
14157 unsigned i;
14158 for (i= 0; i != NE; ++i) {
14159 for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
14160 SDValue Operand = N->getOperand(j);
14161 EVT OperandVT = Operand.getValueType();
14162 if (OperandVT.isVector()) {
14163 // A vector operand; extract a single element.
14164 EVT OperandEltVT = OperandVT.getVectorElementType();
14165 Operands[j] = getExtractVectorElt(dl, OperandEltVT, Operand, i);
14166 } else {
14167 // A scalar operand; just use it as is.
14168 Operands[j] = Operand;
14169 }
14170 }
14171
14172 switch (N->getOpcode()) {
14173 default: {
14174 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands,
14175 N->getFlags()));
14176 break;
14177 }
14178 case ISD::VSELECT:
14179 Scalars.push_back(getNode(ISD::SELECT, dl, EltVT, Operands));
14180 break;
14181 case ISD::SHL:
14182 case ISD::SRA:
14183 case ISD::SRL:
14184 case ISD::ROTL:
14185 case ISD::ROTR:
14186 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0],
14187 getShiftAmountOperand(Operands[0].getValueType(),
14188 Operands[1])));
14189 break;
14191 EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType();
14192 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
14193 Operands[0],
14194 getValueType(ExtVT)));
14195 break;
14196 }
14197 case ISD::ADDRSPACECAST: {
14198 const auto *ASC = cast<AddrSpaceCastSDNode>(N);
14199 Scalars.push_back(getAddrSpaceCast(dl, EltVT, Operands[0],
14200 ASC->getSrcAddressSpace(),
14201 ASC->getDestAddressSpace()));
14202 break;
14203 }
14204 }
14205 }
14206
14207 for (; i < ResNE; ++i)
14208 Scalars.push_back(getUNDEF(EltVT));
14209
14210 EVT VecVT = EVT::getVectorVT(*getContext(), EltVT, ResNE);
14211 return getBuildVector(VecVT, dl, Scalars);
14212}
14213
/// Unroll a vector overflow-arithmetic node (U/SADDO, U/SSUBO, U/SMULO) into
/// per-lane scalar ops, returning the rebuilt result vector and overflow
/// vector as a pair. If ResNE is 0 the op is fully unrolled; otherwise at
/// most ResNE lanes are computed and the remaining lanes are UNDEF.
std::pair<SDValue, SDValue> SelectionDAG::UnrollVectorOverflowOp(
    SDNode *N, unsigned ResNE) {
  unsigned Opcode = N->getOpcode();
  assert((Opcode == ISD::UADDO || Opcode == ISD::SADDO ||
          Opcode == ISD::USUBO || Opcode == ISD::SSUBO ||
          Opcode == ISD::UMULO || Opcode == ISD::SMULO) &&
         "Expected an overflow opcode");

  EVT ResVT = N->getValueType(0);
  EVT OvVT = N->getValueType(1);
  EVT ResEltVT = ResVT.getVectorElementType();
  EVT OvEltVT = OvVT.getVectorElementType();
  SDLoc dl(N);

  // If ResNE is 0, fully unroll the vector op.
  unsigned NE = ResVT.getVectorNumElements();
  if (ResNE == 0)
    ResNE = NE;
  else if (NE > ResNE)
    NE = ResNE;

  // Pull out the first NE lanes of both operands as scalars.
  SmallVector<SDValue, 8> LHSScalars;
  SmallVector<SDValue, 8> RHSScalars;
  ExtractVectorElements(N->getOperand(0), LHSScalars, 0, NE);
  ExtractVectorElements(N->getOperand(1), RHSScalars, 0, NE);

  // The scalar overflow op yields its flag in the target's setcc result type;
  // it is converted to OvEltVT via the select below.
  EVT SVT = TLI->getSetCCResultType(getDataLayout(), *getContext(), ResEltVT);
  SDVTList VTs = getVTList(ResEltVT, SVT);
  SmallVector<SDValue, 8> ResScalars;
  SmallVector<SDValue, 8> OvScalars;
  for (unsigned i = 0; i < NE; ++i) {
    SDValue Res = getNode(Opcode, dl, VTs, LHSScalars[i], RHSScalars[i]);
    // Map the overflow flag to OvEltVT's boolean representation
    // (true-constant when set, zero otherwise).
    SDValue Ov =
        getSelect(dl, OvEltVT, Res.getValue(1),
                  getBoolConstant(true, dl, OvEltVT, ResVT),
                  getConstant(0, dl, OvEltVT));

    ResScalars.push_back(Res);
    OvScalars.push_back(Ov);
  }

  // Pad any uncomputed lanes with UNDEF.
  ResScalars.append(ResNE - NE, getUNDEF(ResEltVT));
  OvScalars.append(ResNE - NE, getUNDEF(OvEltVT));

  EVT NewResVT = EVT::getVectorVT(*getContext(), ResEltVT, ResNE);
  EVT NewOvVT = EVT::getVectorVT(*getContext(), OvEltVT, ResNE);
  return std::make_pair(getBuildVector(NewResVT, dl, ResScalars),
                        getBuildVector(NewOvVT, dl, OvScalars));
}
14263
14266 unsigned Bytes,
14267 int Dist) const {
14268 if (LD->isVolatile() || Base->isVolatile())
14269 return false;
14270 // TODO: probably too restrictive for atomics, revisit
14271 if (!LD->isSimple())
14272 return false;
14273 if (LD->isIndexed() || Base->isIndexed())
14274 return false;
14275 if (LD->getChain() != Base->getChain())
14276 return false;
14277 EVT VT = LD->getMemoryVT();
14278 if (VT.getSizeInBits() / 8 != Bytes)
14279 return false;
14280
14281 auto BaseLocDecomp = BaseIndexOffset::match(Base, *this);
14282 auto LocDecomp = BaseIndexOffset::match(LD, *this);
14283
14284 int64_t Offset = 0;
14285 if (BaseLocDecomp.equalBaseIndex(LocDecomp, *this, Offset))
14286 return (Dist * (int64_t)Bytes == Offset);
14287 return false;
14288}
14289
14290/// InferPtrAlignment - Infer alignment of a load / store address. Return
14291/// std::nullopt if it cannot be inferred.
14293 // If this is a GlobalAddress + cst, return the alignment.
14294 const GlobalValue *GV = nullptr;
14295 int64_t GVOffset = 0;
14296 if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
14297 unsigned PtrWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
14298 KnownBits Known(PtrWidth);
14300 unsigned AlignBits = Known.countMinTrailingZeros();
14301 if (AlignBits)
14302 return commonAlignment(Align(1ull << std::min(31U, AlignBits)), GVOffset);
14303 }
14304
14305 // If this is a direct reference to a stack slot, use information about the
14306 // stack slot's alignment.
14307 int FrameIdx = INT_MIN;
14308 int64_t FrameOffset = 0;
14310 FrameIdx = FI->getIndex();
14311 } else if (isBaseWithConstantOffset(Ptr) &&
14313 // Handle FI+Cst
14314 FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
14315 FrameOffset = Ptr.getConstantOperandVal(1);
14316 }
14317
14318 if (FrameIdx != INT_MIN) {
14320 return commonAlignment(MFI.getObjectAlign(FrameIdx), FrameOffset);
14321 }
14322
14323 return std::nullopt;
14324}
14325
14326/// Split the scalar node with EXTRACT_ELEMENT using the provided
14327/// VTs and return the low/high part.
14328std::pair<SDValue, SDValue> SelectionDAG::SplitScalar(const SDValue &N,
14329 const SDLoc &DL,
14330 const EVT &LoVT,
14331 const EVT &HiVT) {
14332 assert(!LoVT.isVector() && !HiVT.isVector() && !N.getValueType().isVector() &&
14333 "Split node must be a scalar type");
14334 SDValue Lo =
14336 SDValue Hi =
14338 return std::make_pair(Lo, Hi);
14339}
14340
14341/// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type
14342/// which is split (or expanded) into two not necessarily identical pieces.
14343std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const {
14344 // Currently all types are split in half.
14345 EVT LoVT, HiVT;
14346 if (!VT.isVector())
14347 LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT);
14348 else
14349 LoVT = HiVT = VT.getHalfNumVectorElementsVT(*getContext());
14350
14351 return std::make_pair(LoVT, HiVT);
14352}
14353
14354/// GetDependentSplitDestVTs - Compute the VTs needed for the low/hi parts of a
14355/// type, dependent on an enveloping VT that has been split into two identical
14356/// pieces. Sets the HiIsEmpty flag when hi type has zero storage size.
14357std::pair<EVT, EVT>
14359 bool *HiIsEmpty) const {
14360 EVT EltTp = VT.getVectorElementType();
14361 // Examples:
14362 // custom VL=8 with enveloping VL=8/8 yields 8/0 (hi empty)
14363 // custom VL=9 with enveloping VL=8/8 yields 8/1
14364 // custom VL=10 with enveloping VL=8/8 yields 8/2
14365 // etc.
14366 ElementCount VTNumElts = VT.getVectorElementCount();
14367 ElementCount EnvNumElts = EnvVT.getVectorElementCount();
14368 assert(VTNumElts.isScalable() == EnvNumElts.isScalable() &&
14369 "Mixing fixed width and scalable vectors when enveloping a type");
14370 EVT LoVT, HiVT;
14371 if (VTNumElts.getKnownMinValue() > EnvNumElts.getKnownMinValue()) {
14372 LoVT = EVT::getVectorVT(*getContext(), EltTp, EnvNumElts);
14373 HiVT = EVT::getVectorVT(*getContext(), EltTp, VTNumElts - EnvNumElts);
14374 *HiIsEmpty = false;
14375 } else {
14376 // Flag that hi type has zero storage size, but return split envelop type
14377 // (this would be easier if vector types with zero elements were allowed).
14378 LoVT = EVT::getVectorVT(*getContext(), EltTp, VTNumElts);
14379 HiVT = EVT::getVectorVT(*getContext(), EltTp, EnvNumElts);
14380 *HiIsEmpty = true;
14381 }
14382 return std::make_pair(LoVT, HiVT);
14383}
14384
14385/// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the
14386/// low/high part.
14387std::pair<SDValue, SDValue>
14388SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT,
14389 const EVT &HiVT) {
14390 assert(LoVT.isScalableVector() == HiVT.isScalableVector() &&
14391 LoVT.isScalableVector() == N.getValueType().isScalableVector() &&
14392 "Splitting vector with an invalid mixture of fixed and scalable "
14393 "vector types");
14395 N.getValueType().getVectorMinNumElements() &&
14396 "More vector elements requested than available!");
14397 SDValue Lo, Hi;
14398 Lo = getExtractSubvector(DL, LoVT, N, 0);
14399 // For scalable vectors it is safe to use LoVT.getVectorMinNumElements()
14400 // (rather than having to use ElementCount), because EXTRACT_SUBVECTOR scales
14401 // IDX with the runtime scaling factor of the result vector type. For
14402 // fixed-width result vectors, that runtime scaling factor is 1.
14405 return std::make_pair(Lo, Hi);
14406}
14407
14408std::pair<SDValue, SDValue> SelectionDAG::SplitEVL(SDValue N, EVT VecVT,
14409 const SDLoc &DL) {
14410 // Split the vector length parameter.
14411 // %evl -> umin(%evl, %halfnumelts) and usubsat(%evl - %halfnumelts).
14412 EVT VT = N.getValueType();
14414 "Expecting the mask to be an evenly-sized vector");
14415 SDValue HalfNumElts = getElementCount(
14417 SDValue Lo = getNode(ISD::UMIN, DL, VT, N, HalfNumElts);
14418 SDValue Hi = getNode(ISD::USUBSAT, DL, VT, N, HalfNumElts);
14419 return std::make_pair(Lo, Hi);
14420}
14421
14422/// Widen the vector up to the next power of two using INSERT_SUBVECTOR.
14424 EVT VT = N.getValueType();
14427 return getInsertSubvector(DL, getUNDEF(WideVT), N, 0);
14428}
14429
14432 unsigned Start, unsigned Count,
14433 EVT EltVT) {
14434 EVT VT = Op.getValueType();
14435 if (Count == 0)
14437 if (EltVT == EVT())
14438 EltVT = VT.getVectorElementType();
14439 SDLoc SL(Op);
14440 for (unsigned i = Start, e = Start + Count; i != e; ++i) {
14441 Args.push_back(getExtractVectorElt(SL, EltVT, Op, i));
14442 }
14443}
14444
14445// getAddressSpace - Return the address space this GlobalAddress belongs to.
14447 return getGlobal()->getType()->getAddressSpace();
14448}
14449
14452 return Val.MachineCPVal->getType();
14453 return Val.ConstVal->getType();
14454}
14455
14456bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue, APInt &SplatUndef,
14457 unsigned &SplatBitSize,
14458 bool &HasAnyUndefs,
14459 unsigned MinSplatBits,
14460 bool IsBigEndian) const {
14461 EVT VT = getValueType(0);
14462 assert(VT.isVector() && "Expected a vector type");
14463 unsigned VecWidth = VT.getSizeInBits();
14464 if (MinSplatBits > VecWidth)
14465 return false;
14466
14467 // FIXME: The widths are based on this node's type, but build vectors can
14468 // truncate their operands.
14469 SplatValue = APInt(VecWidth, 0);
14470 SplatUndef = APInt(VecWidth, 0);
14471
14472 // Get the bits. Bits with undefined values (when the corresponding element
14473 // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared
14474 // in SplatValue. If any of the values are not constant, give up and return
14475 // false.
14476 unsigned int NumOps = getNumOperands();
14477 assert(NumOps > 0 && "isConstantSplat has 0-size build vector");
14478 unsigned EltWidth = VT.getScalarSizeInBits();
14479
14480 for (unsigned j = 0; j < NumOps; ++j) {
14481 unsigned i = IsBigEndian ? NumOps - 1 - j : j;
14482 SDValue OpVal = getOperand(i);
14483 unsigned BitPos = j * EltWidth;
14484
14485 if (OpVal.isUndef())
14486 SplatUndef.setBits(BitPos, BitPos + EltWidth);
14487 else if (auto *CN = dyn_cast<ConstantSDNode>(OpVal))
14488 SplatValue.insertBits(CN->getAPIntValue().zextOrTrunc(EltWidth), BitPos);
14489 else if (auto *CN = dyn_cast<ConstantFPSDNode>(OpVal))
14490 SplatValue.insertBits(CN->getValueAPF().bitcastToAPInt(), BitPos);
14491 else
14492 return false;
14493 }
14494
14495 // The build_vector is all constants or undefs. Find the smallest element
14496 // size that splats the vector.
14497 HasAnyUndefs = (SplatUndef != 0);
14498
14499 // FIXME: This does not work for vectors with elements less than 8 bits.
14500 while (VecWidth > 8) {
14501 // If we can't split in half, stop here.
14502 if (VecWidth & 1)
14503 break;
14504
14505 unsigned HalfSize = VecWidth / 2;
14506 APInt HighValue = SplatValue.extractBits(HalfSize, HalfSize);
14507 APInt LowValue = SplatValue.extractBits(HalfSize, 0);
14508 APInt HighUndef = SplatUndef.extractBits(HalfSize, HalfSize);
14509 APInt LowUndef = SplatUndef.extractBits(HalfSize, 0);
14510
14511 // If the two halves do not match (ignoring undef bits), stop here.
14512 if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
14513 MinSplatBits > HalfSize)
14514 break;
14515
14516 SplatValue = HighValue | LowValue;
14517 SplatUndef = HighUndef & LowUndef;
14518
14519 VecWidth = HalfSize;
14520 }
14521
14522 // FIXME: The loop above only tries to split in halves. But if the input
14523 // vector for example is <3 x i16> it wouldn't be able to detect a
14524 // SplatBitSize of 16. No idea if that is a design flaw currently limiting
14525 // optimizations. I guess that back in the days when this helper was created
14526 // vectors normally was power-of-2 sized.
14527
14528 SplatBitSize = VecWidth;
14529 return true;
14530}
14531
14533 BitVector *UndefElements) const {
14534 unsigned NumOps = getNumOperands();
14535 if (UndefElements) {
14536 UndefElements->clear();
14537 UndefElements->resize(NumOps);
14538 }
14539 assert(NumOps == DemandedElts.getBitWidth() && "Unexpected vector size");
14540 if (!DemandedElts)
14541 return SDValue();
14542 SDValue Splatted;
14543 for (unsigned i = 0; i != NumOps; ++i) {
14544 if (!DemandedElts[i])
14545 continue;
14546 SDValue Op = getOperand(i);
14547 if (Op.isUndef()) {
14548 if (UndefElements)
14549 (*UndefElements)[i] = true;
14550 } else if (!Splatted) {
14551 Splatted = Op;
14552 } else if (Splatted != Op) {
14553 return SDValue();
14554 }
14555 }
14556
14557 if (!Splatted) {
14558 unsigned FirstDemandedIdx = DemandedElts.countr_zero();
14559 assert(getOperand(FirstDemandedIdx).isUndef() &&
14560 "Can only have a splat without a constant for all undefs.");
14561 return getOperand(FirstDemandedIdx);
14562 }
14563
14564 return Splatted;
14565}
14566
14568 APInt DemandedElts = APInt::getAllOnes(getNumOperands());
14569 return getSplatValue(DemandedElts, UndefElements);
14570}
14571
14573 SmallVectorImpl<SDValue> &Sequence,
14574 BitVector *UndefElements) const {
14575 unsigned NumOps = getNumOperands();
14576 Sequence.clear();
14577 if (UndefElements) {
14578 UndefElements->clear();
14579 UndefElements->resize(NumOps);
14580 }
14581 assert(NumOps == DemandedElts.getBitWidth() && "Unexpected vector size");
14582 if (!DemandedElts || NumOps < 2 || !isPowerOf2_32(NumOps))
14583 return false;
14584
14585 // Set the undefs even if we don't find a sequence (like getSplatValue).
14586 if (UndefElements)
14587 for (unsigned I = 0; I != NumOps; ++I)
14588 if (DemandedElts[I] && getOperand(I).isUndef())
14589 (*UndefElements)[I] = true;
14590
14591 // Iteratively widen the sequence length looking for repetitions.
14592 for (unsigned SeqLen = 1; SeqLen < NumOps; SeqLen *= 2) {
14593 Sequence.append(SeqLen, SDValue());
14594 for (unsigned I = 0; I != NumOps; ++I) {
14595 if (!DemandedElts[I])
14596 continue;
14597 SDValue &SeqOp = Sequence[I % SeqLen];
14599 if (Op.isUndef()) {
14600 if (!SeqOp)
14601 SeqOp = Op;
14602 continue;
14603 }
14604 if (SeqOp && !SeqOp.isUndef() && SeqOp != Op) {
14605 Sequence.clear();
14606 break;
14607 }
14608 SeqOp = Op;
14609 }
14610 if (!Sequence.empty())
14611 return true;
14612 }
14613
14614 assert(Sequence.empty() && "Failed to empty non-repeating sequence pattern");
14615 return false;
14616}
14617
14619 BitVector *UndefElements) const {
14620 APInt DemandedElts = APInt::getAllOnes(getNumOperands());
14621 return getRepeatedSequence(DemandedElts, Sequence, UndefElements);
14622}
14623
14626 BitVector *UndefElements) const {
14628 getSplatValue(DemandedElts, UndefElements));
14629}
14630
14633 return dyn_cast_or_null<ConstantSDNode>(getSplatValue(UndefElements));
14634}
14635
14638 BitVector *UndefElements) const {
14640 getSplatValue(DemandedElts, UndefElements));
14641}
14642
14647
14648int32_t
14650 uint32_t BitWidth) const {
14651 if (ConstantFPSDNode *CN =
14653 bool IsExact;
14654 APSInt IntVal(BitWidth);
14655 const APFloat &APF = CN->getValueAPF();
14656 if (APF.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact) !=
14657 APFloat::opOK ||
14658 !IsExact)
14659 return -1;
14660
14661 return IntVal.exactLogBase2();
14662 }
14663 return -1;
14664}
14665
14667 bool IsLittleEndian, unsigned DstEltSizeInBits,
14668 SmallVectorImpl<APInt> &RawBitElements, BitVector &UndefElements) const {
14669 // Early-out if this contains anything but Undef/Constant/ConstantFP.
14670 if (!isConstant())
14671 return false;
14672
14673 unsigned NumSrcOps = getNumOperands();
14674 unsigned SrcEltSizeInBits = getValueType(0).getScalarSizeInBits();
14675 assert(((NumSrcOps * SrcEltSizeInBits) % DstEltSizeInBits) == 0 &&
14676 "Invalid bitcast scale");
14677
14678 // Extract raw src bits.
14679 SmallVector<APInt> SrcBitElements(NumSrcOps,
14680 APInt::getZero(SrcEltSizeInBits));
14681 BitVector SrcUndeElements(NumSrcOps, false);
14682
14683 for (unsigned I = 0; I != NumSrcOps; ++I) {
14685 if (Op.isUndef()) {
14686 SrcUndeElements.set(I);
14687 continue;
14688 }
14689 auto *CInt = dyn_cast<ConstantSDNode>(Op);
14690 auto *CFP = dyn_cast<ConstantFPSDNode>(Op);
14691 assert((CInt || CFP) && "Unknown constant");
14692 SrcBitElements[I] = CInt ? CInt->getAPIntValue().trunc(SrcEltSizeInBits)
14693 : CFP->getValueAPF().bitcastToAPInt();
14694 }
14695
14696 // Recast to dst width.
14697 recastRawBits(IsLittleEndian, DstEltSizeInBits, RawBitElements,
14698 SrcBitElements, UndefElements, SrcUndeElements);
14699 return true;
14700}
14701
14702void BuildVectorSDNode::recastRawBits(bool IsLittleEndian,
14703 unsigned DstEltSizeInBits,
14704 SmallVectorImpl<APInt> &DstBitElements,
14705 ArrayRef<APInt> SrcBitElements,
14706 BitVector &DstUndefElements,
14707 const BitVector &SrcUndefElements) {
14708 unsigned NumSrcOps = SrcBitElements.size();
14709 unsigned SrcEltSizeInBits = SrcBitElements[0].getBitWidth();
14710 assert(((NumSrcOps * SrcEltSizeInBits) % DstEltSizeInBits) == 0 &&
14711 "Invalid bitcast scale");
14712 assert(NumSrcOps == SrcUndefElements.size() &&
14713 "Vector size mismatch");
14714
14715 unsigned NumDstOps = (NumSrcOps * SrcEltSizeInBits) / DstEltSizeInBits;
14716 DstUndefElements.clear();
14717 DstUndefElements.resize(NumDstOps, false);
14718 DstBitElements.assign(NumDstOps, APInt::getZero(DstEltSizeInBits));
14719
14720 // Concatenate src elements constant bits together into dst element.
14721 if (SrcEltSizeInBits <= DstEltSizeInBits) {
14722 unsigned Scale = DstEltSizeInBits / SrcEltSizeInBits;
14723 for (unsigned I = 0; I != NumDstOps; ++I) {
14724 DstUndefElements.set(I);
14725 APInt &DstBits = DstBitElements[I];
14726 for (unsigned J = 0; J != Scale; ++J) {
14727 unsigned Idx = (I * Scale) + (IsLittleEndian ? J : (Scale - J - 1));
14728 if (SrcUndefElements[Idx])
14729 continue;
14730 DstUndefElements.reset(I);
14731 const APInt &SrcBits = SrcBitElements[Idx];
14732 assert(SrcBits.getBitWidth() == SrcEltSizeInBits &&
14733 "Illegal constant bitwidths");
14734 DstBits.insertBits(SrcBits, J * SrcEltSizeInBits);
14735 }
14736 }
14737 return;
14738 }
14739
14740 // Split src element constant bits into dst elements.
14741 unsigned Scale = SrcEltSizeInBits / DstEltSizeInBits;
14742 for (unsigned I = 0; I != NumSrcOps; ++I) {
14743 if (SrcUndefElements[I]) {
14744 DstUndefElements.set(I * Scale, (I + 1) * Scale);
14745 continue;
14746 }
14747 const APInt &SrcBits = SrcBitElements[I];
14748 for (unsigned J = 0; J != Scale; ++J) {
14749 unsigned Idx = (I * Scale) + (IsLittleEndian ? J : (Scale - J - 1));
14750 APInt &DstBits = DstBitElements[Idx];
14751 DstBits = SrcBits.extractBits(DstEltSizeInBits, J * DstEltSizeInBits);
14752 }
14753 }
14754}
14755
14757 for (const SDValue &Op : op_values()) {
14758 unsigned Opc = Op.getOpcode();
14759 if (!Op.isUndef() && Opc != ISD::Constant && Opc != ISD::ConstantFP)
14760 return false;
14761 }
14762 return true;
14763}
14764
14765std::optional<std::pair<APInt, APInt>>
14767 unsigned NumOps = getNumOperands();
14768 if (NumOps < 2)
14769 return std::nullopt;
14770
14771 unsigned EltSize = getValueType(0).getScalarSizeInBits();
14772 APInt Start, Stride;
14773 int FirstIdx = -1, SecondIdx = -1;
14774
14775 // Find the first two non-undef constant elements to determine Start and
14776 // Stride, then verify all remaining elements match the sequence.
14777 for (unsigned I = 0; I < NumOps; ++I) {
14779 if (Op->isUndef())
14780 continue;
14781 if (!isa<ConstantSDNode>(Op))
14782 return std::nullopt;
14783
14784 APInt Val = getConstantOperandAPInt(I).trunc(EltSize);
14785 if (FirstIdx < 0) {
14786 FirstIdx = I;
14787 Start = Val;
14788 } else if (SecondIdx < 0) {
14789 SecondIdx = I;
14790 // Compute stride using modular arithmetic. Simple division would handle
14791 // common strides (1, 2, -1, etc.), but modular inverse maximizes matches.
14792 // Example: <0, poison, poison, 0xFF> has stride 0x55 since 3*0x55 = 0xFF
14793 // Note that modular arithmetic is agnostic to signed/unsigned.
14794 unsigned IdxDiff = I - FirstIdx;
14795 APInt ValDiff = Val - Start;
14796
14797 // Step 1: Factor out common powers of 2 from IdxDiff and ValDiff.
14798 unsigned CommonPow2Bits = llvm::countr_zero(IdxDiff);
14799 if (ValDiff.countr_zero() < CommonPow2Bits)
14800 return std::nullopt; // ValDiff not divisible by 2^CommonPow2Bits
14801 IdxDiff >>= CommonPow2Bits;
14802 ValDiff.lshrInPlace(CommonPow2Bits);
14803
14804 // Step 2: IdxDiff is now odd, so its inverse mod 2^EltSize exists.
14805 // TODO: There are 2^CommonPow2Bits valid strides; currently we only try
14806 // one, but we could try all candidates to handle more cases.
14807 Stride = ValDiff * APInt(EltSize, IdxDiff).multiplicativeInverse();
14808 if (Stride.isZero())
14809 return std::nullopt;
14810
14811 // Step 3: Adjust Start based on the first defined element's index.
14812 Start -= Stride * FirstIdx;
14813 } else {
14814 // Verify this element matches the sequence.
14815 if (Val != Start + Stride * I)
14816 return std::nullopt;
14817 }
14818 }
14819
14820 // Need at least two defined elements.
14821 if (SecondIdx < 0)
14822 return std::nullopt;
14823
14824 return std::make_pair(Start, Stride);
14825}
14826
14828 // Find the first non-undef value in the shuffle mask.
14829 unsigned i, e;
14830 for (i = 0, e = Mask.size(); i != e && Mask[i] < 0; ++i)
14831 /* search */;
14832
14833 // If all elements are undefined, this shuffle can be considered a splat
14834 // (although it should eventually get simplified away completely).
14835 if (i == e)
14836 return true;
14837
14838 // Make sure all remaining elements are either undef or the same as the first
14839 // non-undef value.
14840 for (int Idx = Mask[i]; i != e; ++i)
14841 if (Mask[i] >= 0 && Mask[i] != Idx)
14842 return false;
14843 return true;
14844}
14845
14846// Returns true if it is a constant integer BuildVector or constant integer,
14847// possibly hidden by a bitcast.
14849 SDValue N, bool AllowOpaques) const {
14851
14852 if (auto *C = dyn_cast<ConstantSDNode>(N))
14853 return AllowOpaques || !C->isOpaque();
14854
14856 return true;
14857
14858 // Treat a GlobalAddress supporting constant offset folding as a
14859 // constant integer.
14860 if (auto *GA = dyn_cast<GlobalAddressSDNode>(N))
14861 if (GA->getOpcode() == ISD::GlobalAddress &&
14862 TLI->isOffsetFoldingLegal(GA))
14863 return true;
14864
14865 if ((N.getOpcode() == ISD::SPLAT_VECTOR) &&
14866 isa<ConstantSDNode>(N.getOperand(0)))
14867 return true;
14868 return false;
14869}
14870
14871// Returns true if it is a constant float BuildVector or constant float.
14874 return true;
14875
14877 return true;
14878
14879 if ((N.getOpcode() == ISD::SPLAT_VECTOR) &&
14880 isa<ConstantFPSDNode>(N.getOperand(0)))
14881 return true;
14882
14883 return false;
14884}
14885
14886std::optional<bool> SelectionDAG::isBoolConstant(SDValue N) const {
14887 ConstantSDNode *Const =
14888 isConstOrConstSplat(N, false, /*AllowTruncation=*/true);
14889 if (!Const)
14890 return std::nullopt;
14891
14892 EVT VT = N->getValueType(0);
14893 const APInt CVal = Const->getAPIntValue().trunc(VT.getScalarSizeInBits());
14894 switch (TLI->getBooleanContents(N.getValueType())) {
14896 if (CVal.isOne())
14897 return true;
14898 if (CVal.isZero())
14899 return false;
14900 return std::nullopt;
14902 if (CVal.isAllOnes())
14903 return true;
14904 if (CVal.isZero())
14905 return false;
14906 return std::nullopt;
14908 return CVal[0];
14909 }
14910 llvm_unreachable("Unknown BooleanContent enum");
14911}
14912
14913void SelectionDAG::createOperands(SDNode *Node, ArrayRef<SDValue> Vals) {
14914 assert(!Node->OperandList && "Node already has operands");
14916 "too many operands to fit into SDNode");
14917 SDUse *Ops = OperandRecycler.allocate(
14918 ArrayRecycler<SDUse>::Capacity::get(Vals.size()), OperandAllocator);
14919
14920 bool IsDivergent = false;
14921 for (unsigned I = 0; I != Vals.size(); ++I) {
14922 Ops[I].setUser(Node);
14923 Ops[I].setInitial(Vals[I]);
14924 EVT VT = Ops[I].getValueType();
14925
14926 // Skip Chain. It does not carry divergence.
14927 if (VT != MVT::Other &&
14928 (VT != MVT::Glue || gluePropagatesDivergence(Ops[I].getNode())) &&
14929 Ops[I].getNode()->isDivergent()) {
14930 IsDivergent = true;
14931 }
14932 }
14933 Node->NumOperands = Vals.size();
14934 Node->OperandList = Ops;
14935 if (!TLI->isSDNodeAlwaysUniform(Node)) {
14936 IsDivergent |= TLI->isSDNodeSourceOfDivergence(Node, FLI, UA);
14937 Node->SDNodeBits.IsDivergent = IsDivergent;
14938 }
14939 checkForCycles(Node);
14940}
14941
14944 size_t Limit = SDNode::getMaxNumOperands();
14945 while (Vals.size() > Limit) {
14946 unsigned SliceIdx = Vals.size() - Limit;
14947 auto ExtractedTFs = ArrayRef<SDValue>(Vals).slice(SliceIdx, Limit);
14948 SDValue NewTF = getNode(ISD::TokenFactor, DL, MVT::Other, ExtractedTFs);
14949 Vals.erase(Vals.begin() + SliceIdx, Vals.end());
14950 Vals.emplace_back(NewTF);
14951 }
14952 return getNode(ISD::TokenFactor, DL, MVT::Other, Vals);
14953}
14954
14956 EVT VT, SDNodeFlags Flags) {
14957 switch (Opcode) {
14958 default:
14959 return SDValue();
14960 case ISD::ADD:
14961 case ISD::OR:
14962 case ISD::XOR:
14963 case ISD::UMAX:
14964 return getConstant(0, DL, VT);
14965 case ISD::MUL:
14966 return getConstant(1, DL, VT);
14967 case ISD::AND:
14968 case ISD::UMIN:
14969 return getAllOnesConstant(DL, VT);
14970 case ISD::SMAX:
14972 case ISD::SMIN:
14974 case ISD::FADD:
14975 // If flags allow, prefer positive zero since it's generally cheaper
14976 // to materialize on most targets.
14977 return getConstantFP(Flags.hasNoSignedZeros() ? 0.0 : -0.0, DL, VT);
14978 case ISD::FMUL:
14979 return getConstantFP(1.0, DL, VT);
14980 case ISD::FMINNUM:
14981 case ISD::FMAXNUM: {
14982 // Neutral element for fminnum is NaN, Inf or FLT_MAX, depending on FMF.
14983 const fltSemantics &Semantics = VT.getFltSemantics();
14984 APFloat NeutralAF = !Flags.hasNoNaNs() ? APFloat::getQNaN(Semantics) :
14985 !Flags.hasNoInfs() ? APFloat::getInf(Semantics) :
14986 APFloat::getLargest(Semantics);
14987 if (Opcode == ISD::FMAXNUM)
14988 NeutralAF.changeSign();
14989
14990 return getConstantFP(NeutralAF, DL, VT);
14991 }
14992 case ISD::FMINIMUM:
14993 case ISD::FMAXIMUM: {
14994 // Neutral element for fminimum is Inf or FLT_MAX, depending on FMF.
14995 const fltSemantics &Semantics = VT.getFltSemantics();
14996 APFloat NeutralAF = !Flags.hasNoInfs() ? APFloat::getInf(Semantics)
14997 : APFloat::getLargest(Semantics);
14998 if (Opcode == ISD::FMAXIMUM)
14999 NeutralAF.changeSign();
15000
15001 return getConstantFP(NeutralAF, DL, VT);
15002 }
15003
15004 }
15005}
15006
// NOTE(review): the first line of this function's signature was lost in
// extraction. The visible tail shows parameters (..., SDValue Acc,
// SDValue LHS, SDValue RHS); from the body this builds a partial-reduce
// subtract, Acc - sum(LHS * RHS) — presumably a getPartialReduceSub-style
// helper; confirm against the upstream source.
                                       SDValue Acc, SDValue LHS,
                                       SDValue RHS) {
  EVT AccVT = Acc.getValueType();
  // Floating-point: fold the subtract into the FMLA by negating RHS, i.e.
  // Acc + sum(LHS * (-RHS)) == Acc - sum(LHS * RHS).
  if (AccVT.isFloatingPoint()) {
    assert(Opc == ISD::PARTIAL_REDUCE_FMLA && "Unexpected opcode");
    SDValue NegRHS = getNode(ISD::FNEG, DL, RHS.getValueType(), RHS);
    return getNode(Opc, DL, AccVT, Acc, LHS, NegRHS);
  }
  // NOTE(review): the first line of this assert (the opcode check) was also
  // dropped in extraction; only its message string survives.
  "Unexpected opcode");
  // Integer: -((-Acc) + sum(LHS * RHS)) == Acc - sum(LHS * RHS).
  SDValue NegAcc = getNegative(Acc, DL, AccVT);
  SDValue MLA = getNode(Opc, DL, AccVT, NegAcc, LHS, RHS);
  return getNegative(MLA, DL, AccVT);
}
15022
15023/// Helper used to make a call to a library function that has one argument of
15024/// pointer type.
15025///
15026/// Such functions include 'fegetmode', 'fesetenv' and some others, which are
15027/// used to get or set floating-point state. They have one argument of pointer
15028/// type, which points to the memory region containing bits of the
15029/// floating-point state. The value returned by such function is ignored in the
15030/// created call.
15031///
15032/// \param LibFunc Reference to library function (value of RTLIB::Libcall).
15033/// \param Ptr Pointer used to save/load state.
15034/// \param InChain Ingoing token chain.
15035/// \returns Outgoing chain token.
15037 SDValue InChain,
15038 const SDLoc &DLoc) {
15039 assert(InChain.getValueType() == MVT::Other && "Expected token chain");
15041 Args.emplace_back(Ptr, Ptr.getValueType().getTypeForEVT(*getContext()));
15042 RTLIB::LibcallImpl LibcallImpl =
15043 Libcalls->getLibcallImpl(static_cast<RTLIB::Libcall>(LibFunc));
15044 if (LibcallImpl == RTLIB::Unsupported)
15045 reportFatalUsageError("emitting call to unsupported libcall");
15046
15047 SDValue Callee =
15048 getExternalSymbol(LibcallImpl, TLI->getPointerTy(getDataLayout()));
15050 CLI.setDebugLoc(DLoc).setChain(InChain).setLibCallee(
15051 Libcalls->getLibcallImplCallingConv(LibcallImpl),
15052 Type::getVoidTy(*getContext()), Callee, std::move(Args));
15053 return TLI->LowerCallTo(CLI).second;
15054}
15055
15057 assert(From && To && "Invalid SDNode; empty source SDValue?");
15058 auto I = SDEI.find(From);
15059 if (I == SDEI.end())
15060 return;
15061
15062 // Use of operator[] on the DenseMap may cause an insertion, which invalidates
15063 // the iterator, hence the need to make a copy to prevent a use-after-free.
15064 NodeExtraInfo NEI = I->second;
15065 if (LLVM_LIKELY(!NEI.PCSections)) {
15066 // No deep copy required for the types of extra info set.
15067 //
15068 // FIXME: Investigate if other types of extra info also need deep copy. This
15069 // depends on the types of nodes they can be attached to: if some extra info
15070 // is only ever attached to nodes where a replacement To node is always the
15071 // node where later use and propagation of the extra info has the intended
15072 // semantics, no deep copy is required.
15073 SDEI[To] = std::move(NEI);
15074 return;
15075 }
15076
15077 const SDNode *EntrySDN = getEntryNode().getNode();
15078
15079 // We need to copy NodeExtraInfo to all _new_ nodes that are being introduced
15080 // through the replacement of From with To. Otherwise, replacements of a node
15081 // (From) with more complex nodes (To and its operands) may result in lost
15082 // extra info where the root node (To) is insignificant in further propagating
15083 // and using extra info when further lowering to MIR.
15084 //
15085 // In the first step pre-populate the visited set with the nodes reachable
15086 // from the old From node. This avoids copying NodeExtraInfo to parts of the
15087 // DAG that is not new and should be left untouched.
15088 SmallVector<const SDNode *> Leafs{From}; // Leafs reachable with VisitFrom.
15089 DenseSet<const SDNode *> FromReach; // The set of nodes reachable from From.
15090 auto VisitFrom = [&](auto &&Self, const SDNode *N, int MaxDepth) {
15091 if (MaxDepth == 0) {
15092 // Remember this node in case we need to increase MaxDepth and continue
15093 // populating FromReach from this node.
15094 Leafs.emplace_back(N);
15095 return;
15096 }
15097 if (!FromReach.insert(N).second)
15098 return;
15099 for (const SDValue &Op : N->op_values())
15100 Self(Self, Op.getNode(), MaxDepth - 1);
15101 };
15102
15103 // Copy extra info to To and all its transitive operands (that are new).
15105 auto DeepCopyTo = [&](auto &&Self, const SDNode *N) {
15106 if (FromReach.contains(N))
15107 return true;
15108 if (!Visited.insert(N).second)
15109 return true;
15110 if (EntrySDN == N)
15111 return false;
15112 for (const SDValue &Op : N->op_values()) {
15113 if (N == To && Op.getNode() == EntrySDN) {
15114 // Special case: New node's operand is the entry node; just need to
15115 // copy extra info to new node.
15116 break;
15117 }
15118 if (!Self(Self, Op.getNode()))
15119 return false;
15120 }
15121 // Copy only if entry node was not reached.
15122 SDEI[N] = std::move(NEI);
15123 return true;
15124 };
15125
15126 // We first try with a lower MaxDepth, assuming that the path to common
15127 // operands between From and To is relatively short. This significantly
15128 // improves performance in the common case. The initial MaxDepth is big
15129 // enough to avoid retry in the common case; the last MaxDepth is large
15130 // enough to avoid having to use the fallback below (and protects from
15131 // potential stack exhaustion from recursion).
15132 for (int PrevDepth = 0, MaxDepth = 16; MaxDepth <= 1024;
15133 PrevDepth = MaxDepth, MaxDepth *= 2, Visited.clear()) {
15134 // StartFrom is the previous (or initial) set of leafs reachable at the
15135 // previous maximum depth.
15137 std::swap(StartFrom, Leafs);
15138 for (const SDNode *N : StartFrom)
15139 VisitFrom(VisitFrom, N, MaxDepth - PrevDepth);
15140 if (LLVM_LIKELY(DeepCopyTo(DeepCopyTo, To)))
15141 return;
15142 // This should happen very rarely (reached the entry node).
15143 LLVM_DEBUG(dbgs() << __func__ << ": MaxDepth=" << MaxDepth << " too low\n");
15144 assert(!Leafs.empty());
15145 }
15146
15147 // This should not happen - but if it did, that means the subgraph reachable
15148 // from From has depth greater or equal to maximum MaxDepth, and VisitFrom()
15149 // could not visit all reachable common operands. Consequently, we were able
15150 // to reach the entry node.
15151 errs() << "warning: incomplete propagation of SelectionDAG::NodeExtraInfo\n";
15152 assert(false && "From subgraph too complex - increase max. MaxDepth?");
15153 // Best-effort fallback if assertions disabled.
15154 SDEI[To] = std::move(NEI);
15155}
15156
15157#ifndef NDEBUG
15158static void checkForCyclesHelper(const SDNode *N,
15161 const llvm::SelectionDAG *DAG) {
15162 // If this node has already been checked, don't check it again.
15163 if (Checked.count(N))
15164 return;
15165
15166 // If a node has already been visited on this depth-first walk, reject it as
15167 // a cycle.
15168 if (!Visited.insert(N).second) {
15169 errs() << "Detected cycle in SelectionDAG\n";
15170 dbgs() << "Offending node:\n";
15171 N->dumprFull(DAG); dbgs() << "\n";
15172 abort();
15173 }
15174
15175 for (const SDValue &Op : N->op_values())
15176 checkForCyclesHelper(Op.getNode(), Visited, Checked, DAG);
15177
15178 Checked.insert(N);
15179 Visited.erase(N);
15180}
15181#endif
15182
15184 const llvm::SelectionDAG *DAG,
15185 bool force) {
15186#ifndef NDEBUG
15187 bool check = force;
15188#ifdef EXPENSIVE_CHECKS
15189 check = true;
15190#endif // EXPENSIVE_CHECKS
15191 if (check) {
15192 assert(N && "Checking nonexistent SDNode");
15195 checkForCyclesHelper(N, visited, checked, DAG);
15196 }
15197#endif // !NDEBUG
15198}
15199
15200void llvm::checkForCycles(const llvm::SelectionDAG *DAG, bool force) {
15201 checkForCycles(DAG->getRoot().getNode(), DAG, force);
15202}
return SDValue()
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static bool isConstant(const MachineInstr &MI)
constexpr LLT S1
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
This file implements the APSInt class, which is a simple class that represents an arbitrary sized int...
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
#define X(NUM, ENUM, NAME)
Definition ELF.h:853
This file implements the BitVector class.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static std::optional< bool > isBigEndian(const SmallDenseMap< int64_t, int64_t, 8 > &MemOffset2Idx, int64_t LowestIdx)
Given a map from byte offsets in memory to indices in a load/store, determine if that map corresponds...
#define __asan_unpoison_memory_region(p, size)
Definition Compiler.h:592
#define LLVM_LIKELY(EXPR)
Definition Compiler.h:335
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file defines the DenseSet and SmallDenseSet classes.
This file contains constants used for implementing Dwarf debug support.
This file defines a hash set that can be used to remove duplication of nodes in a graph.
iv users
Definition IVUsers.cpp:48
std::pair< Instruction::BinaryOps, Value * > OffsetOp
Find all possible pairs (BinOp, RHS) that BinOp V, RHS can be simplified.
static constexpr Value * getValue(Ty &ValueOrUse)
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static Register getMemsetValue(Register Val, LLT Ty, MachineIRBuilder &MIB)
static bool shouldLowerMemFuncForSize(const MachineFunction &MF)
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
Definition Lint.cpp:539
static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG)
static bool isConstantSplatVector(SDValue N, APInt &SplatValue, unsigned MinSizeInBits)
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
#define G(x, y, z)
Definition MD5.cpp:55
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
Register const TargetRegisterInfo * TRI
This file provides utility analysis objects describing memory locations.
This file contains the declarations for metadata subclasses.
#define T
static MCRegister getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
#define P(N)
PowerPC Reduce CR logical Operation
const SmallVectorImpl< MachineOperand > & Cond
Remove Loads Into Fake Uses
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
Contains matchers for matching SelectionDAG nodes and values.
static Type * getValueType(Value *V, bool LookThroughCmp=false)
Returns the "element type" of the given value/instruction V.
This file contains some templates that are useful if you are working with the STL at all.
static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow)
static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src, uint64_t Size, Align Alignment, bool isVol, bool AlwaysInline, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo, BatchAAResults *BatchAA)
static SDValue getFixedOrScalableQuantity(SelectionDAG &DAG, const SDLoc &DL, EVT VT, Ty Quantity)
static std::pair< SDValue, SDValue > getRuntimeCallSDValueHelper(SDValue Chain, const SDLoc &dl, TargetLowering::ArgListTy &&Args, const CallInst *CI, RTLIB::Libcall Call, SelectionDAG *DAG, const TargetLowering *TLI)
static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src, uint64_t Size, Align Alignment, bool isVol, bool AlwaysInline, MachinePointerInfo DstPtrInfo, const AAMDNodes &AAInfo)
Lower the call to 'memset' intrinsic function into a series of store operations.
static std::optional< APInt > FoldValueWithUndef(unsigned Opcode, const APInt &C1, bool IsUndef1, const APInt &C2, bool IsUndef2)
static SDValue FoldSTEP_VECTOR(const SDLoc &DL, EVT VT, SDValue Step, SelectionDAG &DAG)
static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned OpC, SDVTList VTList, ArrayRef< SDValue > OpList)
static SDValue getMemsetStringVal(EVT VT, const SDLoc &dl, SelectionDAG &DAG, const TargetLowering &TLI, const ConstantDataArraySlice &Slice)
getMemsetStringVal - Similar to getMemsetValue.
static cl::opt< bool > EnableMemCpyDAGOpt("enable-memcpy-dag-opt", cl::Hidden, cl::init(true), cl::desc("Gang up loads and stores generated by inlining of memcpy"))
static bool haveNoCommonBitsSetCommutative(SDValue A, SDValue B)
static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList)
AddNodeIDValueTypes - Value type lists are intern'd so we can represent them solely with their pointe...
static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef< int > M)
Swaps the values of N1 and N2.
static bool isMemSrcFromConstant(SDValue Src, ConstantDataArraySlice &Slice)
Returns true if memcpy source is constant data.
static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src, uint64_t Size, Align Alignment, bool isVol, bool AlwaysInline, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo)
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC)
AddNodeIDOpcode - Add the node opcode to the NodeID data.
static ISD::CondCode getSetCCInverseImpl(ISD::CondCode Op, bool isIntegerLike)
static bool doNotCSE(SDNode *N)
doNotCSE - Return true if CSE should not be performed for this node.
static cl::opt< int > MaxLdStGlue("ldstmemcpy-glue-max", cl::desc("Number limit for gluing ld/st of memcpy."), cl::Hidden, cl::init(0))
static void AddNodeIDOperands(FoldingSetNodeID &ID, ArrayRef< SDValue > Ops)
AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static SDValue foldCONCAT_VECTORS(const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops, SelectionDAG &DAG)
Try to simplify vector concatenation to an input value, undef, or build vector.
static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info, SelectionDAG &DAG, SDValue Ptr, int64_t Offset=0)
InferPointerInfo - If the specified ptr/offset is a frame index, infer a MachinePointerInfo record fr...
static bool isInTailCallPositionWrapper(const CallInst *CI, const SelectionDAG *SelDAG, bool AllowReturnsFirstArg)
static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N)
If this is an SDNode with special info, add this info to the NodeID data.
static bool gluePropagatesDivergence(const SDNode *Node)
Return true if a glue output should propagate divergence information.
static void NewSDValueDbgMsg(SDValue V, StringRef Msg, SelectionDAG *G)
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs)
makeVTList - Return an instance of the SDVTList struct initialized with the specified members.
static void checkForCyclesHelper(const SDNode *N, SmallPtrSetImpl< const SDNode * > &Visited, SmallPtrSetImpl< const SDNode * > &Checked, const llvm::SelectionDAG *DAG)
static void chainLoadsAndStoresForMemcpy(SelectionDAG &DAG, const SDLoc &dl, SmallVector< SDValue, 32 > &OutChains, unsigned From, unsigned To, SmallVector< SDValue, 16 > &OutLoadChains, SmallVector< SDValue, 16 > &OutStoreChains)
static int isSignedOp(ISD::CondCode Opcode)
For an integer comparison, return 1 if the comparison is a signed operation and 2 if the result is an...
static std::optional< APInt > FoldValue(unsigned Opcode, const APInt &C1, const APInt &C2)
static SDValue FoldBUILD_VECTOR(const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops, SelectionDAG &DAG)
static void checkAddrSpaceIsValidForLibcall(const TargetLowering *TLI, unsigned AS)
static cl::opt< unsigned > MaxSteps("has-predecessor-max-steps", cl::Hidden, cl::init(8192), cl::desc("DAG combiner limit number of steps when searching DAG " "for predecessor nodes"))
static APInt getDemandAllEltsMask(SDValue V)
Construct a DemandedElts mask which demands all elements of V.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
#define LLVM_DEBUG(...)
Definition Debug.h:119
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
This file describes how to lower LLVM code to machine code.
static void removeOperands(MachineInstr &MI, unsigned i)
static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR)
Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
static int Lookup(ArrayRef< TableEntry > Table, unsigned Opcode)
static unsigned getSize(unsigned Kind)
static const fltSemantics & IEEEsingle()
Definition APFloat.h:296
cmpResult
IEEE-754R 5.11: Floating Point Comparison Relations.
Definition APFloat.h:334
static constexpr roundingMode rmTowardZero
Definition APFloat.h:348
static const fltSemantics & BFloat()
Definition APFloat.h:295
static const fltSemantics & IEEEquad()
Definition APFloat.h:298
static const fltSemantics & IEEEdouble()
Definition APFloat.h:297
static constexpr roundingMode rmTowardNegative
Definition APFloat.h:347
static constexpr roundingMode rmNearestTiesToEven
Definition APFloat.h:344
static constexpr roundingMode rmTowardPositive
Definition APFloat.h:346
static const fltSemantics & IEEEhalf()
Definition APFloat.h:294
opStatus
IEEE-754R 7: Default exception handling.
Definition APFloat.h:360
static APFloat getQNaN(const fltSemantics &Sem, bool Negative=false, const APInt *payload=nullptr)
Factory for QNaN values.
Definition APFloat.h:1179
opStatus divide(const APFloat &RHS, roundingMode RM)
Definition APFloat.h:1267
void copySign(const APFloat &RHS)
Definition APFloat.h:1361
LLVM_ABI opStatus convert(const fltSemantics &ToSemantics, roundingMode RM, bool *losesInfo)
Definition APFloat.cpp:5899
opStatus subtract(const APFloat &RHS, roundingMode RM)
Definition APFloat.h:1249
bool isExactlyValue(double V) const
We don't rely on operator== working on double values, as it returns true for things that are clearly ...
Definition APFloat.h:1521
opStatus add(const APFloat &RHS, roundingMode RM)
Definition APFloat.h:1240
bool isFinite() const
Definition APFloat.h:1543
opStatus convertFromAPInt(const APInt &Input, bool IsSigned, roundingMode RM)
Definition APFloat.h:1406
opStatus multiply(const APFloat &RHS, roundingMode RM)
Definition APFloat.h:1258
opStatus fusedMultiplyAdd(const APFloat &Multiplicand, const APFloat &Addend, roundingMode RM)
Definition APFloat.h:1294
bool isZero() const
Definition APFloat.h:1534
static APFloat getLargest(const fltSemantics &Sem, bool Negative=false)
Returns the largest finite number in the given semantics.
Definition APFloat.h:1197
opStatus convertToInteger(MutableArrayRef< integerPart > Input, unsigned int Width, bool IsSigned, roundingMode RM, bool *IsExact) const
Definition APFloat.h:1391
static APFloat getInf(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Infinity.
Definition APFloat.h:1157
opStatus mod(const APFloat &RHS)
Definition APFloat.h:1285
bool isPosZero() const
Definition APFloat.h:1549
bool isNegZero() const
Definition APFloat.h:1550
void changeSign()
Definition APFloat.h:1356
static APFloat getNaN(const fltSemantics &Sem, bool Negative=false, uint64_t payload=0)
Factory for NaN values.
Definition APFloat.h:1168
Class for arbitrary precision integers.
Definition APInt.h:78
LLVM_ABI APInt umul_ov(const APInt &RHS, bool &Overflow) const
Definition APInt.cpp:2023
LLVM_ABI APInt usub_sat(const APInt &RHS) const
Definition APInt.cpp:2107
LLVM_ABI APInt udiv(const APInt &RHS) const
Unsigned division operation.
Definition APInt.cpp:1616
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition APInt.h:235
void clearBit(unsigned BitPosition)
Set a given bit to 0.
Definition APInt.h:1429
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
Definition APInt.cpp:1055
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
Definition APInt.h:230
bool isMinSignedValue() const
Determine if this is the smallest signed value.
Definition APInt.h:424
uint64_t getZExtValue() const
Get zero extended value.
Definition APInt.h:1563
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
Definition APInt.h:1414
unsigned popcount() const
Count the number of bits set.
Definition APInt.h:1693
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
Definition APInt.h:1408
LLVM_ABI APInt getHiBits(unsigned numBits) const
Compute an APInt containing numBits highbits from this APInt.
Definition APInt.cpp:640
LLVM_ABI APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
Definition APInt.cpp:1076
unsigned getActiveBits() const
Compute the number of active bits in the value.
Definition APInt.h:1535
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
Definition APInt.cpp:968
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
Definition APInt.h:1353
APInt abs() const
Get the absolute value.
Definition APInt.h:1818
LLVM_ABI APInt sadd_sat(const APInt &RHS) const
Definition APInt.cpp:2078
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
Definition APInt.h:372
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
Definition APInt.h:1189
static APInt getBitsSet(unsigned numBits, unsigned loBit, unsigned hiBit)
Get a value with a block of bits set.
Definition APInt.h:259
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition APInt.h:381
LLVM_ABI APInt urem(const APInt &RHS) const
Unsigned remainder operation.
Definition APInt.cpp:1709
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition APInt.h:1511
bool ult(const APInt &RHS) const
Unsigned less than comparison.
Definition APInt.h:1118
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
Definition APInt.h:210
bool isNegative() const
Determine sign of this APInt.
Definition APInt.h:330
LLVM_ABI APInt sdiv(const APInt &RHS) const
Signed division function for APInt.
Definition APInt.cpp:1687
void clearAllBits()
Set every bit to 0.
Definition APInt.h:1419
LLVM_ABI APInt rotr(unsigned rotateAmt) const
Rotate right by rotateAmt.
Definition APInt.cpp:1197
LLVM_ABI APInt reverseBits() const
Definition APInt.cpp:790
void ashrInPlace(unsigned ShiftAmt)
Arithmetic right-shift this APInt by ShiftAmt in place.
Definition APInt.h:841
bool sle(const APInt &RHS) const
Signed less or equal comparison.
Definition APInt.h:1173
unsigned countr_zero() const
Count the number of trailing zero bits.
Definition APInt.h:1662
unsigned getNumSignBits() const
Computes the number of leading bits of this APInt that are equal to its sign bit.
Definition APInt.h:1651
unsigned countl_zero() const
The APInt version of std::countl_zero.
Definition APInt.h:1621
static LLVM_ABI APInt getSplat(unsigned NewLen, const APInt &V)
Return a value containing V broadcasted over NewLen bits.
Definition APInt.cpp:652
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
Definition APInt.h:220
LLVM_ABI APInt sshl_sat(const APInt &RHS) const
Definition APInt.cpp:2138
LLVM_ABI APInt ushl_sat(const APInt &RHS) const
Definition APInt.cpp:2152
LLVM_ABI APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
Definition APInt.cpp:1084
static bool isSameValue(const APInt &I1, const APInt &I2, bool SignedCompare=false)
Determine if two APInts have the same value, after zero-extending or sign-extending (if SignedCompare...
Definition APInt.h:555
LLVM_ABI APInt rotl(unsigned rotateAmt) const
Rotate left by rotateAmt.
Definition APInt.cpp:1184
LLVM_ABI void insertBits(const APInt &SubBits, unsigned bitPosition)
Insert the bits from a smaller APInt starting at bitPosition.
Definition APInt.cpp:398
void clearLowBits(unsigned loBits)
Set bottom loBits bits to 0.
Definition APInt.h:1458
unsigned logBase2() const
Definition APInt.h:1784
LLVM_ABI APInt uadd_sat(const APInt &RHS) const
Definition APInt.cpp:2088
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
Definition APInt.h:834
LLVM_ABI APInt multiplicativeInverse() const
Definition APInt.cpp:1317
LLVM_ABI APInt srem(const APInt &RHS) const
Function for signed remainder operation.
Definition APInt.cpp:1788
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
Definition APInt.h:335
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition APInt.h:1157
LLVM_ABI APInt sext(unsigned width) const
Sign extend to a new width.
Definition APInt.cpp:1028
void setBits(unsigned loBit, unsigned hiBit)
Set the bits from loBit (inclusive) to hiBit (exclusive) to 1.
Definition APInt.h:1390
APInt shl(unsigned shiftAmt) const
Left-shift function.
Definition APInt.h:880
LLVM_ABI APInt byteSwap() const
Definition APInt.cpp:768
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
Definition APInt.h:1264
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:441
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition APInt.h:307
void clearBits(unsigned LoBit, unsigned HiBit)
Clear the bits from LoBit (inclusive) to HiBit (exclusive) to 0.
Definition APInt.h:1440
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
Definition APInt.h:201
void setLowBits(unsigned loBits)
Set the bottom loBits bits.
Definition APInt.h:1411
LLVM_ABI APInt extractBits(unsigned numBits, unsigned bitPosition) const
Return an APInt with the extracted bits [bitPosition,bitPosition+numBits).
Definition APInt.cpp:483
bool sge(const APInt &RHS) const
Signed greater or equal comparison.
Definition APInt.h:1244
bool isOne() const
Determine if this is a value of 1.
Definition APInt.h:390
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
Definition APInt.h:287
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
Definition APInt.h:240
void lshrInPlace(unsigned ShiftAmt)
Logical right-shift this APInt by ShiftAmt in place.
Definition APInt.h:865
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
Definition APInt.h:858
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
Definition APInt.h:1228
LLVM_ABI APInt ssub_sat(const APInt &RHS) const
Definition APInt.cpp:2097
An arbitrary precision integer that knows its signedness.
Definition APSInt.h:24
unsigned getSrcAddressSpace() const
unsigned getDestAddressSpace() const
static Capacity get(size_t N)
Get the capacity of an array that can hold at least N elements.
Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
size_t size() const
Get the array size.
Definition ArrayRef.h:141
bool empty() const
Check if the array is empty.
Definition ArrayRef.h:136
This is an SDNode representing atomic operations.
static LLVM_ABI BaseIndexOffset match(const SDNode *N, const SelectionDAG &DAG)
Parses tree in N for base, index, offset addresses.
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal=false)
BitVector & reset()
Reset all bits in the bitvector.
Definition BitVector.h:409
void resize(unsigned N, bool t=false)
Grow or shrink the bitvector.
Definition BitVector.h:355
void clear()
Removes all bits from the bitvector.
Definition BitVector.h:349
BitVector & set()
Set all bits in the bitvector.
Definition BitVector.h:366
bool none() const
Returns true if none of the bits are set.
Definition BitVector.h:207
size_type size() const
Returns the number of bits in this bitvector.
Definition BitVector.h:178
const BlockAddress * getBlockAddress() const
The address of a basic block.
Definition Constants.h:1071
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
A "pseudo-class" with methods for operating on BUILD_VECTORs.
LLVM_ABI bool getConstantRawBits(bool IsLittleEndian, unsigned DstEltSizeInBits, SmallVectorImpl< APInt > &RawBitElements, BitVector &UndefElements) const
Extract the raw bit data from a build vector of Undef, Constant or ConstantFP node elements.
static LLVM_ABI void recastRawBits(bool IsLittleEndian, unsigned DstEltSizeInBits, SmallVectorImpl< APInt > &DstBitElements, ArrayRef< APInt > SrcBitElements, BitVector &DstUndefElements, const BitVector &SrcUndefElements)
Recast bit data SrcBitElements to DstEltSizeInBits wide elements.
LLVM_ABI bool getRepeatedSequence(const APInt &DemandedElts, SmallVectorImpl< SDValue > &Sequence, BitVector *UndefElements=nullptr) const
Find the shortest repeating sequence of values in the build vector.
LLVM_ABI ConstantFPSDNode * getConstantFPSplatNode(const APInt &DemandedElts, BitVector *UndefElements=nullptr) const
Returns the demanded splatted constant FP or null if this is not a constant FP splat.
LLVM_ABI SDValue getSplatValue(const APInt &DemandedElts, BitVector *UndefElements=nullptr) const
Returns the demanded splatted value or a null value if this is not a splat.
LLVM_ABI bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef, unsigned &SplatBitSize, bool &HasAnyUndefs, unsigned MinSplatBits=0, bool isBigEndian=false) const
Check if this is a constant splat, and if so, find the smallest element size that splats the vector.
LLVM_ABI ConstantSDNode * getConstantSplatNode(const APInt &DemandedElts, BitVector *UndefElements=nullptr) const
Returns the demanded splatted constant or null if this is not a constant splat.
LLVM_ABI int32_t getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements, uint32_t BitWidth) const
If this is a constant FP splat and the splatted constant FP is an exact power or 2,...
LLVM_ABI std::optional< std::pair< APInt, APInt > > isArithmeticSequence() const
If this BuildVector is constant and represents an arithmetic sequence "<a, a+n, a+2n,...
LLVM_ABI bool isConstant() const
This class represents a function call, abstracting a target machine's calling convention.
bool isTailCall() const
static LLVM_ABI bool isValueValidForType(EVT VT, const APFloat &Val)
const APFloat & getValueAPF() const
bool isExactlyValue(double V) const
We don't rely on operator== working on double values, as it returns true for things that are clearly ...
ConstantFP - Floating Point Values [float, double].
Definition Constants.h:420
const APFloat & getValue() const
Definition Constants.h:464
This is the shared class of boolean and integer constants.
Definition Constants.h:87
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition Constants.h:162
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition Constants.h:159
MachineConstantPoolValue * getMachineCPVal() const
const Constant * getConstVal() const
LLVM_ABI Type * getType() const
This class represents a range of values.
PreferredRangeType
If represented precisely, the result of some range operations may consist of multiple disjoint ranges...
const APInt * getSingleElement() const
If this set contains a single element, return it, otherwise return null.
static LLVM_ABI ConstantRange fromKnownBits(const KnownBits &Known, bool IsSigned)
Initialize a range based on a known bits constraint.
LLVM_ABI OverflowResult unsignedSubMayOverflow(const ConstantRange &Other) const
Return whether unsigned sub of the two ranges always/never overflows.
LLVM_ABI OverflowResult unsignedAddMayOverflow(const ConstantRange &Other) const
Return whether unsigned add of the two ranges always/never overflows.
LLVM_ABI KnownBits toKnownBits() const
Return known bits for values in this range.
LLVM_ABI ConstantRange zeroExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
LLVM_ABI APInt getSignedMin() const
Return the smallest signed value contained in the ConstantRange.
LLVM_ABI OverflowResult unsignedMulMayOverflow(const ConstantRange &Other) const
Return whether unsigned mul of the two ranges always/never overflows.
LLVM_ABI ConstantRange signExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
LLVM_ABI ConstantRange multiply(const ConstantRange &Other, unsigned NoWrapKind=0) const
Return a new range representing the possible values resulting from a multiplication of a value in thi...
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
LLVM_ABI APInt getUnsignedMax() const
Return the largest unsigned value contained in the ConstantRange.
LLVM_ABI ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
LLVM_ABI APInt getSignedMax() const
Return the largest signed value contained in the ConstantRange.
OverflowResult
Represents whether an operation on the given constant range is known to always or never overflow.
@ AlwaysOverflowsHigh
Always overflows in the direction of signed/unsigned max value.
@ AlwaysOverflowsLow
Always overflows in the direction of signed/unsigned min value.
@ MayOverflow
May or may not overflow.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
LLVM_ABI OverflowResult signedSubMayOverflow(const ConstantRange &Other) const
Return whether signed sub of the two ranges always/never overflows.
uint64_t getZExtValue() const
const APInt & getAPIntValue() const
This is an important base class in LLVM.
Definition Constant.h:43
LLVM_ABI Constant * getSplatValue(bool AllowPoison=false) const
If all elements of the vector constant have the same value, return that value.
LLVM_ABI Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
DWARF expression.
static LLVM_ABI ExtOps getExtOps(unsigned FromSize, unsigned ToSize, bool Signed)
Returns the ops for a zero- or sign-extension in a DIExpression.
static LLVM_ABI void appendOffset(SmallVectorImpl< uint64_t > &Ops, int64_t Offset)
Append Ops with operations to apply the Offset.
static LLVM_ABI DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
static LLVM_ABI const DIExpression * convertToVariadicExpression(const DIExpression *Expr)
If Expr is a non-variadic expression (i.e.
static LLVM_ABI std::optional< DIExpression * > createFragmentExpression(const DIExpression *Expr, unsigned OffsetInBits, unsigned SizeInBits)
Create a DIExpression to describe one part of an aggregate variable that is fragmented across multipl...
Base class for variables.
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
bool isLittleEndian() const
Layout endianness...
Definition DataLayout.h:217
LLVM_ABI IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space.
LLVM_ABI Align getABITypeAlign(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
LLVM_ABI unsigned getPointerTypeSizeInBits(Type *) const
The pointer representation size in bits for this type.
LLVM_ABI Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
A debug info location.
Definition DebugLoc.h:123
Implements a dense probed hash-table based set.
Definition DenseSet.h:279
const char * getSymbol() const
This class is used to gather all the unique data bits of a node.
Definition FoldingSet.h:208
Data structure describing the variable locations in a function.
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
Definition Function.h:711
AttributeList getAttributes() const
Return the attribute list for this Function.
Definition Function.h:354
LLVM_ABI unsigned getAddressSpace() const
const GlobalValue * getGlobal() const
bool isThreadLocal() const
If the value is "Thread Local", its value isn't shared by the threads.
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
PointerType * getType() const
Global values are always pointers.
This class is used to form a handle around another node that is persistent and is updated across invo...
const SDValue & getValue() const
static LLVM_ABI bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
Tracks which library functions to use for a particular subtarget.
LLVM_ABI CallingConv::ID getLibcallImplCallingConv(RTLIB::LibcallImpl Call) const
Get the CallingConv that should be used for the specified libcall.
LLVM_ABI RTLIB::LibcallImpl getLibcallImpl(RTLIB::Libcall Call) const
Return the lowering's selection of implementation call for Call.
This SDNode is used for LIFETIME_START/LIFETIME_END values.
This class is used to represent ISD::LOAD nodes.
static LocationSize precise(uint64_t Value)
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition MCSymbol.h:42
Metadata node.
Definition Metadata.h:1080
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1444
Machine Value Type.
SimpleValueType SimpleTy
static MVT getIntegerVT(unsigned BitWidth)
Abstract base class for all machine specific constantpool value subclasses.
virtual void addSelectionDAGCSEId(FoldingSetNodeID &ID)=0
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
LLVM_ABI int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
bool isFixedObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a fixed stack object.
void setObjectAlignment(int ObjectIdx, Align Alignment)
setObjectAlignment - Change the alignment of the specified stack object.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
Function & getFunction()
Return the LLVM function that this machine code represents.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
A description of a memory reference used in the backend.
const MDNode * getRanges() const
Return the range tag for the memory reference.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
const MachinePointerInfo & getPointerInfo() const
Flags getFlags() const
Return the raw flags of the source value,.
This class contains meta information specific to a module.
An SDNode that represents everything that will be needed to construct a MachineInstr.
This class is used to represent an MGATHER node.
This class is used to represent an MLOAD node.
This class is used to represent an MSCATTER node.
This class is used to represent an MSTORE node.
This SDNode is used for target intrinsics that touch memory and need an associated MachineMemOperand.
size_t getNumMemOperands() const
Return the number of memory operands.
LLVM_ABI MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTs, EVT memvt, PointerUnion< MachineMemOperand *, MachineMemOperand ** > memrefs)
Constructor that supports single or multiple MMOs.
PointerUnion< MachineMemOperand *, MachineMemOperand ** > MemRefs
Memory reference information.
MachineMemOperand * getMemOperand() const
Return the unique MachineMemOperand object describing the memory reference performed by operation.
const MachinePointerInfo & getPointerInfo() const
ArrayRef< MachineMemOperand * > memoperands() const
Return the memory operands for this node.
unsigned getRawSubclassData() const
Return the SubclassData value, without HasDebugValue.
EVT getMemoryVT() const
Return the type of the in-memory value.
Representation for a specific memory location.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
Function * getFunction(StringRef Name) const
Look up the specified function in the module symbol table.
Definition Module.cpp:235
Represent a mutable reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:294
The optimization diagnostic interface.
Pass interface - Implemented by all 'passes'.
Definition Pass.h:99
Class to represent pointers.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
unsigned getAddressSpace() const
Return the address space of the Pointer type.
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
A discriminated union of two or more pointer types, with the discriminator in the low bits of the poi...
bool isNull() const
Test if the pointer held in the union is null, regardless of which type it is.
Analysis providing profile information.
void Deallocate(SubClass *E)
Deallocate - Release storage for the pointed-to object.
Wrapper class representing virtual and physical registers.
Definition Register.h:20
Keeps track of dbg_value information through SDISel.
LLVM_ABI void add(SDDbgValue *V, bool isParameter)
LLVM_ABI void erase(const SDNode *Node)
Invalidate all DbgValues attached to the node and remove it from the Node-to-DbgValues map.
Holds the information from a dbg_label node through SDISel.
Holds the information for a single machine location through SDISel; either an SDNode,...
static SDDbgOperand fromNode(SDNode *Node, unsigned ResNo)
static SDDbgOperand fromFrameIdx(unsigned FrameIdx)
static SDDbgOperand fromVReg(Register VReg)
static SDDbgOperand fromConst(const Value *Const)
@ SDNODE
Value is the result of an expression.
Holds the information from a dbg_value node through SDISel.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
const DebugLoc & getDebugLoc() const
unsigned getIROrder() const
This class provides iterator support for SDUse operands that use a specific SDNode.
Represents one node in the SelectionDAG.
ArrayRef< SDUse > ops() const
const APInt & getAsAPIntVal() const
Helper method returns the APInt value of a ConstantSDNode.
LLVM_ABI void dumprFull(const SelectionDAG *G=nullptr) const
printrFull to dbgs().
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
bool isDivergent() const
LLVM_ABI bool isOnlyUserOf(const SDNode *N) const
Return true if this node is the only use of N.
iterator_range< value_op_iterator > op_values() const
unsigned getIROrder() const
Return the node ordering.
static constexpr size_t getMaxNumOperands()
Return the maximum number of operands that a SDNode can hold.
iterator_range< use_iterator > uses()
MemSDNodeBitfields MemSDNodeBits
LLVM_ABI void Profile(FoldingSetNodeID &ID) const
Gather unique data for the node.
bool getHasDebugValue() const
SDNodeFlags getFlags() const
void setNodeId(int Id)
Set unique node id.
LLVM_ABI void intersectFlagsWith(const SDNodeFlags Flags)
Clear any flags in this node that aren't also set in Flags.
static bool hasPredecessorHelper(const SDNode *N, SmallPtrSetImpl< const SDNode * > &Visited, SmallVectorImpl< const SDNode * > &Worklist, unsigned int MaxSteps=0, bool TopologicalPrune=false)
Returns true if N is a predecessor of any node in Worklist.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
bool use_empty() const
Return true if there are no uses of this node.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
unsigned getNumOperands() const
Return the number of values used by this operation.
const SDValue & getOperand(unsigned Num) const
static LLVM_ABI bool areOnlyUsersOf(ArrayRef< const SDNode * > Nodes, const SDNode *N)
Return true if all the users of N are contained in Nodes.
use_iterator use_begin() const
Provide iteration support to walk over all uses of an SDNode.
LLVM_ABI bool isOperandOf(const SDNode *N) const
Return true if this node is an operand of N.
const APInt & getConstantOperandAPInt(unsigned Num) const
Helper method returns the APInt of a ConstantSDNode operand.
std::optional< APInt > bitcastToAPInt() const
LLVM_ABI bool hasPredecessor(const SDNode *N) const
Return true if N is a predecessor of this node.
LLVM_ABI bool hasAnyUseOfValue(unsigned Value) const
Return true if there are any use of the indicated value.
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
bool isUndef() const
Returns true if the node type is UNDEF or POISON.
op_iterator op_end() const
op_iterator op_begin() const
static use_iterator use_end()
LLVM_ABI void DropOperands()
Release the operands and set this node to have zero operands.
SDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs)
Create an SDNode.
Represents a use of a SDNode.
SDNode * getUser()
This returns the SDNode that contains this Use.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
bool isUndef() const
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
LLVM_ABI bool isOperandOf(const SDNode *N) const
Return true if the referenced return value is an operand of N.
SDValue()=default
LLVM_ABI bool reachesChainWithoutSideEffects(SDValue Dest, unsigned Depth=2) const
Return true if this operand (which must be a chain) reaches the specified operand without crossing an...
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
bool use_empty() const
Return true if there are no nodes using value ResNo of Node.
const APInt & getConstantOperandAPInt(unsigned i) const
uint64_t getScalarValueSizeInBits() const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
uint64_t getConstantOperandVal(unsigned i) const
unsigned getOpcode() const
virtual void verifyTargetNode(const SelectionDAG &DAG, const SDNode *N) const
Checks that the given target-specific node is valid. Aborts if it is not.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
LLVM_ABI SDValue getElementCount(const SDLoc &DL, EVT VT, ElementCount EC)
LLVM_ABI Align getReducedAlign(EVT VT, bool UseABI)
In most cases this function returns the ABI alignment for a given type, except for illegal vector typ...
LLVM_ABI SDValue getVPZeroExtendInReg(SDValue Op, SDValue Mask, SDValue EVL, const SDLoc &DL, EVT VT)
Return the expression required to zero extend the Op value assuming it was the smaller SrcTy value.
LLVM_ABI SDValue getShiftAmountOperand(EVT LHSTy, SDValue Op)
Return the specified value casted to the target's desired shift amount type.
LLVM_ABI SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI std::pair< SDValue, SDValue > getMemccpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue C, SDValue Size, const CallInst *CI)
Lower a memccpy operation into a target library call and return the resulting chain and call result a...
LLVM_ABI bool isKnownNeverLogicalZero(SDValue Op, const APInt &DemandedElts, unsigned Depth=0) const
Test whether the given floating point SDValue (or all elements of it, if it is a vector) is known to ...
LLVM_ABI SDValue getExtLoadVP(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment, MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo, bool IsExpanding=false)
SDValue getExtractVectorElt(const SDLoc &DL, EVT VT, SDValue Vec, unsigned Idx)
Extract element at Idx from Vec.
LLVM_ABI SDValue getSplatSourceVector(SDValue V, int &SplatIndex)
If V is a splatted value, return the source vector and its splat index.
LLVM_ABI SDValue getLabelNode(unsigned Opcode, const SDLoc &dl, SDValue Root, MCSymbol *Label)
LLVM_ABI OverflowKind computeOverflowForUnsignedSub(SDValue N0, SDValue N1) const
Determine if the result of the unsigned sub of 2 nodes can overflow.
LLVM_ABI unsigned ComputeMaxSignificantBits(SDValue Op, unsigned Depth=0) const
Get the upper bound on bit size for this Value Op as a signed integer.
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
LLVM_ABI std::pair< SDValue, SDValue > getStrlen(SDValue Chain, const SDLoc &dl, SDValue Src, const CallInst *CI)
Lower a strlen operation into a target library call and return the resulting chain and call result as...
LLVM_ABI SDValue getMaskedGather(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType, ISD::LoadExtType ExtTy)
LLVM_ABI SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS, unsigned DestAS)
Return an AddrSpaceCastSDNode.
LLVM_ABI SDValue FoldSetCC(EVT VT, SDValue N1, SDValue N2, ISD::CondCode Cond, const SDLoc &dl, SDNodeFlags Flags={})
Constant fold a setcc to true or false.
bool isKnownNeverSNaN(SDValue Op, const APInt &DemandedElts, unsigned Depth=0) const
LLVM_ABI std::optional< bool > isBoolConstant(SDValue N) const
Check if a value N is a constant using the target's BooleanContent for its type.
LLVM_ABI SDValue getStackArgumentTokenFactor(SDValue Chain)
Compute a TokenFactor to force all the incoming stack arguments to be loaded from the stack.
const TargetSubtargetInfo & getSubtarget() const
LLVM_ABI ConstantRange computeConstantRange(SDValue Op, bool ForSigned, unsigned Depth=0) const
Determine the possible constant range of an integer or vector of integers.
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL)
LLVM_ABI void updateDivergence(SDNode *N)
LLVM_ABI SDValue getSplatValue(SDValue V, bool LegalTypes=false)
If V is a splat vector, return its scalar source operand by extracting that element from the source v...
LLVM_ABI SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget=false, bool IsOpaque=false)
LLVM_ABI MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
LLVM_ABI void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
LLVM_ABI SDValue getAtomicMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Value, SDValue Size, Type *SizeTy, unsigned ElemSz, bool isTailCall, MachinePointerInfo DstPtrInfo)
LLVM_ABI SDValue getAtomicLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT MemVT, EVT VT, SDValue Chain, SDValue Ptr, MachineMemOperand *MMO)
LLVM_ABI SDNode * getNodeIfExists(unsigned Opcode, SDVTList VTList, ArrayRef< SDValue > Ops, const SDNodeFlags Flags, bool AllowCommute=false)
Get the specified node if it's already available, or else return NULL.
LLVM_ABI SDValue getPseudoProbeNode(const SDLoc &Dl, SDValue Chain, uint64_t Guid, uint64_t Index, uint32_t Attr)
Creates a PseudoProbeSDNode with function GUID Guid and the index of the block Index it is probing,...
LLVM_ABI SDValue getFreeze(SDValue V)
Return a freeze using the SDLoc of the value operand.
LLVM_ABI SDNode * SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT)
These are used for target selectors to mutate the specified node to have the specified return type,...
LLVM_ABI void init(MachineFunction &NewMF, OptimizationRemarkEmitter &NewORE, Pass *PassPtr, const TargetLibraryInfo *LibraryInfo, const LibcallLoweringInfo *LibcallsInfo, UniformityInfo *UA, ProfileSummaryInfo *PSIin, BlockFrequencyInfo *BFIin, MachineModuleInfo &MMI, FunctionVarLocs const *FnVarLocs)
Prepare this SelectionDAG to process code in the given MachineFunction.
LLVM_ABI SelectionDAG(const TargetMachine &TM, CodeGenOptLevel)
LLVM_ABI SDValue getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, MachinePointerInfo DstPtrInfo, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI SDValue getBitcastedSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by first bitcasting (from potentia...
LLVM_ABI SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offs=0, bool isT=false, unsigned TargetFlags=0)
LLVM_ABI SDValue getStridedLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &DL, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, bool IsExpanding=false)
LLVM_ABI SDValue getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDVTList VTs, SDValue Chain, SDValue Ptr, SDValue Cmp, SDValue Swp, MachineMemOperand *MMO)
Gets a node for an atomic cmpxchg op.
LLVM_ABI SDValue makeEquivalentMemoryOrdering(SDValue OldChain, SDValue NewMemOpChain)
If an existing load has uses of its chain, create a token factor node with that chain and the new mem...
LLVM_ABI bool isConstantIntBuildVectorOrConstantInt(SDValue N, bool AllowOpaques=true) const
Test whether the given value is a constant int or similar node.
LLVM_ABI void ReplaceAllUsesOfValuesWith(const SDValue *From, const SDValue *To, unsigned Num)
Like ReplaceAllUsesOfValueWith, but for multiple values at once.
LLVM_ABI SDValue getJumpTableDebugInfo(int JTI, SDValue Chain, const SDLoc &DL)
LLVM_ABI SDValue getSymbolFunctionGlobalAddress(SDValue Op, Function **TargetFunction=nullptr)
Return a GlobalAddress of the function from the current module with name matching the given ExternalS...
LLVM_ABI std::optional< unsigned > getValidMaximumShiftAmount(SDValue V, const APInt &DemandedElts, unsigned Depth=0) const
If a SHL/SRA/SRL node V has shift amounts that are all less than the element bit-width of the shift n...
LLVM_ABI SDValue UnrollVectorOp(SDNode *N, unsigned ResNE=0)
Utility function used by legalize and lowering to "unroll" a vector operation by splitting out the sc...
LLVM_ABI SDValue getVScale(const SDLoc &DL, EVT VT, APInt MulImm)
Return a node that represents the runtime scaling 'MulImm * RuntimeVL'.
LLVM_ABI SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
OverflowKind
Used to represent the possible overflow behavior of an operation.
static LLVM_ABI unsigned getHasPredecessorMaxSteps()
LLVM_ABI bool haveNoCommonBitsSet(SDValue A, SDValue B) const
Return true if A and B have no common bits set.
SDValue getExtractSubvector(const SDLoc &DL, EVT VT, SDValue Vec, unsigned Idx)
Return the VT typed sub-vector of Vec at Idx.
LLVM_ABI bool cannotBeOrderedNegativeFP(SDValue Op) const
Test whether the given float value cannot be an ordered negative value (i.e., it is known to be non-negative, or NaN).
LLVM_ABI SDValue getRegister(Register Reg, EVT VT)
LLVM_ABI bool calculateDivergence(SDNode *N)
LLVM_ABI std::pair< SDValue, SDValue > getStrcmp(SDValue Chain, const SDLoc &dl, SDValue S0, SDValue S1, const CallInst *CI)
Lower a strcmp operation into a target library call and return the resulting chain and call result as...
LLVM_ABI SDValue getGetFPEnv(SDValue Chain, const SDLoc &dl, SDValue Ptr, EVT MemVT, MachineMemOperand *MMO)
LLVM_ABI SDValue getAssertAlign(const SDLoc &DL, SDValue V, Align A)
Return an AssertAlignSDNode.
LLVM_ABI SDNode * mutateStrictFPToFP(SDNode *Node)
Mutate the specified strict FP node to its non-strict equivalent, unlinking the node from its chain a...
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
LLVM_ABI bool canIgnoreSignBitOfZero(const SDUse &Use) const
Check if a use of a float value is insensitive to signed zeros.
LLVM_ABI bool SignBitIsZeroFP(SDValue Op, unsigned Depth=0) const
Return true if the sign bit of Op is known to be zero, for a floating-point value.
LLVM_ABI SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=LocationSize::precise(0), const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
SDValue getInsertSubvector(const SDLoc &DL, SDValue Vec, SDValue SubVec, unsigned Idx)
Insert SubVec at the Idx element of Vec.
LLVM_ABI SDValue getBitcastedZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by first bitcasting (from potentia...
LLVM_ABI SDValue getStepVector(const SDLoc &DL, EVT ResVT, const APInt &StepVal)
Returns a vector of type ResVT whose elements contain the linear sequence <0, Step,...
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false, SDNodeFlags Flags={})
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
LLVM_ABI SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain, SDValue Ptr, SDValue Val, MachineMemOperand *MMO)
Gets a node for an atomic op, produces result (if relevant) and chain and takes 2 operands.
LLVM_ABI SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), BatchAAResults *BatchAA=nullptr)
LLVM_ABI Align getEVTAlign(EVT MemoryVT) const
Compute the default alignment value for the given type.
LLVM_ABI bool shouldOptForSize() const
LLVM_ABI SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
LLVM_ABI SDValue getVPZExtOrTrunc(const SDLoc &DL, EVT VT, SDValue Op, SDValue Mask, SDValue EVL)
Convert a vector-predicated Op, which must be an integer vector, to the vector-type VT,...
const TargetLowering & getTargetLoweringInfo() const
LLVM_ABI bool isEqualTo(SDValue A, SDValue B) const
Test whether two SDValues are known to compare equal.
static constexpr unsigned MaxRecursionDepth
LLVM_ABI SDValue getStridedStoreVP(SDValue Chain, const SDLoc &DL, SDValue Val, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
bool isGuaranteedNotToBePoison(SDValue Op, unsigned Depth=0) const
Return true if this function can prove that Op is never poison.
LLVM_ABI SDValue getIdentityElement(unsigned Opcode, const SDLoc &DL, EVT VT, SDNodeFlags Flags)
Get the (commutative) identity element for the given opcode, if it exists.
LLVM_ABI SDValue expandVACopy(SDNode *Node)
Expand the specified ISD::VACOPY node as the Legalize pass would.
LLVM_ABI SDValue getIndexedMaskedLoad(SDValue OrigLoad, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
LLVM_ABI APInt computeVectorKnownZeroElements(SDValue Op, const APInt &DemandedElts, unsigned Depth=0) const
For each demanded element of a vector, see if it is known to be zero.
LLVM_ABI void AddDbgValue(SDDbgValue *DB, bool isParameter)
Add a dbg_value SDNode.
bool NewNodesMustHaveLegalTypes
When true, additional steps are taken to ensure that getConstant() and similar functions return DAG n...
LLVM_ABI std::pair< EVT, EVT > GetSplitDestVTs(const EVT &VT) const
Compute the VTs needed for the low/hi parts of a type which is split (or expanded) into two not neces...
LLVM_ABI void salvageDebugInfo(SDNode &N)
To be invoked on an SDNode that is slated to be erased.
LLVM_ABI SDNode * MorphNodeTo(SDNode *N, unsigned Opc, SDVTList VTs, ArrayRef< SDValue > Ops)
This mutates the specified node to have the specified return type, opcode, and operands.
LLVM_ABI std::pair< SDValue, SDValue > UnrollVectorOverflowOp(SDNode *N, unsigned ResNE=0)
Like UnrollVectorOp(), but for the [US](ADD|SUB|MUL)O family of opcodes.
allnodes_const_iterator allnodes_begin() const
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
LLVM_ABI SDValue getGatherVP(SDVTList VTs, EVT VT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
LLVM_ABI SDValue getBitcastedAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by first bitcasting (from potentia...
LLVM_ABI bool isSplatValue(SDValue V, const APInt &DemandedElts, APInt &UndefElts, unsigned Depth=0) const
Test whether V has a splatted value for all the demanded elements.
LLVM_ABI void DeleteNode(SDNode *N)
Remove the specified node from the system.
LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
LLVM_ABI SDDbgValue * getDbgValueList(DIVariable *Var, DIExpression *Expr, ArrayRef< SDDbgOperand > Locs, ArrayRef< SDNode * > Dependencies, bool IsIndirect, const DebugLoc &DL, unsigned O, bool IsVariadic)
Creates a SDDbgValue node from a list of locations.
LLVM_ABI std::pair< SDValue, SDValue > getStrcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, const CallInst *CI)
Lower a strcpy operation into a target library call and return the resulting chain and call result as...
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build Select's if you just have operands and don't want to check...
LLVM_ABI SDValue getNegative(SDValue Val, const SDLoc &DL, EVT VT)
Create negative operation as (SUB 0, Val).
LLVM_ABI std::optional< unsigned > getValidShiftAmount(SDValue V, const APInt &DemandedElts, unsigned Depth=0) const
If a SHL/SRA/SRL node V has a uniform shift amount that is less than the element bit-width of the shi...
LLVM_ABI void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
LLVM_ABI SDValue simplifySelect(SDValue Cond, SDValue TVal, SDValue FVal)
Try to simplify a select/vselect into 1 of its operands or a constant.
LLVM_ABI SDValue getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT)
Return the expression required to zero extend the Op value assuming it was the smaller SrcTy value.
LLVM_ABI bool isConstantFPBuildVectorOrConstantFP(SDValue N) const
Test whether the given value is a constant FP or similar node.
const DataLayout & getDataLayout() const
SDValue getPartialReduceMLS(unsigned Opc, const SDLoc &DL, SDValue Acc, SDValue LHS, SDValue RHS)
Get an expression that implements a partial multiply-subtract reduction.
LLVM_ABI SDValue expandVAArg(SDNode *Node)
Expand the specified ISD::VAARG node as the Legalize pass would.
LLVM_ABI SDValue getTokenFactor(const SDLoc &DL, SmallVectorImpl< SDValue > &Vals)
Creates a new TokenFactor containing Vals.
LLVM_ABI bool doesNodeExist(unsigned Opcode, SDVTList VTList, ArrayRef< SDValue > Ops)
Check if a node exists without modifying its flags.
LLVM_ABI ConstantRange computeConstantRangeIncludingKnownBits(SDValue Op, bool ForSigned, unsigned Depth=0) const
Combine constant ranges from computeConstantRange() and computeKnownBits().
const SelectionDAGTargetInfo & getSelectionDAGInfo() const
LLVM_ABI bool areNonVolatileConsecutiveLoads(LoadSDNode *LD, LoadSDNode *Base, unsigned Bytes, int Dist) const
Return true if loads are next to each other and can be merged.
LLVM_ABI SDValue getMaskedHistogram(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
LLVM_ABI SDDbgLabel * getDbgLabel(DILabel *Label, const DebugLoc &DL, unsigned O)
Creates a SDDbgLabel node.
LLVM_ABI SDValue getStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, SDValue Offset, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
LLVM_ABI OverflowKind computeOverflowForUnsignedMul(SDValue N0, SDValue N1) const
Determine if the result of the unsigned mul of 2 nodes can overflow.
LLVM_ABI void copyExtraInfo(SDNode *From, SDNode *To)
Copy extra info associated with one node to another.
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
LLVM_ABI SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
LLVM_ABI SDValue getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, bool isTargetGA=false, unsigned TargetFlags=0)
LLVM_ABI SDValue getVAArg(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue SV, unsigned Align)
VAArg produces a result and token chain, and takes a pointer and a source value as input.
LLVM_ABI SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI SDValue getLoadFFVP(EVT VT, const SDLoc &DL, SDValue Chain, SDValue Ptr, SDValue Mask, SDValue EVL, MachineMemOperand *MMO)
LLVM_ABI SDValue getTypeSize(const SDLoc &DL, EVT VT, TypeSize TS)
LLVM_ABI SDValue getMDNode(const MDNode *MD)
Return an MDNodeSDNode which holds an MDNode.
LLVM_ABI void clear()
Clear state and free memory necessary to make this SelectionDAG ready to process a new block.
LLVM_ABI std::pair< SDValue, SDValue > getMemcmp(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, const CallInst *CI)
Lower a memcmp operation into a target library call and return the resulting chain and call result as...
LLVM_ABI void ReplaceAllUsesWith(SDValue From, SDValue To)
Modify anything using 'From' to use 'To' instead.
LLVM_ABI SDValue getCommutedVectorShuffle(const ShuffleVectorSDNode &SV)
Returns an ISD::VECTOR_SHUFFLE node semantically equivalent to the shuffle node in input but with swa...
LLVM_ABI std::pair< SDValue, SDValue > SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the vector with EXTRACT_SUBVECTOR using the provided VTs and return the low/high part.
LLVM_ABI SDValue makeStateFunctionCall(unsigned LibFunc, SDValue Ptr, SDValue InChain, const SDLoc &DLoc)
Helper used to make a call to a library function that has one argument of pointer type.
LLVM_ABI SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
LLVM_ABI SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
LLVM_ABI SDValue getIndexedLoadVP(SDValue OrigLoad, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
LLVM_ABI SDValue getSrcValue(const Value *v)
Construct a node to track a Value* through the backend.
SDValue getSplatVector(EVT VT, const SDLoc &DL, SDValue Op)
LLVM_ABI SDValue getAtomicMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Type *SizeTy, unsigned ElemSz, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
LLVM_ABI OverflowKind computeOverflowForSignedMul(SDValue N0, SDValue N1) const
Determine if the result of the signed mul of 2 nodes can overflow.
LLVM_ABI MaybeAlign InferPtrAlign(SDValue Ptr) const
Infer alignment of a load / store address.
LLVM_ABI void dump() const
Dump the textual format of this DAG.
LLVM_ABI bool MaskedValueIsAllOnes(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if '(Op & Mask) == Mask'.
LLVM_ABI bool SignBitIsZero(SDValue Op, unsigned Depth=0) const
Return true if the sign bit of Op is known to be zero.
LLVM_ABI void RemoveDeadNodes()
This method deletes all unreachable nodes in the SelectionDAG.
LLVM_ABI void RemoveDeadNode(SDNode *N)
Remove the specified node from the system.
LLVM_ABI void AddDbgLabel(SDDbgLabel *DB)
Add a dbg_label SDNode.
bool isConstantValueOfAnyType(SDValue N) const
LLVM_ABI bool canCreateUndefOrPoison(SDValue Op, const APInt &DemandedElts, UndefPoisonKind Kind=UndefPoisonKind::UndefOrPoison, bool ConsiderFlags=true, unsigned Depth=0) const
Return true if Op can create undef or poison from non-undef & non-poison operands.
LLVM_ABI SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
LLVM_ABI SDValue getBasicBlock(MachineBasicBlock *MBB)
LLVM_ABI SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
LLVM_ABI SDDbgValue * getVRegDbgValue(DIVariable *Var, DIExpression *Expr, Register VReg, bool IsIndirect, const DebugLoc &DL, unsigned O)
Creates a VReg SDDbgValue node.
LLVM_ABI KnownFPClass computeKnownFPClass(SDValue Op, FPClassTest InterestedClasses, unsigned Depth=0) const
Determine floating-point class information about Op.
LLVM_ABI bool isIdentityElement(unsigned Opc, SDNodeFlags Flags, SDValue V, unsigned OperandNo, unsigned Depth=0) const
Returns true if V is an identity element of Opc with Flags.
LLVM_ABI SDValue getEHLabel(const SDLoc &dl, SDValue Root, MCSymbol *Label)
LLVM_ABI SDValue getIndexedStoreVP(SDValue OrigStore, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(SDValue Op, UndefPoisonKind Kind=UndefPoisonKind::UndefOrPoison, unsigned Depth=0) const
Return true if this function can prove that Op is never poison; Kind can be used to track poison ...
LLVM_ABI bool isKnownNeverZero(SDValue Op, unsigned Depth=0) const
Test whether the given SDValue is known to contain non-zero value(s).
LLVM_ABI SDValue getIndexedStore(SDValue OrigStore, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
LLVM_ABI SDValue FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops, SDNodeFlags Flags=SDNodeFlags())
LLVM_ABI std::optional< unsigned > getValidMinimumShiftAmount(SDValue V, const APInt &DemandedElts, unsigned Depth=0) const
If a SHL/SRA/SRL node V has shift amounts that are all less than the element bit-width of the shift n...
LLVM_ABI SDValue getSetFPEnv(SDValue Chain, const SDLoc &dl, SDValue Ptr, EVT MemVT, MachineMemOperand *MMO)
LLVM_ABI SDValue getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT, EVT OpVT)
Convert Op, which must be of integer type, to the integer type VT, by using an extension appropriate ...
LLVM_ABI SDValue getMaskedStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Base, SDValue Offset, SDValue Mask, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
LLVM_ABI SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
LLVM_ABI std::pair< SDValue, SDValue > getStrictFPExtendOrRound(SDValue Op, SDValue Chain, const SDLoc &DL, EVT VT)
Convert Op, which must be a STRICT operation of float type, to the float type VT, by either extending...
LLVM_ABI std::pair< SDValue, SDValue > SplitEVL(SDValue N, EVT VecVT, const SDLoc &DL)
Split the explicit vector length parameter of a VP operation.
LLVM_ABI SDValue getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either truncating it or perform...
LLVM_ABI SDValue getVPLogicalNOT(const SDLoc &DL, SDValue Val, SDValue Mask, SDValue EVL, EVT VT)
Create a vector-predicated logical NOT operation as (VP_XOR Val, BooleanOne, Mask,...
LLVM_ABI SDValue getMaskFromElementCount(const SDLoc &DL, EVT VT, ElementCount Len)
Return a vector with the first 'Len' lanes set to true and remaining lanes set to false.
LLVM_ABI SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
iterator_range< allnodes_iterator > allnodes()
LLVM_ABI SDValue getBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, bool isTarget=false, unsigned TargetFlags=0)
LLVM_ABI SDValue WidenVector(const SDValue &N, const SDLoc &DL)
Widen the vector up to the next power of two using INSERT_SUBVECTOR.
const LibcallLoweringInfo & getLibcalls() const
LLVM_ABI SDValue getLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo, EVT MemVT, Align Alignment, MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo, const MDNode *Ranges=nullptr, bool IsExpanding=false)
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDDbgValue * getConstantDbgValue(DIVariable *Var, DIExpression *Expr, const Value *C, const DebugLoc &DL, unsigned O)
Creates a constant SDDbgValue node.
LLVM_ABI SDValue getScatterVP(SDVTList VTs, EVT VT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
LLVM_ABI SDValue getValueType(EVT)
LLVM_ABI SDValue getLifetimeNode(bool IsStart, const SDLoc &dl, SDValue Chain, int FrameIndex)
Creates a LifetimeSDNode that starts (IsStart==true) or ends (IsStart==false) the lifetime of the Fra...
ArrayRef< SDDbgValue * > GetDbgValues(const SDNode *SD) const
Get the debug values which reference the given SDNode.
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
LLVM_ABI OverflowKind computeOverflowForSignedAdd(SDValue N0, SDValue N1) const
Determine if the result of the signed addition of 2 nodes can overflow.
LLVM_ABI SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by tr...
LLVM_ABI unsigned AssignTopologicalOrder()
Topological-sort the AllNodes list and assign a unique node id for each node in the DAG based on th...
ilist< SDNode >::size_type allnodes_size() const
LLVM_ABI bool isKnownNeverNaN(SDValue Op, const APInt &DemandedElts, bool SNaN=false, unsigned Depth=0) const
Test whether the given SDValue (or all elements of it, if it is a vector) is known to never be NaN in...
LLVM_ABI SDValue FoldConstantBuildVector(BuildVectorSDNode *BV, const SDLoc &DL, EVT DstEltVT)
Fold BUILD_VECTOR of constants/undefs to the destination type BUILD_VECTOR of constants/undefs elemen...
LLVM_ABI SDValue getAtomicMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Type *SizeTy, unsigned ElemSz, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
LLVM_ABI SDValue getIndexedMaskedStore(SDValue OrigStore, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
LLVM_ABI SDValue getTruncStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo, bool IsCompressing=false)
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
LLVM_ABI unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
LLVM_ABI bool MaskedVectorIsZero(SDValue Op, const APInt &DemandedElts, unsigned Depth=0) const
Return true if 'Op' is known to be zero in DemandedElts.
LLVM_ABI SDValue getBoolConstant(bool V, const SDLoc &DL, EVT VT, EVT OpVT)
Create a true or false constant of type VT using the target's BooleanContent for type OpVT.
LLVM_ABI SDDbgValue * getFrameIndexDbgValue(DIVariable *Var, DIExpression *Expr, unsigned FI, bool IsIndirect, const DebugLoc &DL, unsigned O)
Creates a FrameIndex SDDbgValue node.
LLVM_ABI SDValue getExtStridedLoadVP(ISD::LoadExtType ExtType, const SDLoc &DL, EVT VT, SDValue Chain, SDValue Ptr, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, bool IsExpanding=false)
LLVM_ABI SDValue getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), BatchAAResults *BatchAA=nullptr)
LLVM_ABI SDValue getJumpTable(int JTI, EVT VT, bool isTarget=false, unsigned TargetFlags=0)
LLVM_ABI bool isBaseWithConstantOffset(SDValue Op) const
Return true if the specified operand is an ISD::ADD with a ConstantSDNode on the right-hand side,...
LLVM_ABI SDValue getVPPtrExtOrTrunc(const SDLoc &DL, EVT VT, SDValue Op, SDValue Mask, SDValue EVL)
Convert a vector-predicated Op, which must be of integer type, to the vector-type integer type VT,...
LLVM_ABI SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI void getTopologicallyOrderedNodes(SmallVectorImpl< const SDNode * > &SortedNodes) const
Get all the nodes in their topological order without modifying any states.
LLVM_ABI void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
LLVM_ABI std::pair< SDValue, SDValue > getStrstr(SDValue Chain, const SDLoc &dl, SDValue S0, SDValue S1, const CallInst *CI)
Lower a strstr operation into a target library call and return the resulting chain and call result as...
LLVM_ABI SDValue getPtrExtendInReg(SDValue Op, const SDLoc &DL, EVT VT)
Return the expression required to extend the Op as a pointer value assuming it was the smaller SrcTy ...
LLVM_ABI OverflowKind computeOverflowForUnsignedAdd(SDValue N0, SDValue N1) const
Determine if the result of the unsigned addition of 2 nodes can overflow.
SDValue getPOISON(EVT VT)
Return a POISON node. POISON does not have a useful SDLoc.
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op)
Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all elements.
LLVM_ABI SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
LLVM_ABI SDValue getTruncStridedStoreVP(SDValue Chain, const SDLoc &DL, SDValue Val, SDValue Ptr, SDValue Stride, SDValue Mask, SDValue EVL, EVT SVT, MachineMemOperand *MMO, bool IsCompressing=false)
LLVM_ABI void canonicalizeCommutativeBinop(unsigned Opcode, SDValue &N1, SDValue &N2) const
Swap N1 and N2 if Opcode is a commutative binary opcode and the canonical form expects the opposite o...
LLVM_ABI KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
LLVM_ABI SDValue getRegisterMask(const uint32_t *RegMask)
LLVM_ABI SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
LLVM_ABI SDValue getCondCode(ISD::CondCode Cond)
LLVM_ABI bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
LLVM_ABI bool isKnownToBeAPowerOfTwoFP(SDValue Val, unsigned Depth=0) const
Test if the given fp value is known to be an integer power-of-2, either positive or negative.
LLVM_ABI OverflowKind computeOverflowForSignedSub(SDValue N0, SDValue N1) const
Determine if the result of the signed sub of 2 nodes can overflow.
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
LLVM_ABI SDValue simplifyFPBinop(unsigned Opcode, SDValue X, SDValue Y, SDNodeFlags Flags)
Try to simplify a floating-point binary operation into 1 of its operands or a constant.
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
LLVM_ABI bool isKnownToBeAPowerOfTwo(SDValue Val, bool OrZero=false, unsigned Depth=0) const
Test if the given value is known to have exactly one bit set.
LLVM_ABI SDValue getDeactivationSymbol(const GlobalValue *GV)
LLVM_ABI SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
LLVM_ABI SDValue getMCSymbol(MCSymbol *Sym, EVT VT)
LLVM_ABI bool isUndef(unsigned Opcode, ArrayRef< SDValue > Ops)
Return true if the result of this operation is always undefined.
LLVM_ABI SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
LLVM_ABI SDNode * UpdateNodeOperands(SDNode *N, SDValue Op)
Mutate the specified node in-place to have the specified operands.
LLVM_ABI std::pair< EVT, EVT > GetDependentSplitDestVTs(const EVT &VT, const EVT &EnvVT, bool *HiIsEmpty) const
Compute the VTs needed for the low/hi parts of a type, dependent on an enveloping VT that has been sp...
LLVM_ABI SDValue foldConstantFPMath(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops)
Fold floating-point operations when all operands are constants and/or undefined.
LLVM_ABI std::optional< ConstantRange > getValidShiftAmountRange(SDValue V, const APInt &DemandedElts, unsigned Depth) const
If a SHL/SRA/SRL node V has shift amounts that are all less than the element bit-width of the shift n...
LLVM_ABI SDValue FoldSymbolOffset(unsigned Opcode, EVT VT, const GlobalAddressSDNode *GA, const SDNode *N2)
LLVM_ABI SDValue getIndexedLoad(SDValue OrigLoad, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
LLVM_ABI SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand, SDValue Subreg)
A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
LLVM_ABI SDDbgValue * getDbgValue(DIVariable *Var, DIExpression *Expr, SDNode *N, unsigned R, bool IsIndirect, const DebugLoc &DL, unsigned O)
Creates a SDDbgValue node.
LLVM_ABI SDValue getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Base, SDValue Offset, SDValue Mask, SDValue Src0, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, ISD::LoadExtType, bool IsExpanding=false)
DenormalMode getDenormalMode(EVT VT) const
Return the current function's default denormal handling kind for the given floating point type.
SDValue getSplat(EVT VT, const SDLoc &DL, SDValue Op)
Returns a node representing a splat of one value into all lanes of the provided vector type.
LLVM_ABI std::pair< SDValue, SDValue > SplitScalar(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the scalar node with EXTRACT_ELEMENT using the provided VTs and return the low/high part.
LLVM_ABI SDValue matchBinOpReduction(SDNode *Extract, ISD::NodeType &BinOp, ArrayRef< ISD::NodeType > CandidateBinOps, bool AllowPartials=false)
Match a binop + shuffle pyramid that represents a horizontal reduction over the elements of a vector ...
LLVM_ABI bool isADDLike(SDValue Op, bool NoWrap=false) const
Return true if the specified operand is an ISD::OR or ISD::XOR node that can be treated as an ISD::AD...
LLVM_ABI SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
LLVM_ABI SDValue simplifyShift(SDValue X, SDValue Y)
Try to simplify a shift into 1 of its operands or a constant.
LLVM_ABI void transferDbgValues(SDValue From, SDValue To, unsigned OffsetInBits=0, unsigned SizeInBits=0, bool InvalidateDbg=true)
Transfer debug values from one node to another, while optionally generating fragment expressions for ...
LLVM_ABI SDValue getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a logical NOT operation as (XOR Val, BooleanOne).
LLVM_ABI SDValue getMaskedScatter(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType, bool IsTruncating=false)
ilist< SDNode >::iterator allnodes_iterator
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
int getMaskElt(unsigned Idx) const
ArrayRef< int > getMask() const
static void commuteMask(MutableArrayRef< int > Mask)
Change values in a shuffle permute mask assuming the two vector operands have swapped position.
static LLVM_ABI bool isSplatMask(ArrayRef< int > Mask)
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
bool erase(PtrType Ptr)
Remove pointer from the set.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void assign(size_type NumElts, ValueParamT Elt)
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
iterator erase(const_iterator CI)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class is used to represent ISD::STORE nodes.
Represent a constant reference to a string, i.e.
Definition StringRef.h:56
constexpr const char * data() const
Get a pointer to the start of the string (which may not be null terminated).
Definition StringRef.h:138
Information about stack frame layout on the target.
virtual TargetStackID::Value getStackIDForScalableVectors() const
Returns the StackID that scalable vectors should be associated with.
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
Completely target-dependent object reference.
unsigned getTargetFlags() const
Provides information about what library functions are available for the current target.
virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const
Return true if it is beneficial to convert a load of a constant to just the constant itself.
const TargetMachine & getTargetMachine() const
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
unsigned getMaxStoresPerMemcpy(bool OptSize) const
Get maximum # of store operations permitted for llvm.memcpy.
unsigned getMaxStoresPerMemset(bool OptSize) const
Get maximum # of store operations permitted for llvm.memset.
virtual bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *=nullptr) const
Determine if the target supports unaligned memory accesses.
virtual bool shallExtractConstSplatVectorElementToStore(Type *VectorTy, unsigned ElemSizeInBits, unsigned &Index) const
Return true if the target shall perform extract vector element and store given that the vector is kno...
virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const
Return true if it's free to truncate a value of type FromTy to type ToTy.
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
BooleanContent
Enum that describes how the target represents true/false values.
virtual unsigned getMaxGluedStoresPerMemcpy() const
Get maximum # of store operations to be glued together.
std::vector< ArgListEntry > ArgListTy
unsigned getMaxStoresPerMemmove(bool OptSize) const
Get maximum # of store operations permitted for llvm.memmove.
virtual bool isLegalStoreImmediate(int64_t Value) const
Return true if the specified immediate is legal for the value input of a store instruction.
static ISD::NodeType getExtendForContent(BooleanContent Content)
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual bool findOptimalMemOpLowering(LLVMContext &Context, std::vector< EVT > &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS, unsigned SrcAS, const AttributeList &FuncAttributes, EVT *LargestVT=nullptr) const
Determines the optimal series of memory ops to replace the memset / memcpy.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
Primary interface to the complete machine description for the target machine.
virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const
Returns true if a cast between SrcAS and DestAS is a noop.
const Triple & getTargetTriple() const
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const SelectionDAGTargetInfo * getSelectionDAGInfo() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
virtual const TargetLowering * getTargetLowering() const
bool isOSDarwin() const
Is this a "Darwin" OS (macOS, iOS, tvOS, watchOS, DriverKit, XROS, or bridgeOS).
Definition Triple.h:645
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition TypeSize.h:343
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:290
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:313
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Definition Type.cpp:286
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
Definition Type.cpp:311
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:201
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:236
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
LLVM_ABI unsigned getOperandNo() const
Return the operand # of this use in its User.
Definition Use.cpp:35
LLVM_ABI void set(Value *Val)
Definition Value.h:883
User * getUser() const
Returns the User that contains this Use.
Definition Use.h:61
Value * getOperand(unsigned i) const
Definition User.h:207
This class is used to represent an VP_GATHER node.
This class is used to represent a VP_LOAD node.
This class is used to represent an VP_SCATTER node.
This class is used to represent a VP_STORE node.
This class is used to represent an EXPERIMENTAL_VP_STRIDED_LOAD node.
This class is used to represent an EXPERIMENTAL_VP_STRIDED_STORE node.
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:255
std::pair< iterator, bool > insert(const ValueT &V)
Definition DenseSet.h:202
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
Definition DenseSet.h:175
constexpr bool hasKnownScalarFactor(const FixedOrScalableQuantity &RHS) const
Returns true if there exists a value X where RHS.multiplyCoefficientBy(X) will result in a value whos...
Definition TypeSize.h:269
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
static constexpr bool isKnownLE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:230
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
constexpr bool isKnownEven() const
A return value of true indicates we know at compile time that the number of elements (vscale * Min) i...
Definition TypeSize.h:176
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
constexpr LeafTy divideCoefficientBy(ScalarTy RHS) const
We do not provide the '/' operator here because division for polynomial types does not work in the sa...
Definition TypeSize.h:252
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:237
A raw_ostream that writes to an std::string.
CallInst * Call
Changed
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
LLVM_ABI APInt clmulr(const APInt &LHS, const APInt &RHS)
Perform a reversed carry-less multiply.
Definition APInt.cpp:3253
LLVM_ABI APInt mulhu(const APInt &C1, const APInt &C2)
Performs (2*N)-bit multiplication on zero-extended operands.
Definition APInt.cpp:3183
LLVM_ABI APInt avgCeilU(const APInt &C1, const APInt &C2)
Compute the ceil of the unsigned average of C1 and C2.
Definition APInt.cpp:3170
LLVM_ABI APInt avgFloorU(const APInt &C1, const APInt &C2)
Compute the floor of the unsigned average of C1 and C2.
Definition APInt.cpp:3160
LLVM_ABI APInt fshr(const APInt &Hi, const APInt &Lo, const APInt &Shift)
Perform a funnel shift right.
Definition APInt.cpp:3234
LLVM_ABI APInt mulhs(const APInt &C1, const APInt &C2)
Performs (2*N)-bit multiplication on sign-extended operands.
Definition APInt.cpp:3175
LLVM_ABI APInt clmul(const APInt &LHS, const APInt &RHS)
Perform a carry-less multiply, also known as XOR multiplication, and return low-bits.
Definition APInt.cpp:3243
APInt abds(const APInt &A, const APInt &B)
Determine the absolute difference of two APInts considered to be signed.
Definition APInt.h:2297
LLVM_ABI APInt fshl(const APInt &Hi, const APInt &Lo, const APInt &Shift)
Perform a funnel shift left.
Definition APInt.cpp:3225
LLVM_ABI APInt ScaleBitMask(const APInt &A, unsigned NewBitWidth, bool MatchAllBits=false)
Splat/Merge neighboring bits to widen/narrow the bitmask represented by A to NewBitWidth.
Definition APInt.cpp:3061
LLVM_ABI APInt clmulh(const APInt &LHS, const APInt &RHS)
Perform a carry-less multiply, and return high-bits.
Definition APInt.cpp:3258
APInt abdu(const APInt &A, const APInt &B)
Determine the absolute difference of two APInts considered to be unsigned.
Definition APInt.h:2302
LLVM_ABI APInt avgFloorS(const APInt &C1, const APInt &C2)
Compute the floor of the signed average of C1 and C2.
Definition APInt.cpp:3155
LLVM_ABI APInt avgCeilS(const APInt &C1, const APInt &C2)
Compute the ceil of the signed average of C1 and C2.
Definition APInt.cpp:3165
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ Fast
Attempts to make calls as fast as possible (e.g.
Definition CallingConv.h:41
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
LLVM_ABI CondCode getSetCCInverse(CondCode Operation, bool isIntegerLike)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
ISD namespace - This namespace contains an enum which represents all of the SelectionDAG node types a...
Definition ISDOpcodes.h:24
LLVM_ABI CondCode getSetCCAndOperation(CondCode Op1, CondCode Op2, EVT Type)
Return the result of a logical AND between different comparisons of identical values: ((X op1 Y) & (X...
LLVM_ABI bool isConstantSplatVectorAllOnes(const SDNode *N, bool BuildVectorOnly=false)
Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where all of the elements are ~0 ...
bool isNON_EXTLoad(const SDNode *N)
Returns true if the specified node is a non-extending load.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
Definition ISDOpcodes.h:41
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
Definition ISDOpcodes.h:823
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
Definition ISDOpcodes.h:261
@ TargetConstantPool
Definition ISDOpcodes.h:189
@ MDNODE_SDNODE
MDNODE_SDNODE - This is a node that holds an MDNode*, which is used to reference metadata in the IR.
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
Definition ISDOpcodes.h:511
@ PTRADD
PTRADD represents pointer arithmetic semantics, for targets that opt in using shouldPreservePtrArith(...
@ DELETED_NODE
DELETED_NODE - This is an illegal value that is used to catch errors.
Definition ISDOpcodes.h:45
@ POISON
POISON - A poison node.
Definition ISDOpcodes.h:236
@ PARTIAL_REDUCE_SMLA
PARTIAL_REDUCE_[U|S]MLA(Accumulator, Input1, Input2) The partial reduction nodes sign or zero extend ...
@ VECREDUCE_SEQ_FADD
Generic reduction nodes.
@ MLOAD
Masked load and store - consecutive vector load and store operations with additional mask operand tha...
@ FGETSIGN
INT = FGETSIGN(FP) - Return the sign bit of the specified floating point value as an integer 0/1 valu...
Definition ISDOpcodes.h:538
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
Definition ISDOpcodes.h:275
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
Definition ISDOpcodes.h:600
@ JUMP_TABLE_DEBUG_INFO
JUMP_TABLE_DEBUG_INFO - Jumptable debug info.
@ BSWAP
Byte Swap and Counting operators.
Definition ISDOpcodes.h:783
@ TargetBlockAddress
Definition ISDOpcodes.h:191
@ DEACTIVATION_SYMBOL
Untyped node storing deactivation symbol reference (DeactivationSymbolSDNode).
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, val, ptr) This corresponds to "store atomic" instruction.
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
Definition ISDOpcodes.h:294
@ FMAD
FMAD - Perform a * b + c, while getting the same result as the separately rounded operations.
Definition ISDOpcodes.h:522
@ ADD
Simple integer binary arithmetic operators.
Definition ISDOpcodes.h:264
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
Definition ISDOpcodes.h:857
@ ATOMIC_LOAD_USUB_COND
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
Definition ISDOpcodes.h:518
@ FATAN2
FATAN2 - atan2, inspired by libm.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
Definition ISDOpcodes.h:220
@ GlobalAddress
Definition ISDOpcodes.h:88
@ ATOMIC_CMP_SWAP_WITH_SUCCESS
Val, Success, OUTCHAIN = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap) N.b.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
Definition ISDOpcodes.h:884
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
Definition ISDOpcodes.h:584
@ VECREDUCE_FMAX
FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
@ FADD
Simple binary floating point operators.
Definition ISDOpcodes.h:417
@ VECREDUCE_FMAXIMUM
FMINIMUM/FMAXIMUM nodes propagate NaNs and signed zeroes using the llvm.minimum and llvm....
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
Definition ISDOpcodes.h:747
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low ...
Definition ISDOpcodes.h:914
@ FP16_TO_FP
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-preci...
@ FMULADD
FMULADD - Performs a * b + c, with, or without, intermediate rounding.
Definition ISDOpcodes.h:528
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition ISDOpcodes.h:997
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
Definition ISDOpcodes.h:254
@ CLMUL
Carry-less multiplication operations.
Definition ISDOpcodes.h:778
@ FLDEXP
FLDEXP - ldexp, inspired by libm (op0 * 2**op1).
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
@ GlobalTLSAddress
Definition ISDOpcodes.h:89
@ SRCVALUE
SRCVALUE - This is a node type that holds a Value* that is used to make reference to a value in the L...
@ EH_LABEL
EH_LABEL - Represents a label in mid basic block used to track locations needed for debug and excepti...
@ ATOMIC_LOAD_USUB_SAT
@ CTLZ_ZERO_POISON
Definition ISDOpcodes.h:792
@ PARTIAL_REDUCE_UMLA
@ SIGN_EXTEND
Conversion operators.
Definition ISDOpcodes.h:848
@ AVGCEILS
AVGCEILS/AVGCEILU - Rounding averaging add - Add two integers using an integer of type i[N+2],...
Definition ISDOpcodes.h:715
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
Definition ISDOpcodes.h:665
@ TargetExternalSymbol
Definition ISDOpcodes.h:190
@ VECREDUCE_FADD
These reductions have relaxed evaluation order semantics, and have a single vector operand.
@ TargetJumpTable
Definition ISDOpcodes.h:188
@ TargetIndex
TargetIndex - Like a constant pool entry, but with completely target-dependent semantics.
Definition ISDOpcodes.h:198
@ PARTIAL_REDUCE_FMLA
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
@ TRUNCATE_SSAT_U
Definition ISDOpcodes.h:877
@ SETCCCARRY
Like SetCC, ops #0 and #1 are the LHS and RHS operands to compare, but op #2 is a boolean indicating ...
Definition ISDOpcodes.h:831
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ BR_CC
BR_CC - Conditional branch.
@ SSUBO
Same for subtraction.
Definition ISDOpcodes.h:352
@ STEP_VECTOR
STEP_VECTOR(IMM) - Returns a scalable vector whose lanes are comprised of a linear sequence of unsign...
Definition ISDOpcodes.h:691
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
Definition ISDOpcodes.h:541
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
Definition ISDOpcodes.h:548
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
Definition ISDOpcodes.h:374
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
Definition ISDOpcodes.h:800
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
@ UNDEF
UNDEF - An undefined node.
Definition ISDOpcodes.h:233
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
Definition ISDOpcodes.h:247
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
Definition ISDOpcodes.h:672
@ AssertAlign
AssertAlign - These nodes record if a register contains a value that has a known alignment and the tr...
Definition ISDOpcodes.h:69
@ GET_ACTIVE_LANE_MASK
GET_ACTIVE_LANE_MASK - this corresponds to the llvm.get.active.lane.mask intrinsic.
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
Definition ISDOpcodes.h:230
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
Definition ISDOpcodes.h:348
@ TargetGlobalAddress
TargetGlobalAddress - Like GlobalAddress, but the DAG does no folding or anything else with this node...
Definition ISDOpcodes.h:185
@ ARITH_FENCE
ARITH_FENCE - This corresponds to an arithmetic fence intrinsic.
@ CTLS
Count leading redundant sign bits.
Definition ISDOpcodes.h:796
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
Definition ISDOpcodes.h:704
@ ATOMIC_LOAD_FMAXIMUM
@ SHL
Shift and rotation operations.
Definition ISDOpcodes.h:769
@ AssertNoFPClass
AssertNoFPClass - These nodes record if a register contains a float value that is known to be not som...
Definition ISDOpcodes.h:78
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
Definition ISDOpcodes.h:649
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
Definition ISDOpcodes.h:614
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values,...
@ EntryToken
EntryToken - This is the marker used to indicate the start of a region.
Definition ISDOpcodes.h:48
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
Definition ISDOpcodes.h:576
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value,...
Definition ISDOpcodes.h:224
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition ISDOpcodes.h:854
@ TargetConstantFP
Definition ISDOpcodes.h:180
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
Definition ISDOpcodes.h:815
@ VSCALE
VSCALE(IMM) - Returns the runtime scaling factor used to calculate the number of elements within a sc...
@ ATOMIC_CMP_SWAP
Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap) For double-word atomic operations: ValLo,...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum maximum on two values, following IEEE-754 definition...
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
Definition ISDOpcodes.h:386
@ SMULO
Same for multiplication.
Definition ISDOpcodes.h:356
@ ATOMIC_LOAD_FMINIMUM
@ TargetFrameIndex
Definition ISDOpcodes.h:187
@ VECTOR_SPLICE_LEFT
VECTOR_SPLICE_LEFT(VEC1, VEC2, OFFSET) - Shifts CONCAT_VECTORS(VEC1, VEC2) left by OFFSET elements an...
Definition ISDOpcodes.h:653
@ ANY_EXTEND_VECTOR_INREG
ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register any-extension of the low la...
Definition ISDOpcodes.h:903
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
Definition ISDOpcodes.h:892
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
Definition ISDOpcodes.h:727
@ MASKED_UDIV
Masked vector arithmetic that returns poison on disabled lanes.
@ LIFETIME_START
This corresponds to the llvm.lifetime.
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
Definition ISDOpcodes.h:982
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
Definition ISDOpcodes.h:809
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
Definition ISDOpcodes.h:328
@ MGATHER
Masked gather and scatter - load and store operations for a vector of random addresses with additiona...
@ HANDLENODE
HANDLENODE node - Used as a handle for various purposes.
@ BF16_TO_FP
BF16_TO_FP, FP_TO_BF16 - These operators are used to perform promotions and truncation for bfloat16.
@ ATOMIC_LOAD_UDEC_WRAP
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
Definition ISDOpcodes.h:500
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition ISDOpcodes.h:930
@ TargetConstant
TargetConstant* - Like Constant*, but the DAG does not do any folding, simplification,...
Definition ISDOpcodes.h:179
@ STRICT_FP_EXTEND
X = STRICT_FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
Definition ISDOpcodes.h:505
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition ISDOpcodes.h:739
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
Definition ISDOpcodes.h:205
@ GET_FPENV_MEM
Gets the current floating-point environment.
@ PSEUDO_PROBE
Pseudo probe for AutoFDO, as a place holder in a basic block to improve the sample counts quality.
@ SCMP
[US]CMP - 3-way comparison of signed or unsigned integers.
Definition ISDOpcodes.h:735
@ AVGFLOORS
AVGFLOORS/AVGFLOORU - Averaging add - Add two integers using an integer of type i[N+1],...
Definition ISDOpcodes.h:710
@ VECTOR_SPLICE_RIGHT
VECTOR_SPLICE_RIGHT(VEC1, VEC2, OFFSET) - Shifts CONCAT_VECTORS(VEC1,VEC2) right by OFFSET elements a...
Definition ISDOpcodes.h:657
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
Definition ISDOpcodes.h:304
@ SPLAT_VECTOR_PARTS
SPLAT_VECTOR_PARTS(SCALAR1, SCALAR2, ...) - Returns a vector with the scalar values joined together a...
Definition ISDOpcodes.h:681
@ FREEZE
FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF),...
Definition ISDOpcodes.h:241
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
Definition ISDOpcodes.h:565
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
Definition ISDOpcodes.h:53
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
@ CTTZ_ZERO_POISON
Bit counting operators with a poisoned result for zero inputs.
Definition ISDOpcodes.h:791
@ ExternalSymbol
Definition ISDOpcodes.h:93
@ FFREXP
FFREXP - frexp, extract fractional and exponent component of a floating-point value.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
Definition ISDOpcodes.h:963
@ VECTOR_COMPRESS
VECTOR_COMPRESS(Vec, Mask, Passthru) consecutively place vector elements based on mask e....
Definition ISDOpcodes.h:699
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
Definition ISDOpcodes.h:925
@ ADDRSPACECAST
ADDRSPACECAST - This operator converts between pointers of different address spaces.
@ EXPERIMENTAL_VECTOR_HISTOGRAM
Experimental vector histogram intrinsic Operands: Input Chain, Inc, Mask, Base, Index,...
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
Definition ISDOpcodes.h:949
@ VECREDUCE_FMINIMUM
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
Definition ISDOpcodes.h:860
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ VECREDUCE_SEQ_FMUL
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
Definition ISDOpcodes.h:837
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
Definition ISDOpcodes.h:62
@ ATOMIC_LOAD_UINC_WRAP
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
Definition ISDOpcodes.h:534
@ PARTIAL_REDUCE_SUMLA
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
Definition ISDOpcodes.h:365
@ SET_FPENV_MEM
Sets the current floating point environment.
@ FMINIMUMNUM
FMINIMUMNUM/FMAXIMUMNUM - minimumnum/maximumnum that is same with FMINNUM_IEEE and FMAXNUM_IEEE besid...
@ TRUNCATE_SSAT_S
TRUNCATE_[SU]SAT_[SU] - Truncate for saturated operand [SU] located in middle, prefix for SAT means i...
Definition ISDOpcodes.h:875
@ ABDS
ABDS/ABDU - Absolute difference - Return the absolute difference between two numbers interpreted as s...
Definition ISDOpcodes.h:722
@ TRUNCATE_USAT_U
Definition ISDOpcodes.h:879
@ SADDO_CARRY
Carry-using overflow-aware nodes for multiple precision addition and subtraction.
Definition ISDOpcodes.h:338
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
Definition ISDOpcodes.h:213
@ TargetGlobalTLSAddress
Definition ISDOpcodes.h:186
@ ABS_MIN_POISON
ABS with a poison result for INT_MIN.
Definition ISDOpcodes.h:751
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
Definition ISDOpcodes.h:556
LLVM_ABI NodeType getOppositeSignednessMinMaxOpcode(unsigned MinMaxOpc)
Given a MinMaxOpc of ISD::(U|S)MIN or ISD::(U|S)MAX, returns the corresponding opcode with the opposi...
LLVM_ABI bool isBuildVectorOfConstantSDNodes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR node of all ConstantSDNode or undef.
LLVM_ABI NodeType getExtForLoadExtType(bool IsFP, LoadExtType)
bool isZEXTLoad(const SDNode *N)
Returns true if the specified node is a ZEXTLOAD.
bool isExtOpcode(unsigned Opcode)
LLVM_ABI bool isConstantSplatVectorAllZeros(const SDNode *N, bool BuildVectorOnly=false)
Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where all of the elements are 0 o...
LLVM_ABI NodeType getUnmaskedBinOpOpcode(unsigned MaskedOpc)
Given a MaskedOpc of ISD::MASKED_(U|S)(DIV|REM), returns the unmasked ISD::(U|S)(DIV|REM).
LLVM_ABI bool isVectorShrinkable(const SDNode *N, unsigned NewEltSize, bool Signed)
Returns true if the specified node is a vector where all elements can be truncated to the specified e...
LLVM_ABI bool isVPBinaryOp(unsigned Opcode)
Whether this is a vector-predicated binary operation opcode.
LLVM_ABI CondCode getSetCCInverse(CondCode Operation, EVT Type)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
LLVM_ABI std::optional< unsigned > getBaseOpcodeForVP(unsigned Opcode, bool hasFPExcept)
Translate this VP Opcode to its corresponding non-VP Opcode.
bool isBitwiseLogicOp(unsigned Opcode)
Whether this is bitwise logic opcode.
bool isTrueWhenEqual(CondCode Cond)
Return true if the specified condition returns true if the two operands to the condition are equal.
LLVM_ABI std::optional< unsigned > getVPMaskIdx(unsigned Opcode)
The operand position of the vector mask.
unsigned getUnorderedFlavor(CondCode Cond)
This function returns 0 if the condition is always false if an operand is a NaN, 1 if the condition i...
LLVM_ABI std::optional< unsigned > getVPExplicitVectorLengthIdx(unsigned Opcode)
The operand position of the explicit vector length parameter.
bool isEXTLoad(const SDNode *N)
Returns true if the specified node is a EXTLOAD.
LLVM_ABI bool allOperandsUndef(const SDNode *N)
Return true if the node has at least one operand and all operands of the specified node are ISD::UNDE...
LLVM_ABI bool isFreezeUndef(const SDNode *N)
Return true if the specified node is FREEZE(UNDEF).
LLVM_ABI CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y).
LLVM_ABI std::optional< unsigned > getVPForBaseOpcode(unsigned Opcode)
Translate this non-VP Opcode to its corresponding VP Opcode.
MemIndexType
MemIndexType enum - This enum defines how to interpret MGATHER/SCATTER's index parameter when calcula...
LLVM_ABI bool isBuildVectorAllZeros(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are 0 or undef.
bool matchUnaryPredicateImpl(SDValue Op, std::function< bool(ConstNodeType *)> Match, bool AllowUndefs=false, bool AllowTruncation=false)
Attempt to match a unary predicate against a scalar/splat constant or every element of a constant BUI...
LLVM_ABI bool isConstantSplatVector(const SDNode *N, APInt &SplatValue)
Node predicates.
LLVM_ABI NodeType getInverseMinMaxOpcode(unsigned MinMaxOpc)
Given a MinMaxOpc of ISD::(U|S)MIN or ISD::(U|S)MAX, returns ISD::(U|S)MAX and ISD::(U|S)MIN,...
LLVM_ABI bool matchBinaryPredicate(SDValue LHS, SDValue RHS, std::function< bool(ConstantSDNode *, ConstantSDNode *)> Match, bool AllowUndefs=false, bool AllowTypeMismatch=false)
Attempt to match a binary predicate against a pair of scalar/splat constants or every element of a pa...
LLVM_ABI bool isVPReduction(unsigned Opcode)
Whether this is a vector-predicated reduction opcode.
bool matchUnaryPredicate(SDValue Op, std::function< bool(ConstantSDNode *)> Match, bool AllowUndefs=false, bool AllowTruncation=false)
Hook for matching ConstantSDNode predicate.
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
LLVM_ABI bool isBuildVectorOfConstantFPSDNodes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR node of all ConstantFPSDNode or undef.
bool isSEXTLoad(const SDNode *N)
Returns true if the specified node is a SEXTLOAD.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
LLVM_ABI bool isBuildVectorAllOnes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are ~0 or undef.
LLVM_ABI NodeType getVecReduceBaseOpcode(unsigned VecReduceOpcode)
Get underlying scalar opcode for VECREDUCE opcode.
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
LLVM_ABI bool isVPOpcode(unsigned Opcode)
Whether this is a vector-predicated Opcode.
LLVM_ABI CondCode getSetCCOrOperation(CondCode Op1, CondCode Op2, EVT Type)
Return the result of a logical OR between different comparisons of identical values: ((X op1 Y) | (X ...
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
match_deferred< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
auto m_Value()
Match an arbitrary value and ignore it.
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
LLVM_ABI Libcall getMEMCPY_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize)
getMEMCPY_ELEMENT_UNORDERED_ATOMIC - Return MEMCPY_ELEMENT_UNORDERED_ATOMIC_* value for the given ele...
LLVM_ABI Libcall getMEMSET_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize)
getMEMSET_ELEMENT_UNORDERED_ATOMIC - Return MEMSET_ELEMENT_UNORDERED_ATOMIC_* value for the given ele...
LLVM_ABI Libcall getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize)
getMEMMOVE_ELEMENT_UNORDERED_ATOMIC - Return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_* value for the given e...
bool sd_match(SDNode *N, const SelectionDAG *DAG, Pattern &&P)
initializer< Ty > init(const Ty &Val)
@ DW_OP_LLVM_arg
Only used in LLVM metadata.
Definition Dwarf.h:149
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:668
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
This is an optimization pass for GlobalISel generic memory operations.
GenericUniformityInfo< SSAContext > UniformityInfo
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
Definition MathExtras.h:344
@ Offset
Definition DWP.cpp:557
bool operator<(int64_t V1, const APSInt &V2)
Definition APSInt.h:360
ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred)
getICmpCondCode - Return the ISD condition code corresponding to the given LLVM IR integer condition ...
Definition Analysis.cpp:237
void fill(R &&Range, T &&Value)
Provide wrappers to std::fill which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1758
LLVM_ABI SDValue peekThroughExtractSubvectors(SDValue V)
Return the non-extracted vector source operand of V if it exists.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1738
MaybeAlign getAlign(const CallInst &I, unsigned Index)
LLVM_ABI bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
LLVM_ABI bool isAllOnesOrAllOnesSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndefs=false)
Return true if the value is a constant -1 integer or a splatted vector of a constant -1 integer (with...
Definition Utils.cpp:1569
LLVM_ABI SDValue getBitwiseNotOperand(SDValue V, SDValue Mask, bool AllowUndefs)
If V is a bitwise not, returns the inverted operand.
@ Undef
Value of the register doesn't matter.
LLVM_ABI SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2553
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
Definition bit.h:315
bool isIntOrFPConstant(SDValue V)
Return true if V is either a integer or FP constant.
auto dyn_cast_if_present(const Y &Val)
dyn_cast_if_present<X> - Functionally identical to dyn_cast, except that a null (or none in the case ...
Definition Casting.h:732
LLVM_ABI bool getConstantDataArrayInfo(const Value *V, ConstantDataArraySlice &Slice, unsigned ElementSize, uint64_t Offset=0)
Returns true if the value V is a pointer into a ConstantDataArray.
LLVM_ABI bool isOneOrOneSplatFP(SDValue V, bool AllowUndefs=false)
Return true if the value is a constant floating-point value, or a splatted vector of a constant float...
int bit_width(T Value)
Returns the number of bits needed to represent Value if Value is nonzero.
Definition bit.h:325
LLVM_READONLY APFloat maximum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 maximum semantics.
Definition APFloat.h:1740
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2207
constexpr bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
Definition MathExtras.h:243
LLVM_ABI bool shouldOptimizeForSize(const MachineFunction *MF, ProfileSummaryInfo *PSI, const MachineBlockFrequencyInfo *BFI, PGSOQueryType QueryType=PGSOQueryType::Other)
Returns true if machine function MF is suggested to be size-optimized based on the profile.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:633
auto cast_or_null(const Y &Val)
Definition Casting.h:714
LLVM_ABI bool isNullOrNullSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndefs=false)
Return true if the value is a constant 0 integer or a splatted vector of a constant 0 integer (with n...
Definition Utils.cpp:1551
LLVM_ABI bool isMinSignedConstant(SDValue V)
Returns true if V is a constant min signed integer value.
LLVM_ABI ConstantFPSDNode * isConstOrConstSplatFP(SDValue N, bool AllowUndefs=false)
Returns the SDNode if it is a constant splat BuildVector or constant float.
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
APFloat frexp(const APFloat &X, int &Exp, APFloat::roundingMode RM)
Equivalent of C standard library function.
Definition APFloat.h:1652
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition bit.h:204
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1745
LLVM_ABI bool getShuffleDemandedElts(int SrcWidth, ArrayRef< int > Mask, const APInt &DemandedElts, APInt &DemandedLHS, APInt &DemandedRHS, bool AllowUndefElts=false)
Transform a shuffle mask's output demanded element mask into demanded element masks for the 2 operand...
LLVM_READONLY APFloat maxnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2008 maxNum semantics.
Definition APFloat.h:1695
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition MathExtras.h:331
LLVM_ABI bool isBitwiseNot(SDValue V, bool AllowUndefs=false)
Returns true if V is a bitwise not operation.
LLVM_ABI SDValue peekThroughInsertVectorElt(SDValue V, const APInt &DemandedElts)
Recursively peek through INSERT_VECTOR_ELT nodes, returning the source vector operand of V,...
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
LLVM_ABI void checkForCycles(const SelectionDAG *DAG, bool force=false)
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1635
LLVM_READONLY APFloat minimumnum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 minimumNumber semantics.
Definition APFloat.h:1726
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:209
LLVM_ABI SDValue peekThroughTruncates(SDValue V)
Return the non-truncated source operand of V if it exists.
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1752
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
constexpr std::underlying_type_t< Enum > to_underlying(Enum E)
Returns underlying integer value of an enum.
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
LLVM_ABI SDValue peekThroughOneUseBitcasts(SDValue V)
Return the non-bitcasted and one-use source operand of V if it exists.
CodeGenOptLevel
Code generation optimization level.
Definition CodeGen.h:82
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
bool includesPoison(UndefPoisonKind Kind)
Returns true if Kind includes the Poison bit.
Definition UndefPoison.h:27
LLVM_ABI bool isOneOrOneSplat(SDValue V, bool AllowUndefs=false)
Return true if the value is a constant 1 integer or a splatted vector of a constant 1 integer (with n...
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
@ Other
Any other memory.
Definition ModRef.h:68
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
Definition ModRef.h:74
bool includesUndef(UndefPoisonKind Kind)
Returns true if Kind includes the Undef bit.
Definition UndefPoison.h:33
LLVM_READONLY APFloat minnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2008 minNum semantics.
Definition APFloat.h:1676
@ Mul
Product of integers.
@ Sub
Subtraction of integers.
LLVM_ABI bool isNullConstantOrUndef(SDValue V)
Returns true if V is a constant integer zero or an UNDEF node.
bool isInTailCallPosition(const CallBase &Call, const TargetMachine &TM, bool ReturnsFirstArg=false)
Test if the given instruction is in a position to be optimized with a tail-call.
Definition Analysis.cpp:539
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI ConstantSDNode * isConstOrConstSplat(SDValue N, bool AllowUndefs=false, bool AllowTruncation=false)
Returns the SDNode if it is a constant splat BuildVector or constant int.
OutputIt copy(R &&Range, OutputIt Out)
Definition STLExtras.h:1884
constexpr unsigned BitWidth
bool funcReturnsFirstArgOfCall(const CallInst &CI)
Returns true if the parent of CI returns CI's first argument after calling CI.
Definition Analysis.cpp:719
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI bool isZeroOrZeroSplat(SDValue N, bool AllowUndefs=false)
Return true if the value is a constant 0 integer or a splatted vector of a constant 0 integer (with n...
LLVM_ABI bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
UndefPoisonKind
Enumeration to track whether we are interested in Undef, Poison, or both.
Definition UndefPoison.h:20
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1946
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
Definition Alignment.h:201
LLVM_ABI bool isNullFPConstant(SDValue V)
Returns true if V is an FP constant with a value of positive zero.
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
Definition MathExtras.h:572
unsigned Log2(Align A)
Returns the log2 of the alignment.
Definition Alignment.h:197
LLVM_ABI bool isZeroOrZeroSplatFP(SDValue N, bool AllowUndefs=false)
Return true if the value is a constant (+/-)0.0 floating-point value or a splatted vector thereof (wi...
LLVM_ABI void computeKnownBitsFromRangeMetadata(const MDNode &Ranges, KnownBits &Known)
Compute known bits from the range metadata.
LLVM_READONLY APFloat minimum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 minimum semantics.
Definition APFloat.h:1713
LLVM_READONLY APFloat maximumnum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 maximumNumber semantics.
Definition APFloat.h:1753
LLVM_ABI bool isOnesOrOnesSplat(SDValue N, bool AllowUndefs=false)
Return true if the value is a constant 1 integer or a splatted vector of a constant 1 integer (with n...
LLVM_ABI bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
constexpr uint64_t NextPowerOf2(uint64_t A)
Returns the next power of two (in 64-bits) that is strictly greater than A.
Definition MathExtras.h:373
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
Definition Error.cpp:177
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:876
#define N
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Definition Metadata.h:763
MDNode * TBAAStruct
The tag for type-based alias analysis (tbaa struct).
Definition Metadata.h:783
MDNode * TBAA
The tag for type-based alias analysis.
Definition Metadata.h:780
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
Represents offset+length into a ConstantDataArray.
uint64_t Length
Length of the slice.
uint64_t Offset
Slice starts at this Offset.
void move(uint64_t Delta)
Moves the Offset and adjusts Length accordingly.
const ConstantDataArray * Array
ConstantDataArray pointer.
Extended Value Type.
Definition ValueTypes.h:35
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
Definition ValueTypes.h:403
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Definition ValueTypes.h:145
intptr_t getRawBits() const
Definition ValueTypes.h:528
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
Definition ValueTypes.h:70
EVT changeTypeToInteger() const
Return the type converted to an equivalently sized integer or vector with integer element type.
Definition ValueTypes.h:129
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
Definition ValueTypes.h:292
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
Definition ValueTypes.h:308
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
Definition ValueTypes.h:155
ElementCount getVectorElementCount() const
Definition ValueTypes.h:358
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition ValueTypes.h:381
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
Definition ValueTypes.h:367
uint64_t getScalarSizeInBits() const
Definition ValueTypes.h:393
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition ValueTypes.h:324
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
Definition ValueTypes.h:61
bool isFixedLengthVector() const
Definition ValueTypes.h:189
bool isVector() const
Return true if this is a vector value type.
Definition ValueTypes.h:176
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
Definition ValueTypes.h:331
bool bitsGE(EVT VT) const
Return true if this has no less bits than VT.
Definition ValueTypes.h:300
bool bitsEq(EVT VT) const
Return true if this has the same number of bits as VT.
Definition ValueTypes.h:264
LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
Definition ValueTypes.h:182
EVT getVectorElementType() const
Given a vector type, return the type of each element.
Definition ValueTypes.h:336
bool isExtended() const
Test if the given EVT is extended (as opposed to being simple).
Definition ValueTypes.h:150
LLVM_ABI const fltSemantics & getFltSemantics() const
Returns an APFloat semantics tag appropriate for the value type.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
Definition ValueTypes.h:344
bool bitsLE(EVT VT) const
Return true if this has no more bits than VT.
Definition ValueTypes.h:316
EVT getHalfNumVectorElementsVT(LLVMContext &Context) const
Definition ValueTypes.h:469
bool isInteger() const
Return true if this is an integer or a vector integer type.
Definition ValueTypes.h:160
static KnownBits makeConstant(const APInt &C)
Create known bits from a known constant.
Definition KnownBits.h:315
LLVM_ABI KnownBits sextInReg(unsigned SrcBitWidth) const
Return known bits for a in-register sign extension of the value we're tracking.
static LLVM_ABI KnownBits mulhu(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits from zero-extended multiply-hi.
unsigned countMinSignBits() const
Returns the number of times the sign bit is replicated into the other bits.
Definition KnownBits.h:269
static LLVM_ABI KnownBits smax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smax(LHS, RHS).
bool isNonNegative() const
Returns true if this value is known to be non-negative.
Definition KnownBits.h:106
bool isZero() const
Returns true if value is all zero.
Definition KnownBits.h:78
void makeNonNegative()
Make this value non-negative.
Definition KnownBits.h:125
static LLVM_ABI KnownBits usub_sat(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from llvm.usub.sat(LHS, RHS)
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
Definition KnownBits.h:256
static LLVM_ABI KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for ashr(LHS, RHS).
static LLVM_ABI KnownBits urem(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for urem(LHS, RHS).
bool isUnknown() const
Returns true if we don't know any bits.
Definition KnownBits.h:64
unsigned countMaxTrailingZeros() const
Returns the maximum number of trailing zero bits possible.
Definition KnownBits.h:288
static LLVM_ABI std::optional< bool > ne(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_NE result.
void makeNegative()
Make this value negative.
Definition KnownBits.h:120
void setAllConflict()
Make all bits known to be both zero and one.
Definition KnownBits.h:97
KnownBits trunc(unsigned BitWidth) const
Return known bits for a truncation of the value we're tracking.
Definition KnownBits.h:165
KnownBits byteSwap() const
Definition KnownBits.h:553
static LLVM_ABI KnownBits fshl(const KnownBits &LHS, const KnownBits &RHS, const APInt &Amt)
Compute known bits for fshl(LHS, RHS, Amt).
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
Definition KnownBits.h:303
void setAllZero()
Make all bits known to be zero and discard any previous information.
Definition KnownBits.h:84
KnownBits reverseBits() const
Definition KnownBits.h:557
KnownBits concat(const KnownBits &Lo) const
Concatenate the bits from Lo onto the bottom of *this.
Definition KnownBits.h:247
unsigned getBitWidth() const
Get the bit width of this value.
Definition KnownBits.h:44
static LLVM_ABI KnownBits umax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umax(LHS, RHS).
KnownBits zext(unsigned BitWidth) const
Return known bits for a zero extension of the value we're tracking.
Definition KnownBits.h:176
bool isConstant() const
Returns true if we know the value of all bits.
Definition KnownBits.h:54
void resetAll()
Resets the known state of all bits.
Definition KnownBits.h:72
static KnownBits add(const KnownBits &LHS, const KnownBits &RHS, bool NSW=false, bool NUW=false, bool SelfAdd=false)
Compute knownbits resulting from addition of LHS and RHS.
Definition KnownBits.h:361
static LLVM_ABI KnownBits lshr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for lshr(LHS, RHS).
bool isNonZero() const
Returns true if this value is known to be non-zero.
Definition KnownBits.h:109
static LLVM_ABI KnownBits abdu(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for abdu(LHS, RHS).
KnownBits extractBits(unsigned NumBits, unsigned BitPosition) const
Return a subset of the known bits from [bitPosition,bitPosition+numBits).
Definition KnownBits.h:239
static LLVM_ABI KnownBits avgFloorU(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from APIntOps::avgFloorU.
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
Definition KnownBits.h:325
KnownBits sext(unsigned BitWidth) const
Return known bits for a sign extension of the value we're tracking.
Definition KnownBits.h:184
static LLVM_ABI KnownBits computeForSubBorrow(const KnownBits &LHS, KnownBits RHS, const KnownBits &Borrow)
Compute known bits results from subtracting RHS from LHS with 1-bit Borrow.
KnownBits zextOrTrunc(unsigned BitWidth) const
Return known bits for a zero extension or truncation of the value we're tracking.
Definition KnownBits.h:200
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
Definition KnownBits.h:146
static LLVM_ABI KnownBits fshr(const KnownBits &LHS, const KnownBits &RHS, const APInt &Amt)
Compute known bits for fshr(LHS, RHS, Amt).
static LLVM_ABI KnownBits abds(KnownBits LHS, KnownBits RHS)
Compute known bits for abds(LHS, RHS).
static LLVM_ABI KnownBits smin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smin(LHS, RHS).
static LLVM_ABI KnownBits mulhs(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits from sign-extended multiply-hi.
static LLVM_ABI KnownBits srem(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for srem(LHS, RHS).
static LLVM_ABI KnownBits udiv(const KnownBits &LHS, const KnownBits &RHS, bool Exact=false)
Compute known bits for udiv(LHS, RHS).
bool isStrictlyPositive() const
Returns true if this value is known to be positive.
Definition KnownBits.h:112
static LLVM_ABI KnownBits sdiv(const KnownBits &LHS, const KnownBits &RHS, bool Exact=false)
Compute known bits for sdiv(LHS, RHS).
static LLVM_ABI KnownBits avgFloorS(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from APIntOps::avgFloorS.
static bool haveNoCommonBitsSet(const KnownBits &LHS, const KnownBits &RHS)
Return true if LHS and RHS have no common bits set.
Definition KnownBits.h:340
bool isNegative() const
Returns true if this value is known to be negative.
Definition KnownBits.h:103
LLVM_ABI KnownBits truncSSat(unsigned BitWidth) const
Truncate with signed saturation (signed input -> signed output)
static LLVM_ABI KnownBits computeForAddCarry(const KnownBits &LHS, const KnownBits &RHS, const KnownBits &Carry)
Compute known bits resulting from adding LHS, RHS and a 1-bit Carry.
Definition KnownBits.cpp:54
static KnownBits sub(const KnownBits &LHS, const KnownBits &RHS, bool NSW=false, bool NUW=false)
Compute knownbits resulting from subtraction of LHS and RHS.
Definition KnownBits.h:376
unsigned countMaxLeadingZeros() const
Returns the maximum number of leading zero bits possible.
Definition KnownBits.h:294
void insertBits(const KnownBits &SubBits, unsigned BitPosition)
Insert the bits from a smaller known bits starting at bitPosition.
Definition KnownBits.h:233
static LLVM_ABI KnownBits avgCeilU(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from APIntOps::avgCeilU.
static LLVM_ABI KnownBits mul(const KnownBits &LHS, const KnownBits &RHS, bool NoUndefSelfMultiply=false)
Compute known bits resulting from multiplying LHS and RHS.
KnownBits anyext(unsigned BitWidth) const
Return known bits for an "any" extension of the value we're tracking, where we don't know anything about the top bits.
Definition KnownBits.h:171
static LLVM_ABI KnownBits clmul(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for clmul(LHS, RHS).
LLVM_ABI KnownBits abs(bool IntMinIsPoison=false) const
Compute known bits for the absolute value.
LLVM_ABI KnownBits truncUSat(unsigned BitWidth) const
Truncate with unsigned saturation (unsigned input -> unsigned output)
static LLVM_ABI KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
static LLVM_ABI KnownBits umin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umin(LHS, RHS).
LLVM_ABI KnownBits truncSSatU(unsigned BitWidth) const
Truncate with signed saturation to unsigned (signed input -> unsigned output)
static LLVM_ABI KnownBits avgCeilS(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from APIntOps::avgCeilS.
const APInt & getConstant() const
Returns the value when all bits have a known value.
Definition KnownBits.h:58
FPClassTest KnownFPClasses
Floating-point classes the value could be one of.
void copysign(const KnownFPClass &Sign)
LLVM_ABI bool isKnownNeverLogicalZero(DenormalMode Mode) const
Return true if it's known this can never be interpreted as a zero.
bool isUnknown() const
KnownFPClass intersectWith(const KnownFPClass &RHS) const
std::optional< bool > SignBit
std::nullopt if the sign bit is unknown, true if the sign bit is definitely set or false if the sign bit is definitely unset.
bool isKnownNever(FPClassTest Mask) const
Return true if it's known this can never be one of the mask entries.
static LLVM_ABI KnownFPClass bitcast(const fltSemantics &FltSemantics, const KnownBits &Bits)
Report known values for a bitcast into a float with provided semantics.
This class contains a discriminated union of information about pointers in memory operands,...
LLVM_ABI bool isDereferenceable(unsigned Size, LLVMContext &C, const DataLayout &DL) const
Return true if memory region [V, V+Offset+Size) is known to be dereferenceable.
LLVM_ABI unsigned getAddrSpace() const
Return the LLVM IR address space number that this pointer points into.
PointerUnion< const Value *, const PseudoSourceValue * > V
This is the IR pointer value for the access, or it is null if unknown.
MachinePointerInfo getWithOffset(int64_t O) const
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition Alignment.h:106
static MemOp Set(uint64_t Size, bool DstAlignCanChange, Align DstAlign, bool IsZeroMemset, bool IsVolatile)
static MemOp Copy(uint64_t Size, bool DstAlignCanChange, Align DstAlign, Align SrcAlign, bool IsVolatile, bool MemcpyStrSrc=false)
static StringRef getLibcallImplName(RTLIB::LibcallImpl CallImpl)
Get the libcall routine name for the specified libcall implementation.
These are IR-level optimization flags that may be propagated to SDNodes.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
unsigned int NumVTs
Clients of various APIs that cause global effects on the DAG can optionally implement this interface.
virtual void NodeDeleted(SDNode *N, SDNode *E)
The node N that was deleted and, if E is not null, an equivalent node E that replaced it.
virtual void NodeInserted(SDNode *N)
The node N that was inserted.
virtual void NodeUpdated(SDNode *N)
The node N that was updated.
This structure contains all information that is necessary for lowering calls.
CallLoweringInfo & setLibCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList)
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setTailCall(bool Value=true)
CallLoweringInfo & setChain(SDValue InChain)