LLVM 23.0.0git
LegalizerHelper.cpp
Go to the documentation of this file.
1//===-- llvm/CodeGen/GlobalISel/LegalizerHelper.cpp -----------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file This file implements the LegalizerHelper class to legalize
10/// individual instructions and the LegalizeMachineIR wrapper pass for the
11/// primary legalization.
12//
13//===----------------------------------------------------------------------===//
14
36#include "llvm/Support/Debug.h"
40#include <cassert>
41#include <numeric>
42#include <optional>
43
44#define DEBUG_TYPE "legalizer"
45
46using namespace llvm;
47using namespace LegalizeActions;
48using namespace MIPatternMatch;
49
50/// Try to break down \p OrigTy into \p NarrowTy sized pieces.
51///
52/// Returns the number of \p NarrowTy elements needed to reconstruct \p OrigTy,
53/// with any leftover piece as type \p LeftoverTy
54///
55/// Returns -1 in the first element of the pair if the breakdown is not
56/// satisfiable.
57static std::pair<int, int>
58getNarrowTypeBreakDown(LLT OrigTy, LLT NarrowTy, LLT &LeftoverTy) {
59 assert(!LeftoverTy.isValid() && "this is an out argument");
60
61 unsigned Size = OrigTy.getSizeInBits();
62 unsigned NarrowSize = NarrowTy.getSizeInBits();
63 unsigned NumParts = Size / NarrowSize;
64 unsigned LeftoverSize = Size - NumParts * NarrowSize;
65 assert(Size > NarrowSize);
66
67 if (LeftoverSize == 0)
68 return {NumParts, 0};
69
70 if (NarrowTy.isVector()) {
71 unsigned EltSize = OrigTy.getScalarSizeInBits();
72 if (LeftoverSize % EltSize != 0)
73 return {-1, -1};
74 LeftoverTy = OrigTy.changeElementCount(
75 ElementCount::getFixed(LeftoverSize / EltSize));
76 } else {
77 LeftoverTy = LLT::integer(LeftoverSize);
78 }
79
80 int NumLeftover = LeftoverSize / LeftoverTy.getSizeInBits();
81 return std::make_pair(NumParts, NumLeftover);
82}
83

// Map a scalar LLT bit-width to the matching IR floating-point type, or
// return null when there is no such type.
// NOTE(review): the declaration line of this helper is not visible in this
// listing.
  if (!Ty.isScalar())
    return nullptr;

  switch (Ty.getSizeInBits()) {
  case 16:
    return Type::getHalfTy(Ctx);
  case 32:
    return Type::getFloatTy(Ctx);
  case 64:
    return Type::getDoubleTy(Ctx);
  case 80:
    // x87 extended precision.
    return Type::getX86_FP80Ty(Ctx);
  case 128:
    return Type::getFP128Ty(Ctx);
  default:
    // No IR float type of this width is handled here.
    return nullptr;
  }
}
104
107 MachineIRBuilder &Builder,
108 const LibcallLoweringInfo *Libcalls)
109 : MIRBuilder(Builder), Observer(Observer), MRI(MF.getRegInfo()),
110 LI(*MF.getSubtarget().getLegalizerInfo()),
111 TLI(*MF.getSubtarget().getTargetLowering()), Libcalls(Libcalls) {}
112
116 const LibcallLoweringInfo *Libcalls,
118 : MIRBuilder(B), Observer(Observer), MRI(MF.getRegInfo()), LI(LI),
119 TLI(*MF.getSubtarget().getTargetLowering()), Libcalls(Libcalls), VT(VT) {}
120
                                   LostDebugLocObserver &LocObserver) {
  LLVM_DEBUG(dbgs() << "\nLegalizing: " << MI);

  // Point the builder at MI so newly built instructions are inserted there
  // and inherit its debug location.
  MIRBuilder.setInstrAndDebugLoc(MI);

  // Intrinsics go straight to the target hook; the generic action table
  // below does not apply to them.
  if (isa<GIntrinsic>(MI))
    return LI.legalizeIntrinsic(*this, MI) ? Legalized : UnableToLegalize;
  // Query the legalization action table and dispatch on the chosen action.
  auto Step = LI.getAction(MI, MRI);
  switch (Step.Action) {
  case Legal:
    LLVM_DEBUG(dbgs() << ".. Already legal\n");
    return AlreadyLegal;
  case Libcall:
    LLVM_DEBUG(dbgs() << ".. Convert to libcall\n");
    return libcall(MI, LocObserver);
  case NarrowScalar:
    LLVM_DEBUG(dbgs() << ".. Narrow scalar\n");
    return narrowScalar(MI, Step.TypeIdx, Step.NewType);
  case WidenScalar:
    LLVM_DEBUG(dbgs() << ".. Widen scalar\n");
    return widenScalar(MI, Step.TypeIdx, Step.NewType);
  case Bitcast:
    LLVM_DEBUG(dbgs() << ".. Bitcast type\n");
    return bitcast(MI, Step.TypeIdx, Step.NewType);
  case Lower:
    LLVM_DEBUG(dbgs() << ".. Lower\n");
    return lower(MI, Step.TypeIdx, Step.NewType);
  case FewerElements:
    LLVM_DEBUG(dbgs() << ".. Reduce number of elements\n");
    return fewerElementsVector(MI, Step.TypeIdx, Step.NewType);
  case MoreElements:
    LLVM_DEBUG(dbgs() << ".. Increase number of elements\n");
    return moreElementsVector(MI, Step.TypeIdx, Step.NewType);
  case Custom:
    LLVM_DEBUG(dbgs() << ".. Custom legalization\n");
    return LI.legalizeCustom(*this, MI, LocObserver) ? Legalized
  // NOTE(review): the ": UnableToLegalize;" continuation of the line above is
  // not visible in this listing.
  default:
    LLVM_DEBUG(dbgs() << ".. Unable to legalize\n");
    return UnableToLegalize;
  }
}
165
166void LegalizerHelper::insertParts(Register DstReg,
167 LLT ResultTy, LLT PartTy,
168 ArrayRef<Register> PartRegs,
169 LLT LeftoverTy,
170 ArrayRef<Register> LeftoverRegs) {
171 if (!LeftoverTy.isValid()) {
172 assert(LeftoverRegs.empty());
173
174 if (!ResultTy.isVector()) {
175 MIRBuilder.buildMergeLikeInstr(DstReg, PartRegs);
176 return;
177 }
178
179 if (PartTy.isVector())
180 MIRBuilder.buildConcatVectors(DstReg, PartRegs);
181 else
182 MIRBuilder.buildBuildVector(DstReg, PartRegs);
183 return;
184 }
185
186 // Merge sub-vectors with different number of elements and insert into DstReg.
187 if (ResultTy.isVector()) {
188 assert(LeftoverRegs.size() == 1 && "Expected one leftover register");
189 SmallVector<Register, 8> AllRegs(PartRegs);
190 AllRegs.append(LeftoverRegs.begin(), LeftoverRegs.end());
191 return mergeMixedSubvectors(DstReg, AllRegs);
192 }
193
194 SmallVector<Register> GCDRegs;
195 LLT GCDTy = getGCDType(getGCDType(ResultTy, LeftoverTy), PartTy);
196 for (auto PartReg : concat<const Register>(PartRegs, LeftoverRegs))
197 extractGCDType(GCDRegs, GCDTy, PartReg);
198 LLT ResultLCMTy = buildLCMMergePieces(ResultTy, LeftoverTy, GCDTy, GCDRegs);
199 buildWidenedRemergeToDst(DstReg, ResultLCMTy, GCDRegs);
200}
201
/// Split vector register \p Reg into its scalar elements and append them to
/// \p Elts.
void LegalizerHelper::appendVectorElts(SmallVectorImpl<Register> &Elts,
                                       Register Reg) {
  LLT Ty = MRI.getType(Reg);
  // NOTE(review): the declaration of RegElts (one line of this listing) is
  // not visible here; extractParts fills it with one register per element.
  extractParts(Reg, Ty.getScalarType(), Ty.getNumElements(), RegElts,
               MIRBuilder, MRI);
  Elts.append(RegElts);
}
210
/// Merge \p PartRegs with different types into \p DstReg.
void LegalizerHelper::mergeMixedSubvectors(Register DstReg,
                                           ArrayRef<Register> PartRegs) {
  // NOTE(review): the declaration of AllElts (one line of this listing) is
  // not visible here; it accumulates the scalar elements of every part.
  // All parts except the last are flattened into their scalar elements.
  for (unsigned i = 0; i < PartRegs.size() - 1; ++i)
    appendVectorElts(AllElts, PartRegs[i]);

  // The trailing leftover piece may be a single scalar element or a smaller
  // sub-vector; handle both.
  Register Leftover = PartRegs[PartRegs.size() - 1];
  if (!MRI.getType(Leftover).isVector())
    AllElts.push_back(Leftover);
  else
    appendVectorElts(AllElts, Leftover);

  MIRBuilder.buildMergeLikeInstr(DstReg, AllElts);
}
226
/// Append the result registers of G_UNMERGE_VALUES \p MI to \p Regs.
// NOTE(review): the first line of this function's signature is not visible in
// this listing.
                              const MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);

  const int StartIdx = Regs.size();
  // Every operand except the final source operand is a result def.
  const int NumResults = MI.getNumOperands() - 1;
  Regs.resize(Regs.size() + NumResults);
  for (int I = 0; I != NumResults; ++I)
    Regs[StartIdx + I] = MI.getOperand(I).getReg();
}
238
239void LegalizerHelper::extractGCDType(SmallVectorImpl<Register> &Parts,
240 LLT GCDTy, Register SrcReg) {
241 LLT SrcTy = MRI.getType(SrcReg);
242 if (SrcTy == GCDTy) {
243 // If the source already evenly divides the result type, we don't need to do
244 // anything.
245 Parts.push_back(SrcReg);
246 } else {
247 // Need to split into common type sized pieces.
248 auto Unmerge = MIRBuilder.buildUnmerge(GCDTy, SrcReg);
249 getUnmergeResults(Parts, *Unmerge);
250 }
251}
252
253LLT LegalizerHelper::extractGCDType(SmallVectorImpl<Register> &Parts, LLT DstTy,
254 LLT NarrowTy, Register SrcReg) {
255 LLT SrcTy = MRI.getType(SrcReg);
256 LLT GCDTy = getGCDType(getGCDType(SrcTy, NarrowTy), DstTy);
257 extractGCDType(Parts, GCDTy, SrcReg);
258 return GCDTy;
259}
260
/// Merge the GCD-typed pieces in the in-out register list up to NarrowTy
/// sized values covering the LCM of \p DstTy and \p NarrowTy, padding with
/// \p PadStrategy (G_ZEXT, G_ANYEXT or G_SEXT) when the sources do not cover
/// the LCM type. Returns the LCM type.
LLT LegalizerHelper::buildLCMMergePieces(LLT DstTy, LLT NarrowTy, LLT GCDTy,
// NOTE(review): the parameter line declaring the in-out register list
// (referred to as VRegs below) is not visible in this listing.
                                         unsigned PadStrategy) {
  LLT LCMTy = getLCMType(DstTy, NarrowTy);

  // How many NarrowTy values cover the LCM type, and how many GCDTy pieces
  // make up one NarrowTy value.
  int NumParts = LCMTy.getSizeInBits() / NarrowTy.getSizeInBits();
  int NumSubParts = NarrowTy.getSizeInBits() / GCDTy.getSizeInBits();
  int NumOrigSrc = VRegs.size();

  Register PadReg;

  // Get a value we can use to pad the source value if the sources won't evenly
  // cover the result type.
  if (NumOrigSrc < NumParts * NumSubParts) {
    if (PadStrategy == TargetOpcode::G_ZEXT)
      PadReg = MIRBuilder.buildConstant(GCDTy, 0).getReg(0);
    else if (PadStrategy == TargetOpcode::G_ANYEXT)
      PadReg = MIRBuilder.buildUndef(GCDTy).getReg(0);
    else {
      assert(PadStrategy == TargetOpcode::G_SEXT);

      // Shift the sign bit of the low register through the high register.
      auto ShiftAmt =
          MIRBuilder.buildConstant(LLT::integer(64), GCDTy.getSizeInBits() - 1);
      PadReg = MIRBuilder.buildAShr(GCDTy, VRegs.back(), ShiftAmt).getReg(0);
    }
  }

  // Registers for the final merge to be produced.
  SmallVector<Register, 4> Remerge(NumParts);

  // Registers needed for intermediate merges, which will be merged into a
  // source for Remerge.
  SmallVector<Register, 4> SubMerge(NumSubParts);

  // Once we've fully read off the end of the original source bits, we can reuse
  // the same high bits for remaining padding elements.
  Register AllPadReg;

  // Build merges to the LCM type to cover the original result type.
  for (int I = 0; I != NumParts; ++I) {
    bool AllMergePartsArePadding = true;

    // Build the requested merges to the requested type.
    for (int J = 0; J != NumSubParts; ++J) {
      int Idx = I * NumSubParts + J;
      if (Idx >= NumOrigSrc) {
        // Ran off the end of the sources; fill with the pad value.
        SubMerge[J] = PadReg;
        continue;
      }

      SubMerge[J] = VRegs[Idx];

      // There are meaningful bits here we can't reuse later.
      AllMergePartsArePadding = false;
    }

    // If we've filled up a complete piece with padding bits, we can directly
    // emit the natural sized constant if applicable, rather than a merge of
    // smaller constants.
    if (AllMergePartsArePadding && !AllPadReg) {
      if (PadStrategy == TargetOpcode::G_ANYEXT)
        AllPadReg = MIRBuilder.buildUndef(NarrowTy).getReg(0);
      else if (PadStrategy == TargetOpcode::G_ZEXT)
        AllPadReg = MIRBuilder.buildConstant(NarrowTy, 0).getReg(0);

      // If this is a sign extension, we can't materialize a trivial constant
      // with the right type and have to produce a merge.
    }

    if (AllPadReg) {
      // Avoid creating additional instructions if we're just adding additional
      // copies of padding bits.
      Remerge[I] = AllPadReg;
      continue;
    }

    if (NumSubParts == 1)
      Remerge[I] = SubMerge[0];
    else
      Remerge[I] = MIRBuilder.buildMergeLikeInstr(NarrowTy, SubMerge).getReg(0);

    // In the sign extend padding case, re-use the first all-signbit merge.
    if (AllMergePartsArePadding && !AllPadReg)
      AllPadReg = Remerge[I];
  }

  // Hand the merged NarrowTy pieces back to the caller through the in-out
  // register list.
  VRegs = std::move(Remerge);
  return LCMTy;
}
351
352void LegalizerHelper::buildWidenedRemergeToDst(Register DstReg, LLT LCMTy,
353 ArrayRef<Register> RemergeRegs) {
354 LLT DstTy = MRI.getType(DstReg);
355
356 // Create the merge to the widened source, and extract the relevant bits into
357 // the result.
358
359 if (DstTy == LCMTy) {
360 MIRBuilder.buildMergeLikeInstr(DstReg, RemergeRegs);
361 return;
362 }
363
364 auto Remerge = MIRBuilder.buildMergeLikeInstr(LCMTy, RemergeRegs);
365 if (DstTy.isScalar() && LCMTy.isScalar()) {
366 MIRBuilder.buildTrunc(DstReg, Remerge);
367 return;
368 }
369
370 if (LCMTy.isVector()) {
371 unsigned NumDefs = LCMTy.getSizeInBits() / DstTy.getSizeInBits();
372 SmallVector<Register, 8> UnmergeDefs(NumDefs);
373 UnmergeDefs[0] = DstReg;
374 for (unsigned I = 1; I != NumDefs; ++I)
375 UnmergeDefs[I] = MRI.createGenericVirtualRegister(DstTy);
376
377 MIRBuilder.buildUnmerge(UnmergeDefs,
378 MIRBuilder.buildMergeLikeInstr(LCMTy, RemergeRegs));
379 return;
380 }
381
382 llvm_unreachable("unhandled case");
383}
384
/// Map a generic opcode to the RTLIB libcall entry specialized to the scalar
/// bit-width \p Size. Integer libcalls support 32/64/128 bits; floating-point
/// libcalls additionally support the 80-bit x87 width.
static RTLIB::Libcall getRTLibDesc(unsigned Opcode, unsigned Size) {
// Expand to a size-dispatching switch for an integer libcall family.
#define RTLIBCASE_INT(LibcallPrefix)                                           \
  do {                                                                         \
    switch (Size) {                                                            \
    case 32:                                                                   \
      return RTLIB::LibcallPrefix##32;                                         \
    case 64:                                                                   \
      return RTLIB::LibcallPrefix##64;                                         \
    case 128:                                                                  \
      return RTLIB::LibcallPrefix##128;                                        \
    default:                                                                   \
      llvm_unreachable("unexpected size");                                     \
    }                                                                          \
  } while (0)

// Expand to a size-dispatching switch for a floating-point libcall family,
// which also has an 80-bit variant.
#define RTLIBCASE(LibcallPrefix)                                               \
  do {                                                                         \
    switch (Size) {                                                            \
    case 32:                                                                   \
      return RTLIB::LibcallPrefix##32;                                         \
    case 64:                                                                   \
      return RTLIB::LibcallPrefix##64;                                         \
    case 80:                                                                   \
      return RTLIB::LibcallPrefix##80;                                         \
    case 128:                                                                  \
      return RTLIB::LibcallPrefix##128;                                        \
    default:                                                                   \
      llvm_unreachable("unexpected size");                                     \
    }                                                                          \
  } while (0)

  switch (Opcode) {
  case TargetOpcode::G_LROUND:
    RTLIBCASE(LROUND_F);
  case TargetOpcode::G_LLROUND:
    RTLIBCASE(LLROUND_F);
  // Integer arithmetic libcalls.
  case TargetOpcode::G_MUL:
    RTLIBCASE_INT(MUL_I);
  case TargetOpcode::G_SDIV:
    RTLIBCASE_INT(SDIV_I);
  case TargetOpcode::G_UDIV:
    RTLIBCASE_INT(UDIV_I);
  case TargetOpcode::G_SREM:
    RTLIBCASE_INT(SREM_I);
  case TargetOpcode::G_UREM:
    RTLIBCASE_INT(UREM_I);
  case TargetOpcode::G_CTLZ_ZERO_POISON:
    RTLIBCASE_INT(CTLZ_I);
  // Floating-point libcalls.
  case TargetOpcode::G_FADD:
    RTLIBCASE(ADD_F);
  case TargetOpcode::G_FSUB:
    RTLIBCASE(SUB_F);
  case TargetOpcode::G_FMUL:
    RTLIBCASE(MUL_F);
  case TargetOpcode::G_FDIV:
    RTLIBCASE(DIV_F);
  case TargetOpcode::G_FEXP:
    RTLIBCASE(EXP_F);
  case TargetOpcode::G_FEXP2:
    RTLIBCASE(EXP2_F);
  case TargetOpcode::G_FEXP10:
    RTLIBCASE(EXP10_F);
  case TargetOpcode::G_FREM:
    RTLIBCASE(REM_F);
  case TargetOpcode::G_FPOW:
    RTLIBCASE(POW_F);
  case TargetOpcode::G_FPOWI:
    RTLIBCASE(POWI_F);
  case TargetOpcode::G_FMA:
    RTLIBCASE(FMA_F);
  case TargetOpcode::G_FSIN:
    RTLIBCASE(SIN_F);
  case TargetOpcode::G_FCOS:
    RTLIBCASE(COS_F);
  case TargetOpcode::G_FTAN:
    RTLIBCASE(TAN_F);
  case TargetOpcode::G_FASIN:
    RTLIBCASE(ASIN_F);
  case TargetOpcode::G_FACOS:
    RTLIBCASE(ACOS_F);
  case TargetOpcode::G_FATAN:
    RTLIBCASE(ATAN_F);
  case TargetOpcode::G_FATAN2:
    RTLIBCASE(ATAN2_F);
  case TargetOpcode::G_FSINH:
    RTLIBCASE(SINH_F);
  case TargetOpcode::G_FCOSH:
    RTLIBCASE(COSH_F);
  case TargetOpcode::G_FTANH:
    RTLIBCASE(TANH_F);
  case TargetOpcode::G_FSINCOS:
    RTLIBCASE(SINCOS_F);
  case TargetOpcode::G_FMODF:
    RTLIBCASE(MODF_F);
  case TargetOpcode::G_FLOG10:
    RTLIBCASE(LOG10_F);
  case TargetOpcode::G_FLOG:
    RTLIBCASE(LOG_F);
  case TargetOpcode::G_FLOG2:
    RTLIBCASE(LOG2_F);
  case TargetOpcode::G_FLDEXP:
    RTLIBCASE(LDEXP_F);
  case TargetOpcode::G_FCEIL:
    RTLIBCASE(CEIL_F);
  case TargetOpcode::G_FFLOOR:
    RTLIBCASE(FLOOR_F);
  case TargetOpcode::G_FMINNUM:
    RTLIBCASE(FMIN_F);
  case TargetOpcode::G_FMAXNUM:
    RTLIBCASE(FMAX_F);
  case TargetOpcode::G_FMINIMUMNUM:
    RTLIBCASE(FMINIMUM_NUM_F);
  case TargetOpcode::G_FMAXIMUMNUM:
    RTLIBCASE(FMAXIMUM_NUM_F);
  case TargetOpcode::G_FSQRT:
    RTLIBCASE(SQRT_F);
  case TargetOpcode::G_FRINT:
    RTLIBCASE(RINT_F);
  case TargetOpcode::G_FNEARBYINT:
    RTLIBCASE(NEARBYINT_F);
  case TargetOpcode::G_INTRINSIC_TRUNC:
    RTLIBCASE(TRUNC_F);
  case TargetOpcode::G_INTRINSIC_ROUND:
    RTLIBCASE(ROUND_F);
  case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
    RTLIBCASE(ROUNDEVEN_F);
  case TargetOpcode::G_INTRINSIC_LRINT:
    RTLIBCASE(LRINT_F);
  case TargetOpcode::G_INTRINSIC_LLRINT:
    RTLIBCASE(LLRINT_F);
  }
  llvm_unreachable("Unknown libcall function");
#undef RTLIBCASE_INT
#undef RTLIBCASE
}
520
/// True if an instruction is in tail position in its caller. Intended for
/// legalizing libcalls as tail calls when possible.
// NOTE(review): the first lines of this function's signature are not visible
// in this listing.
                                    const TargetInstrInfo &TII,
                                    MachineRegisterInfo &MRI) {
  MachineBasicBlock &MBB = *MI.getParent();
  const Function &F = MBB.getParent()->getFunction();

  // Conservatively require the attributes of the call to match those of
  // the return. Ignore NoAlias and NonNull because they don't affect the
  // call sequence.
  AttributeList CallerAttrs = F.getAttributes();
  if (AttrBuilder(F.getContext(), CallerAttrs.getRetAttrs())
          .removeAttribute(Attribute::NoAlias)
          .removeAttribute(Attribute::NonNull)
          .hasAttributes())
    return false;

  // It's not safe to eliminate the sign / zero extension of the return value.
  if (CallerAttrs.hasRetAttr(Attribute::ZExt) ||
      CallerAttrs.hasRetAttr(Attribute::SExt))
    return false;

  // Only tail call if the following instruction is a standard return or if we
  // have a `thisreturn` callee, and a sequence like:
  //
  //   G_MEMCPY %0, %1, %2
  //   $x0 = COPY %0
  //   RET_ReallyLR implicit $x0
  auto Next = next_nodbg(MI.getIterator(), MBB.instr_end());
  if (Next != MBB.instr_end() && Next->isCopy()) {
    // BZERO returns void, so a copy of its "result" can never be valid.
    if (MI.getOpcode() == TargetOpcode::G_BZERO)
      return false;

    // For MEMCPY/MEMMOVE/MEMSET these will be the first use (the dst), as the
    // memcpy/etc routines return the same parameter. For others it will be
    // the returned value.
    Register VReg = MI.getOperand(0).getReg();
    if (!VReg.isVirtual() || VReg != Next->getOperand(1).getReg())
      return false;

    Register PReg = Next->getOperand(0).getReg();
    if (!PReg.isPhysical())
      return false;

    // The COPY must feed directly into the return's implicit use.
    auto Ret = next_nodbg(Next, MBB.instr_end());
    if (Ret == MBB.instr_end() || !Ret->isReturn())
      return false;

    if (Ret->getNumImplicitOperands() != 1)
      return false;

    if (!Ret->getOperand(0).isReg() || PReg != Ret->getOperand(0).getReg())
      return false;

    // Skip over the COPY that we just validated.
    Next = Ret;
  }

  if (Next == MBB.instr_end() || TII.isTailCall(*Next) || !Next->isReturn())
    return false;

  return true;
}
586
588 const char *Name, const CallLowering::ArgInfo &Result,
590 LostDebugLocObserver &LocObserver, MachineInstr *MI) const {
591 auto &CLI = *MIRBuilder.getMF().getSubtarget().getCallLowering();
592
594 Info.CallConv = CC;
595 Info.Callee = MachineOperand::CreateES(Name);
596 Info.OrigRet = Result;
597 if (MI)
598 Info.IsTailCall =
599 (Result.Ty->isVoidTy() ||
600 Result.Ty == MIRBuilder.getMF().getFunction().getReturnType()) &&
601 isLibCallInTailPosition(Result, *MI, MIRBuilder.getTII(),
602 *MIRBuilder.getMRI());
603
604 llvm::append_range(Info.OrigArgs, Args);
605 if (!CLI.lowerCall(MIRBuilder, Info))
607
608 if (MI && Info.LoweredTailCall) {
609 assert(Info.IsTailCall && "Lowered tail call when it wasn't a tail call?");
610
611 // Check debug locations before removing the return.
612 LocObserver.checkpoint(true);
613
614 // We must have a return following the call (or debug insts) to get past
615 // isLibCallInTailPosition.
616 do {
617 MachineInstr *Next = MI->getNextNode();
618 assert(Next &&
619 (Next->isCopy() || Next->isReturn() || Next->isDebugInstr()) &&
620 "Expected instr following MI to be return or debug inst?");
621 // We lowered a tail call, so the call is now the return from the block.
622 // Delete the old return.
623 Next->eraseFromParent();
624 } while (MI->getNextNode());
625
626 // We expect to lose the debug location from the return.
627 LocObserver.checkpoint(false);
628 }
630}
631
633 RTLIB::Libcall Libcall, const CallLowering::ArgInfo &Result,
635 MachineInstr *MI) const {
636 if (!Libcalls)
638
639 RTLIB::LibcallImpl LibcallImpl = Libcalls->getLibcallImpl(Libcall);
640 if (LibcallImpl == RTLIB::Unsupported)
642
644 const CallingConv::ID CC = Libcalls->getLibcallImplCallingConv(LibcallImpl);
645 return createLibcall(Name.data(), Result, Args, CC, LocObserver, MI);
646}
647
648// Useful for libcalls where all operands have the same type.
651 unsigned Size, Type *OpType,
652 LostDebugLocObserver &LocObserver) const {
653 auto Libcall = getRTLibDesc(MI.getOpcode(), Size);
654
655 // FIXME: What does the original arg index mean here?
657 for (const MachineOperand &MO : llvm::drop_begin(MI.operands()))
658 Args.push_back({MO.getReg(), OpType, 0});
659 return createLibcall(Libcall, {MI.getOperand(0).getReg(), OpType, 0}, Args,
660 LocObserver, &MI);
661}
662
663LegalizerHelper::LegalizeResult LegalizerHelper::emitSincosLibcall(
664 MachineInstr &MI, MachineIRBuilder &MIRBuilder, unsigned Size, Type *OpType,
665 LostDebugLocObserver &LocObserver) {
666 MachineFunction &MF = *MI.getMF();
668
669 Register DstSin = MI.getOperand(0).getReg();
670 Register DstCos = MI.getOperand(1).getReg();
671 Register Src = MI.getOperand(2).getReg();
672 LLT DstTy = MRI.getType(DstSin);
673
674 int MemSize = DstTy.getSizeInBytes();
675 Align Alignment = getStackTemporaryAlignment(DstTy);
677 unsigned AddrSpace = DL.getAllocaAddrSpace();
678 MachinePointerInfo PtrInfo;
679
680 Register StackPtrSin =
681 createStackTemporary(TypeSize::getFixed(MemSize), Alignment, PtrInfo)
682 .getReg(0);
683 Register StackPtrCos =
684 createStackTemporary(TypeSize::getFixed(MemSize), Alignment, PtrInfo)
685 .getReg(0);
686
687 auto &Ctx = MF.getFunction().getContext();
688 auto LibcallResult = createLibcall(
689 getRTLibDesc(MI.getOpcode(), Size), {{0}, Type::getVoidTy(Ctx), 0},
690 {{Src, OpType, 0},
691 {StackPtrSin, PointerType::get(Ctx, AddrSpace), 1},
692 {StackPtrCos, PointerType::get(Ctx, AddrSpace), 2}},
693 LocObserver, &MI);
694
695 if (LibcallResult != LegalizeResult::Legalized)
697
699 PtrInfo, MachineMemOperand::MOLoad, MemSize, Alignment);
701 PtrInfo, MachineMemOperand::MOLoad, MemSize, Alignment);
702
703 MIRBuilder.buildLoad(DstSin, StackPtrSin, *LoadMMOSin);
704 MIRBuilder.buildLoad(DstCos, StackPtrCos, *LoadMMOCos);
705 MI.eraseFromParent();
706
708}
709
711LegalizerHelper::emitModfLibcall(MachineInstr &MI, MachineIRBuilder &MIRBuilder,
712 unsigned Size, Type *OpType,
713 LostDebugLocObserver &LocObserver) {
714 MachineFunction &MF = MIRBuilder.getMF();
715 MachineRegisterInfo &MRI = MF.getRegInfo();
716
717 Register DstFrac = MI.getOperand(0).getReg();
718 Register DstInt = MI.getOperand(1).getReg();
719 Register Src = MI.getOperand(2).getReg();
720 LLT DstTy = MRI.getType(DstFrac);
721
722 int MemSize = DstTy.getSizeInBytes();
723 Align Alignment = getStackTemporaryAlignment(DstTy);
724 const DataLayout &DL = MIRBuilder.getDataLayout();
725 unsigned AddrSpace = DL.getAllocaAddrSpace();
726 MachinePointerInfo PtrInfo;
727
728 Register StackPtrInt =
729 createStackTemporary(TypeSize::getFixed(MemSize), Alignment, PtrInfo)
730 .getReg(0);
731
732 auto &Ctx = MF.getFunction().getContext();
733 auto LibcallResult = createLibcall(
734 getRTLibDesc(MI.getOpcode(), Size), {DstFrac, OpType, 0},
735 {{Src, OpType, 0}, {StackPtrInt, PointerType::get(Ctx, AddrSpace), 1}},
736 LocObserver, &MI);
737
738 if (LibcallResult != LegalizeResult::Legalized)
740
742 PtrInfo, MachineMemOperand::MOLoad, MemSize, Alignment);
743
744 MIRBuilder.buildLoad(DstInt, StackPtrInt, *LoadMMOInt);
745 MI.eraseFromParent();
746
748}
749
750static RTLIB::Libcall getConvRTLibDesc(unsigned Opcode, Type *ToType,
751 Type *FromType) {
752 auto ToMVT = MVT::getVT(ToType);
753 auto FromMVT = MVT::getVT(FromType);
754
755 switch (Opcode) {
756 case TargetOpcode::G_FPEXT:
757 return RTLIB::getFPEXT(FromMVT, ToMVT);
758 case TargetOpcode::G_FPTRUNC:
759 return RTLIB::getFPROUND(FromMVT, ToMVT);
760 case TargetOpcode::G_FPTOSI:
761 return RTLIB::getFPTOSINT(FromMVT, ToMVT);
762 case TargetOpcode::G_FPTOUI:
763 return RTLIB::getFPTOUINT(FromMVT, ToMVT);
764 case TargetOpcode::G_SITOFP:
765 return RTLIB::getSINTTOFP(FromMVT, ToMVT);
766 case TargetOpcode::G_UITOFP:
767 return RTLIB::getUINTTOFP(FromMVT, ToMVT);
768 }
769 llvm_unreachable("Unsupported libcall function");
770}
771
773 MachineInstr &MI, Type *ToType, Type *FromType,
774 LostDebugLocObserver &LocObserver, bool IsSigned) const {
775 CallLowering::ArgInfo Arg = {MI.getOperand(1).getReg(), FromType, 0};
776 if (FromType->isIntegerTy()) {
777 if (TLI.shouldSignExtendTypeInLibCall(FromType, IsSigned))
778 Arg.Flags[0].setSExt();
779 else
780 Arg.Flags[0].setZExt();
781 }
782
783 RTLIB::Libcall Libcall = getConvRTLibDesc(MI.getOpcode(), ToType, FromType);
784 return createLibcall(Libcall, {MI.getOperand(0).getReg(), ToType, 0}, Arg,
785 LocObserver, &MI);
786}
787
                                  LostDebugLocObserver &LocObserver) const {
  // NOTE(review): several lines of this listing (the signature, the Args and
  // Info declarations, and a few early returns) were dropped by the
  // extraction; the comments below describe only the visible code.
  auto &Ctx = MIRBuilder.getMF().getFunction().getContext();

  // Add all the args, except for the last which is an imm denoting 'tail'.
  for (unsigned i = 0; i < MI.getNumOperands() - 1; ++i) {
    Register Reg = MI.getOperand(i).getReg();

    // Need to derive an IR type for call lowering.
    LLT OpLLT = MRI.getType(Reg);
    Type *OpTy = nullptr;
    if (OpLLT.isPointer())
      OpTy = PointerType::get(Ctx, OpLLT.getAddressSpace());
    else
      OpTy = IntegerType::get(Ctx, OpLLT.getSizeInBits());
    Args.push_back({Reg, OpTy, 0});
  }

  auto &CLI = *MIRBuilder.getMF().getSubtarget().getCallLowering();
  RTLIB::Libcall RTLibcall;
  unsigned Opc = MI.getOpcode();
  switch (Opc) {
  case TargetOpcode::G_BZERO:
    RTLibcall = RTLIB::BZERO;
    break;
  case TargetOpcode::G_MEMCPY:
    RTLibcall = RTLIB::MEMCPY;
    // memcpy returns its destination argument.
    Args[0].Flags[0].setReturned();
    break;
  case TargetOpcode::G_MEMMOVE:
    RTLibcall = RTLIB::MEMMOVE;
    Args[0].Flags[0].setReturned();
    break;
  case TargetOpcode::G_MEMSET:
    RTLibcall = RTLIB::MEMSET;
    Args[0].Flags[0].setReturned();
    break;
  default:
    llvm_unreachable("unsupported opcode");
  }

  if (!Libcalls) // FIXME: Should be mandatory

  RTLIB::LibcallImpl RTLibcallImpl = Libcalls->getLibcallImpl(RTLibcall);

  // Unsupported libcall on the target.
  if (RTLibcallImpl == RTLIB::Unsupported) {
    LLVM_DEBUG(dbgs() << ".. .. Could not find libcall name for "
                      << MIRBuilder.getTII().getName(Opc) << "\n");
  }

  Info.CallConv = Libcalls->getLibcallImplCallingConv(RTLibcallImpl);

  StringRef LibcallName =
  Info.Callee = MachineOperand::CreateES(LibcallName.data());
  Info.OrigRet = CallLowering::ArgInfo({0}, Type::getVoidTy(Ctx), 0);
  // Only tail-call when the 'tail' immediate is set AND the call sits in a
  // valid tail position.
  Info.IsTailCall =
      MI.getOperand(MI.getNumOperands() - 1).getImm() &&
      isLibCallInTailPosition(Info.OrigRet, MI, MIRBuilder.getTII(), MRI);

  llvm::append_range(Info.OrigArgs, Args);
  if (!CLI.lowerCall(MIRBuilder, Info))

  if (Info.LoweredTailCall) {
    assert(Info.IsTailCall && "Lowered tail call when it wasn't a tail call?");

    // Check debug locations before removing the return.
    LocObserver.checkpoint(true);

    // We must have a return following the call (or debug insts) to get past
    // isLibCallInTailPosition.
    do {
      MachineInstr *Next = MI.getNextNode();
      assert(Next &&
             (Next->isCopy() || Next->isReturn() || Next->isDebugInstr()) &&
             "Expected instr following MI to be return or debug inst?");
      // We lowered a tail call, so the call is now the return from the block.
      // Delete the old return.
      Next->eraseFromParent();
    } while (MI.getNextNode());

    // We expect to lose the debug location from the return.
    LocObserver.checkpoint(false);
  }

}
882
/// Map an atomic generic instruction to the matching outline-atomics helper
/// libcall, selected by operation, memory ordering and access size. Returns
/// UNKNOWN_LIBCALL when no helper applies (e.g. vector memory types).
static RTLIB::Libcall getOutlineAtomicLibcall(MachineInstr &MI) {
  unsigned Opc = MI.getOpcode();
  auto &AtomicMI = cast<GMemOperation>(MI);
  auto &MMO = AtomicMI.getMMO();
  auto Ordering = MMO.getMergedOrdering();
  LLT MemType = MMO.getMemoryType();
  uint64_t MemSize = MemType.getSizeInBytes();
  if (MemType.isVector())
    return RTLIB::UNKNOWN_LIBCALL;

// One entry per memory ordering: relaxed, acquire, release, acq_rel.
#define LCALLS(A, B) {A##B##_RELAX, A##B##_ACQ, A##B##_REL, A##B##_ACQ_REL}
// One LCALLS row per supported access size: 1, 2, 4, 8 and 16 bytes.
#define LCALL5(A)                                                              \
  LCALLS(A, 1), LCALLS(A, 2), LCALLS(A, 4), LCALLS(A, 8), LCALLS(A, 16)
  switch (Opc) {
  case TargetOpcode::G_ATOMIC_CMPXCHG:
  case TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS: {
    const RTLIB::Libcall LC[5][4] = {LCALL5(RTLIB::OUTLINE_ATOMIC_CAS)};
    return getOutlineAtomicHelper(LC, Ordering, MemSize);
  }
  case TargetOpcode::G_ATOMICRMW_XCHG: {
    const RTLIB::Libcall LC[5][4] = {LCALL5(RTLIB::OUTLINE_ATOMIC_SWP)};
    return getOutlineAtomicHelper(LC, Ordering, MemSize);
  }
  case TargetOpcode::G_ATOMICRMW_ADD:
  case TargetOpcode::G_ATOMICRMW_SUB: {
    const RTLIB::Libcall LC[5][4] = {LCALL5(RTLIB::OUTLINE_ATOMIC_LDADD)};
    return getOutlineAtomicHelper(LC, Ordering, MemSize);
  }
  case TargetOpcode::G_ATOMICRMW_AND: {
    const RTLIB::Libcall LC[5][4] = {LCALL5(RTLIB::OUTLINE_ATOMIC_LDCLR)};
    return getOutlineAtomicHelper(LC, Ordering, MemSize);
  }
  case TargetOpcode::G_ATOMICRMW_OR: {
    const RTLIB::Libcall LC[5][4] = {LCALL5(RTLIB::OUTLINE_ATOMIC_LDSET)};
    return getOutlineAtomicHelper(LC, Ordering, MemSize);
  }
  case TargetOpcode::G_ATOMICRMW_XOR: {
    const RTLIB::Libcall LC[5][4] = {LCALL5(RTLIB::OUTLINE_ATOMIC_LDEOR)};
    return getOutlineAtomicHelper(LC, Ordering, MemSize);
  }
  default:
    return RTLIB::UNKNOWN_LIBCALL;
  }
#undef LCALLS
#undef LCALL5
}
929
932 auto &Ctx = MIRBuilder.getContext();
933
934 Type *RetTy;
935 SmallVector<Register> RetRegs;
937 unsigned Opc = MI.getOpcode();
938 switch (Opc) {
939 case TargetOpcode::G_ATOMIC_CMPXCHG:
940 case TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS: {
942 LLT SuccessLLT;
943 auto [Ret, RetLLT, Mem, MemLLT, Cmp, CmpLLT, New, NewLLT] =
944 MI.getFirst4RegLLTs();
945 RetRegs.push_back(Ret);
946 RetTy = IntegerType::get(Ctx, RetLLT.getSizeInBits());
947 if (Opc == TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS) {
948 std::tie(Ret, RetLLT, Success, SuccessLLT, Mem, MemLLT, Cmp, CmpLLT, New,
949 NewLLT) = MI.getFirst5RegLLTs();
950 RetRegs.push_back(Success);
951 RetTy = StructType::get(
952 Ctx, {RetTy, IntegerType::get(Ctx, SuccessLLT.getSizeInBits())});
953 }
954 Args.push_back({Cmp, IntegerType::get(Ctx, CmpLLT.getSizeInBits()), 0});
955 Args.push_back({New, IntegerType::get(Ctx, NewLLT.getSizeInBits()), 0});
956 Args.push_back({Mem, PointerType::get(Ctx, MemLLT.getAddressSpace()), 0});
957 break;
958 }
959 case TargetOpcode::G_ATOMICRMW_XCHG:
960 case TargetOpcode::G_ATOMICRMW_ADD:
961 case TargetOpcode::G_ATOMICRMW_SUB:
962 case TargetOpcode::G_ATOMICRMW_AND:
963 case TargetOpcode::G_ATOMICRMW_OR:
964 case TargetOpcode::G_ATOMICRMW_XOR: {
965 auto [Ret, RetLLT, Mem, MemLLT, Val, ValLLT] = MI.getFirst3RegLLTs();
966 RetRegs.push_back(Ret);
967 RetTy = IntegerType::get(Ctx, RetLLT.getSizeInBits());
968 if (Opc == TargetOpcode::G_ATOMICRMW_AND)
969 Val =
970 MIRBuilder.buildXor(ValLLT, MIRBuilder.buildConstant(ValLLT, -1), Val)
971 .getReg(0);
972 else if (Opc == TargetOpcode::G_ATOMICRMW_SUB)
973 Val =
974 MIRBuilder.buildSub(ValLLT, MIRBuilder.buildConstant(ValLLT, 0), Val)
975 .getReg(0);
976 Args.push_back({Val, IntegerType::get(Ctx, ValLLT.getSizeInBits()), 0});
977 Args.push_back({Mem, PointerType::get(Ctx, MemLLT.getAddressSpace()), 0});
978 break;
979 }
980 default:
981 llvm_unreachable("unsupported opcode");
982 }
983
984 if (!Libcalls) // FIXME: Should be mandatory
986
987 auto &CLI = *MIRBuilder.getMF().getSubtarget().getCallLowering();
988 RTLIB::Libcall RTLibcall = getOutlineAtomicLibcall(MI);
989 RTLIB::LibcallImpl RTLibcallImpl = Libcalls->getLibcallImpl(RTLibcall);
990
991 // Unsupported libcall on the target.
992 if (RTLibcallImpl == RTLIB::Unsupported) {
993 LLVM_DEBUG(dbgs() << ".. .. Could not find libcall name for "
994 << MIRBuilder.getTII().getName(Opc) << "\n");
996 }
997
999 Info.CallConv = Libcalls->getLibcallImplCallingConv(RTLibcallImpl);
1000
1001 StringRef LibcallName =
1003 Info.Callee = MachineOperand::CreateES(LibcallName.data());
1004 Info.OrigRet = CallLowering::ArgInfo(RetRegs, RetTy, 0);
1005
1006 llvm::append_range(Info.OrigArgs, Args);
1007 if (!CLI.lowerCall(MIRBuilder, Info))
1009
1011}
1012
// Translate a G_GET/SET/RESET FP-environment/FP-mode opcode to the
// corresponding fenv-style runtime library call. Note that the RESET
// variants reuse the corresponding SET entry points.
static RTLIB::Libcall
// NOTE(review): the line naming this function and its parameters is not
// visible in this listing.
  RTLIB::Libcall RTLibcall;
  switch (MI.getOpcode()) {
  case TargetOpcode::G_GET_FPENV:
    RTLibcall = RTLIB::FEGETENV;
    break;
  case TargetOpcode::G_SET_FPENV:
  case TargetOpcode::G_RESET_FPENV:
    RTLibcall = RTLIB::FESETENV;
    break;
  case TargetOpcode::G_GET_FPMODE:
    RTLibcall = RTLIB::FEGETMODE;
    break;
  case TargetOpcode::G_SET_FPMODE:
  case TargetOpcode::G_RESET_FPMODE:
    RTLibcall = RTLIB::FESETMODE;
    break;
  default:
    llvm_unreachable("Unexpected opcode");
  }
  return RTLibcall;
}
1036
1037// Some library functions that read FP state (fegetmode, fegetenv) write the
1038// state into a region in memory. IR intrinsics that do the same operations
1039 // (get_fpmode, get_fpenv) return the state as an integer value. To implement
1040 // these intrinsics via the library functions, we need to use a temporary
1041 // variable, for example:
1042//
1043// %0:_(s32) = G_GET_FPMODE
1044//
1045// is transformed to:
1046//
1047// %1:_(p0) = G_FRAME_INDEX %stack.0
1048// BL &fegetmode
1049 // %0:_(s32) = G_LOAD %1
1050//
// Legalize G_GET_FPENV / G_GET_FPMODE via the C runtime (fegetenv /
// fegetmode).  The library routine writes the state to memory, so we allocate
// a stack temporary, pass its address to the callee, then load the result
// back into the destination register.  Propagates any non-Legalized status
// from the call lowering.
1052 LegalizerHelper::createGetStateLibcall(MachineInstr &MI,
1053 LostDebugLocObserver &LocObserver) {
1054 const DataLayout &DL = MIRBuilder.getDataLayout();
1055 auto &MF = MIRBuilder.getMF();
1056 auto &MRI = *MIRBuilder.getMRI();
1057 auto &Ctx = MF.getFunction().getContext();
1058
1059 // Create temporary, where library function will put the read state.
1060 Register Dst = MI.getOperand(0).getReg();
1061 LLT StateTy = MRI.getType(Dst);
1062 TypeSize StateSize = StateTy.getSizeInBytes();
// Use the standard stack-temporary alignment for this type.
1063 Align TempAlign = getStackTemporaryAlignment(StateTy);
1064 MachinePointerInfo TempPtrInfo;
1065 auto Temp = createStackTemporary(StateSize, TempAlign, TempPtrInfo);
1066
1067 // Create a call to library function, with the temporary as an argument.
// The callee returns void; its only argument is the temporary's address in
// the alloca address space.
1068 unsigned TempAddrSpace = DL.getAllocaAddrSpace();
1069 Type *StatePtrTy = PointerType::get(Ctx, TempAddrSpace);
1070 RTLIB::Libcall RTLibcall = getStateLibraryFunctionFor(MI, TLI);
1071 auto Res = createLibcall(
1072 RTLibcall, CallLowering::ArgInfo({0}, Type::getVoidTy(Ctx), 0),
1073 CallLowering::ArgInfo({Temp.getReg(0), StatePtrTy, 0}), LocObserver,
1074 nullptr);
// Bail out early if the call could not be lowered.
1075 if (Res != LegalizerHelper::Legalized)
1076 return Res;
1077
1078 // Create a load from the temporary.
1079 MachineMemOperand *MMO = MF.getMachineMemOperand(
1080 TempPtrInfo, MachineMemOperand::MOLoad, StateTy, TempAlign);
1081 MIRBuilder.buildLoadInstr(TargetOpcode::G_LOAD, Dst, Temp, *MMO);
1082
1084 }
1085
1086// Similar to `createGetStateLibcall` the function calls a library function
1087 // using transient space on the stack. In this case the library function reads
1088 // the content of a memory region.
// Legalize G_SET_FPENV / G_SET_FPMODE via the C runtime (fesetenv /
// fesetmode).  The library routine reads the state from memory, so the new
// state is first stored to a stack temporary whose address is then passed to
// the callee.  Returns the status of the call lowering.
1090 LegalizerHelper::createSetStateLibcall(MachineInstr &MI,
1091 LostDebugLocObserver &LocObserver) {
1092 const DataLayout &DL = MIRBuilder.getDataLayout();
1093 auto &MF = MIRBuilder.getMF();
1094 auto &MRI = *MIRBuilder.getMRI();
1095 auto &Ctx = MF.getFunction().getContext();
1096
1097 // Create temporary, where library function will get the new state.
1098 Register Src = MI.getOperand(0).getReg();
1099 LLT StateTy = MRI.getType(Src);
1100 TypeSize StateSize = StateTy.getSizeInBytes();
// Use the standard stack-temporary alignment for this type.
1101 Align TempAlign = getStackTemporaryAlignment(StateTy);
1102 MachinePointerInfo TempPtrInfo;
1103 auto Temp = createStackTemporary(StateSize, TempAlign, TempPtrInfo);
1104
1105 // Put the new state into the temporary.
1106 MachineMemOperand *MMO = MF.getMachineMemOperand(
1107 TempPtrInfo, MachineMemOperand::MOStore, StateTy, TempAlign);
1108 MIRBuilder.buildStore(Src, Temp, *MMO);
1109
1110 // Create a call to library function, with the temporary as an argument.
// The callee returns void; its only argument is the temporary's address in
// the alloca address space.
1111 unsigned TempAddrSpace = DL.getAllocaAddrSpace();
1112 Type *StatePtrTy = PointerType::get(Ctx, TempAddrSpace);
1113 RTLIB::Libcall RTLibcall = getStateLibraryFunctionFor(MI, TLI);
1114 return createLibcall(RTLibcall,
1115 CallLowering::ArgInfo({0}, Type::getVoidTy(Ctx), 0),
1116 CallLowering::ArgInfo({Temp.getReg(0), StatePtrTy, 0}),
1117 LocObserver, nullptr);
1118 }
1119
1120/// Returns the corresponding libcall for the given Pred and
1121/// the ICMP predicate that should be generated to compare with #0
1122/// after the libcall.
1123 static std::pair<RTLIB::Libcall, CmpInst::Predicate>
// Expands to a return of the {libcall, icmp-predicate} pair for the 32-, 64-
// or 128-bit variant of the given soft-float comparison routine, selected by
// the enclosing function's Size parameter.
1125 #define RTLIBCASE_CMP(LibcallPrefix, ICmpPred) \
1126 do { \
1127 switch (Size) { \
1128 case 32: \
1129 return {RTLIB::LibcallPrefix##32, ICmpPred}; \
1130 case 64: \
1131 return {RTLIB::LibcallPrefix##64, ICmpPred}; \
1132 case 128: \
1133 return {RTLIB::LibcallPrefix##128, ICmpPred}; \
1134 default: \
1135 llvm_unreachable("unexpected size"); \
1136 } \
1137 } while (0)
1138
1139 switch (Pred) {
1140 case CmpInst::FCMP_OEQ:
1142 case CmpInst::FCMP_UNE:
1144 case CmpInst::FCMP_OGE:
1146 case CmpInst::FCMP_OLT:
1148 case CmpInst::FCMP_OLE:
1150 case CmpInst::FCMP_OGT:
1152 case CmpInst::FCMP_UNO:
1154 default:
// No single libcall implements this predicate; the caller builds it from a
// combination of libcalls instead (see createFCMPLibcall).
1155 return {RTLIB::UNKNOWN_LIBCALL, CmpInst::BAD_ICMP_PREDICATE};
1156 }
1157 }
1158
// Lower a G_FCMP to soft-float comparison libcall(s).  Each libcall returns
// an i32 that is then compared against zero with the ICMP predicate supplied
// by getFCMPLibcallDesc.  Predicates with no direct libcall mapping are
// synthesized from combinations of the OEQ/UNO (or inverse) libcalls.  Only
// s32/s64/s128 operands of matching type are supported.
1160 LegalizerHelper::createFCMPLibcall(MachineInstr &MI,
1161 LostDebugLocObserver &LocObserver) {
1162 auto &MF = MIRBuilder.getMF();
1163 auto &Ctx = MF.getFunction().getContext();
1164 const GFCmp *Cmp = cast<GFCmp>(&MI);
1165
1166 LLT OpLLT = MRI.getType(Cmp->getLHSReg());
1167 unsigned Size = OpLLT.getSizeInBits();
// Both operands must have the same, libcall-supported, scalar size.
1168 if ((Size != 32 && Size != 64 && Size != 128) ||
1169 OpLLT != MRI.getType(Cmp->getRHSReg()))
1170 return UnableToLegalize;
1171
1172 Type *OpType = getFloatTypeForLLT(Ctx, OpLLT);
1173
1174 // DstReg type is s32
1175 const Register DstReg = Cmp->getReg(0);
1176 LLT DstTy = MRI.getType(DstReg);
1177 const auto Cond = Cmp->getCond();
1178
1179 // Reference:
1180 // https://gcc.gnu.org/onlinedocs/gccint/Soft-float-library-routines.html#Comparison-functions-1
1181 // Generates a libcall followed by ICMP.
// Returns the result register, or an empty Register if the libcall could not
// be created.
1182 const auto BuildLibcall = [&](const RTLIB::Libcall Libcall,
1183 const CmpInst::Predicate ICmpPred,
1184 const DstOp &Res) -> Register {
1185 // FCMP libcall always returns an i32, and needs an ICMP with #0.
1186 LLT TempLLT = LLT::integer(32);
1187 Register Temp = MRI.createGenericVirtualRegister(TempLLT);
1188 // Generate libcall, holding result in Temp
1189 const auto Status = createLibcall(
1190 Libcall, {Temp, Type::getInt32Ty(Ctx), 0},
1191 {{Cmp->getLHSReg(), OpType, 0}, {Cmp->getRHSReg(), OpType, 1}},
1192 LocObserver, &MI);
1193 if (!Status)
1194 return {};
1195
1196 // Compare temp with #0 to get the final result.
1197 return MIRBuilder
1198 .buildICmp(ICmpPred, Res, Temp, MIRBuilder.buildConstant(TempLLT, 0))
1199 .getReg(0);
1200 };
1201
1202 // Simple case if we have a direct mapping from predicate to libcall
1203 if (const auto [Libcall, ICmpPred] = getFCMPLibcallDesc(Cond, Size);
1204 Libcall != RTLIB::UNKNOWN_LIBCALL &&
1205 ICmpPred != CmpInst::BAD_ICMP_PREDICATE) {
1206 if (BuildLibcall(Libcall, ICmpPred, DstReg)) {
1207 return Legalized;
1208 }
1209 return UnableToLegalize;
1210 }
1211
1212 // No direct mapping found, should be generated as combination of libcalls.
1213
1214 switch (Cond) {
1215 case CmpInst::FCMP_UEQ: {
1216 // FCMP_UEQ: unordered or equal
1217 // Convert into (FCMP_OEQ || FCMP_UNO).
1218
1219 const auto [OeqLibcall, OeqPred] =
1221 const auto Oeq = BuildLibcall(OeqLibcall, OeqPred, DstTy);
1222
1223 const auto [UnoLibcall, UnoPred] =
1225 const auto Uno = BuildLibcall(UnoLibcall, UnoPred, DstTy);
// Only emit the OR if both component libcalls were built successfully.
1226 if (Oeq && Uno)
1227 MIRBuilder.buildOr(DstReg, Oeq, Uno);
1228 else
1229 return UnableToLegalize;
1230
1231 break;
1232 }
1233 case CmpInst::FCMP_ONE: {
1234 // FCMP_ONE: ordered and operands are unequal
1235 // Convert into (!FCMP_OEQ && !FCMP_UNO).
1236
1237 // We inverse the predicate instead of generating a NOT
1238 // to save one instruction.
1239 // On AArch64 isel can even select two cmp into a single ccmp.
1240 const auto [OeqLibcall, OeqPred] =
1242 const auto NotOeq =
1243 BuildLibcall(OeqLibcall, CmpInst::getInversePredicate(OeqPred), DstTy);
1244
1245 const auto [UnoLibcall, UnoPred] =
1247 const auto NotUno =
1248 BuildLibcall(UnoLibcall, CmpInst::getInversePredicate(UnoPred), DstTy);
1249
// Only emit the AND if both component libcalls were built successfully.
1250 if (NotOeq && NotUno)
1251 MIRBuilder.buildAnd(DstReg, NotOeq, NotUno);
1252 else
1253 return UnableToLegalize;
1254
1255 break;
1256 }
1257 case CmpInst::FCMP_ULT:
1258 case CmpInst::FCMP_UGE:
1259 case CmpInst::FCMP_UGT:
1260 case CmpInst::FCMP_ULE:
1261 case CmpInst::FCMP_ORD: {
1262 // Convert into: !(inverse(Pred))
1263 // E.g. FCMP_ULT becomes !FCMP_OGE
1264 // This is equivalent to the following, but saves some instructions.
1265 // MIRBuilder.buildNot(
1266 // PredTy,
1267 // MIRBuilder.buildFCmp(CmpInst::getInversePredicate(Pred), PredTy,
1268 // Op1, Op2));
1269 const auto [InversedLibcall, InversedPred] =
1271 if (!BuildLibcall(InversedLibcall,
1272 CmpInst::getInversePredicate(InversedPred), DstReg))
1273 return UnableToLegalize;
1274 break;
1275 }
1276 default:
1277 return UnableToLegalize;
1278 }
1279
1280 return Legalized;
1281 }
1282
1283// The function is used to legalize operations that set default environment
1284// state. In C library a call like `fesetmode(FE_DFL_MODE)` is used for that.
1285// On most targets supported in glibc FE_DFL_MODE is defined as
1286// `((const femode_t *) -1)`. Such assumption is used here. If for some target
1287// it is not true, the target must provide custom lowering.
// Legalize G_RESET_FPENV / G_RESET_FPMODE by calling fesetenv / fesetmode
// with the assumed default-state argument ((const femode_t *) -1); targets
// where that assumption does not hold must provide custom lowering.
1289 LegalizerHelper::createResetStateLibcall(MachineInstr &MI,
1290 LostDebugLocObserver &LocObserver) {
1291 const DataLayout &DL = MIRBuilder.getDataLayout();
1292 auto &MF = MIRBuilder.getMF();
1293 auto &Ctx = MF.getFunction().getContext();
1294
1295 // Create an argument for the library function.
// Materialize an all-ones integer of pointer width and convert it to a
// pointer: this is FE_DFL_ENV / FE_DFL_MODE on the supported targets.
1296 unsigned AddrSpace = DL.getDefaultGlobalsAddressSpace();
1297 Type *StatePtrTy = PointerType::get(Ctx, AddrSpace);
1298 unsigned PtrSize = DL.getPointerSizeInBits(AddrSpace);
1299 LLT MemTy = LLT::pointer(AddrSpace, PtrSize);
1300 auto DefValue = MIRBuilder.buildConstant(LLT::integer(PtrSize), -1LL);
1301 DstOp Dest(MRI.createGenericVirtualRegister(MemTy));
1302 MIRBuilder.buildIntToPtr(Dest, DefValue);
1303
1304 RTLIB::Libcall RTLibcall = getStateLibraryFunctionFor(MI, TLI);
1305 return createLibcall(
1306 RTLibcall, CallLowering::ArgInfo({0}, Type::getVoidTy(Ctx), 0),
1307 CallLowering::ArgInfo({Dest.getReg(), StatePtrTy, 0}), LocObserver, &MI);
1308 }
1309
// Dispatch table that replaces an illegal instruction with the equivalent
// runtime library call.  Cases either 'break' out of the switch to share the
// common MI.eraseFromParent() at the bottom, or erase the instruction and
// return on their own when the helper they delegate to handles it.
1312 auto &Ctx = MIRBuilder.getMF().getFunction().getContext();
1313
1314 switch (MI.getOpcode()) {
1315 default:
1316 return UnableToLegalize;
// Integer ops: lower via an integer libcall of the same bit width.
1317 case TargetOpcode::G_MUL:
1318 case TargetOpcode::G_SDIV:
1319 case TargetOpcode::G_UDIV:
1320 case TargetOpcode::G_SREM:
1321 case TargetOpcode::G_UREM:
1322 case TargetOpcode::G_CTLZ_ZERO_POISON: {
1323 LLT LLTy = MRI.getType(MI.getOperand(0).getReg());
1324 unsigned Size = LLTy.getSizeInBits();
1325 Type *HLTy = IntegerType::get(Ctx, Size);
1326 auto Status = simpleLibcall(MI, MIRBuilder, Size, HLTy, LocObserver);
1327 if (Status != Legalized)
1328 return Status;
1329 break;
1330 }
// Unary/binary FP ops: only f32/f64/f80/f128 have libcall variants.
1331 case TargetOpcode::G_FADD:
1332 case TargetOpcode::G_FSUB:
1333 case TargetOpcode::G_FMUL:
1334 case TargetOpcode::G_FDIV:
1335 case TargetOpcode::G_FMA:
1336 case TargetOpcode::G_FPOW:
1337 case TargetOpcode::G_FREM:
1338 case TargetOpcode::G_FCOS:
1339 case TargetOpcode::G_FSIN:
1340 case TargetOpcode::G_FTAN:
1341 case TargetOpcode::G_FACOS:
1342 case TargetOpcode::G_FASIN:
1343 case TargetOpcode::G_FATAN:
1344 case TargetOpcode::G_FATAN2:
1345 case TargetOpcode::G_FCOSH:
1346 case TargetOpcode::G_FSINH:
1347 case TargetOpcode::G_FTANH:
1348 case TargetOpcode::G_FLOG10:
1349 case TargetOpcode::G_FLOG:
1350 case TargetOpcode::G_FLOG2:
1351 case TargetOpcode::G_FEXP:
1352 case TargetOpcode::G_FEXP2:
1353 case TargetOpcode::G_FEXP10:
1354 case TargetOpcode::G_FCEIL:
1355 case TargetOpcode::G_FFLOOR:
1356 case TargetOpcode::G_FMINNUM:
1357 case TargetOpcode::G_FMAXNUM:
1358 case TargetOpcode::G_FMINIMUMNUM:
1359 case TargetOpcode::G_FMAXIMUMNUM:
1360 case TargetOpcode::G_FSQRT:
1361 case TargetOpcode::G_FRINT:
1362 case TargetOpcode::G_FNEARBYINT:
1363 case TargetOpcode::G_INTRINSIC_TRUNC:
1364 case TargetOpcode::G_INTRINSIC_ROUND:
1365 case TargetOpcode::G_INTRINSIC_ROUNDEVEN: {
1366 LLT LLTy = MRI.getType(MI.getOperand(0).getReg());
1367 unsigned Size = LLTy.getSizeInBits();
1368 Type *HLTy = getFloatTypeForLLT(Ctx, LLTy);
1369 if (!HLTy || (Size != 32 && Size != 64 && Size != 80 && Size != 128)) {
1370 LLVM_DEBUG(dbgs() << "No libcall available for type " << LLTy << ".\n");
1371 return UnableToLegalize;
1372 }
1373 auto Status = simpleLibcall(MI, MIRBuilder, Size, HLTy, LocObserver);
1374 if (Status != Legalized)
1375 return Status;
1376 break;
1377 }
// Multi-result FP ops: delegated helpers manage the instruction themselves.
1378 case TargetOpcode::G_FSINCOS: {
1379 LLT LLTy = MRI.getType(MI.getOperand(0).getReg());
1380 unsigned Size = LLTy.getSizeInBits();
1381 Type *HLTy = getFloatTypeForLLT(Ctx, LLTy);
1382 if (!HLTy || (Size != 32 && Size != 64 && Size != 80 && Size != 128)) {
1383 LLVM_DEBUG(dbgs() << "No libcall available for type " << LLTy << ".\n");
1384 return UnableToLegalize;
1385 }
1386 return emitSincosLibcall(MI, MIRBuilder, Size, HLTy, LocObserver);
1387 }
1388 case TargetOpcode::G_FMODF: {
1389 LLT LLTy = MRI.getType(MI.getOperand(0).getReg());
1390 unsigned Size = LLTy.getSizeInBits();
1391 Type *HLTy = getFloatTypeForLLT(Ctx, LLTy);
1392 if (!HLTy || (Size != 32 && Size != 64 && Size != 80 && Size != 128)) {
1393 LLVM_DEBUG(dbgs() << "No libcall available for type " << LLTy << ".\n");
1394 return UnableToLegalize;
1395 }
1396 return emitModfLibcall(MI, MIRBuilder, Size, HLTy, LocObserver);
1397 }
// FP-to-integer rounding ops: the source is operand 1, the integer result is
// operand 0.  This case erases MI and returns directly.
1398 case TargetOpcode::G_LROUND:
1399 case TargetOpcode::G_LLROUND:
1400 case TargetOpcode::G_INTRINSIC_LRINT:
1401 case TargetOpcode::G_INTRINSIC_LLRINT: {
1402 LLT LLTy = MRI.getType(MI.getOperand(1).getReg());
1403 unsigned Size = LLTy.getSizeInBits();
1404 Type *HLTy = getFloatTypeForLLT(Ctx, LLTy);
1405 Type *ITy = IntegerType::get(
1406 Ctx, MRI.getType(MI.getOperand(0).getReg()).getSizeInBits());
1407 if (!HLTy || (Size != 32 && Size != 64 && Size != 80 && Size != 128)) {
1408 LLVM_DEBUG(dbgs() << "No libcall available for type " << LLTy << ".\n");
1409 return UnableToLegalize;
1410 }
1411 auto Libcall = getRTLibDesc(MI.getOpcode(), Size);
1413 createLibcall(Libcall, {MI.getOperand(0).getReg(), ITy, 0},
1414 {{MI.getOperand(1).getReg(), HLTy, 0}}, LocObserver, &MI);
1415 if (Status != Legalized)
1416 return Status;
1417 MI.eraseFromParent();
1418 return Legalized;
1419 }
// Mixed FP/integer operand ops (pow-by-int, ldexp).
1420 case TargetOpcode::G_FPOWI:
1421 case TargetOpcode::G_FLDEXP: {
1422 LLT LLTy = MRI.getType(MI.getOperand(0).getReg());
1423 unsigned Size = LLTy.getSizeInBits();
1424 Type *HLTy = getFloatTypeForLLT(Ctx, LLTy);
1425 Type *ITy = IntegerType::get(
1426 Ctx, MRI.getType(MI.getOperand(2).getReg()).getSizeInBits());
1427 if (!HLTy || (Size != 32 && Size != 64 && Size != 80 && Size != 128)) {
1428 LLVM_DEBUG(dbgs() << "No libcall available for type " << LLTy << ".\n");
1429 return UnableToLegalize;
1430 }
1431 auto Libcall = getRTLibDesc(MI.getOpcode(), Size);
1433 {MI.getOperand(1).getReg(), HLTy, 0},
1434 {MI.getOperand(2).getReg(), ITy, 1}};
// Mark the integer operand as sign-extended.
1435 Args[1].Flags[0].setSExt();
1437 Libcall, {MI.getOperand(0).getReg(), HLTy, 0}, Args, LocObserver, &MI);
1438 if (Status != Legalized)
1439 return Status;
1440 break;
1441 }
// FP precision conversions: both source and destination must map to an IEEE
// float type.
1442 case TargetOpcode::G_FPEXT:
1443 case TargetOpcode::G_FPTRUNC: {
1444 Type *FromTy = getFloatTypeForLLT(Ctx, MRI.getType(MI.getOperand(1).getReg()));
1445 Type *ToTy = getFloatTypeForLLT(Ctx, MRI.getType(MI.getOperand(0).getReg()));
1446 if (!FromTy || !ToTy)
1447 return UnableToLegalize;
1448 LegalizeResult Status = conversionLibcall(MI, ToTy, FromTy, LocObserver);
1449 if (Status != Legalized)
1450 return Status;
1451 break;
1452 }
// FP compare: createFCMPLibcall builds the replacement; erase MI here.
1453 case TargetOpcode::G_FCMP: {
1454 LegalizeResult Status = createFCMPLibcall(MI, LocObserver);
1455 if (Status != Legalized)
1456 return Status;
1457 MI.eraseFromParent();
1458 return Status;
1459 }
1460 case TargetOpcode::G_FPTOSI:
1461 case TargetOpcode::G_FPTOUI: {
1462 // FIXME: Support other types
1463 Type *FromTy =
1464 getFloatTypeForLLT(Ctx, MRI.getType(MI.getOperand(1).getReg()));
1465 unsigned ToSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
1466 if ((ToSize != 32 && ToSize != 64 && ToSize != 128) || !FromTy)
1467 return UnableToLegalize;
1469 FromTy, LocObserver);
1470 if (Status != Legalized)
1471 return Status;
1472 break;
1473 }
1474 case TargetOpcode::G_SITOFP:
1475 case TargetOpcode::G_UITOFP: {
1476 unsigned FromSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
1477 Type *ToTy =
1478 getFloatTypeForLLT(Ctx, MRI.getType(MI.getOperand(0).getReg()));
1479 if ((FromSize != 32 && FromSize != 64 && FromSize != 128) || !ToTy)
1480 return UnableToLegalize;
// The same conversion helper handles signed and unsigned sources.
1481 bool IsSigned = MI.getOpcode() == TargetOpcode::G_SITOFP;
1483 MI, ToTy, Type::getIntNTy(Ctx, FromSize), LocObserver, IsSigned);
1484 if (Status != Legalized)
1485 return Status;
1486 break;
1487 }
1488 case TargetOpcode::G_ATOMICRMW_XCHG:
1489 case TargetOpcode::G_ATOMICRMW_ADD:
1490 case TargetOpcode::G_ATOMICRMW_SUB:
1491 case TargetOpcode::G_ATOMICRMW_AND:
1492 case TargetOpcode::G_ATOMICRMW_OR:
1493 case TargetOpcode::G_ATOMICRMW_XOR:
1494 case TargetOpcode::G_ATOMIC_CMPXCHG:
1495 case TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS: {
1497 if (Status != Legalized)
1498 return Status;
1499 break;
1500 }
// Memory intrinsics: createMemLibcall builds the call; erase MI here.
1501 case TargetOpcode::G_BZERO:
1502 case TargetOpcode::G_MEMCPY:
1503 case TargetOpcode::G_MEMMOVE:
1504 case TargetOpcode::G_MEMSET: {
1505 LegalizeResult Result =
1506 createMemLibcall(*MIRBuilder.getMRI(), MI, LocObserver);
1507 if (Result != Legalized)
1508 return Result;
1509 MI.eraseFromParent();
1510 return Result;
1511 }
// FP environment/mode state manipulation (see the helpers above).
1512 case TargetOpcode::G_GET_FPENV:
1513 case TargetOpcode::G_GET_FPMODE: {
1514 LegalizeResult Result = createGetStateLibcall(MI, LocObserver);
1515 if (Result != Legalized)
1516 return Result;
1517 break;
1518 }
1519 case TargetOpcode::G_SET_FPENV:
1520 case TargetOpcode::G_SET_FPMODE: {
1521 LegalizeResult Result = createSetStateLibcall(MI, LocObserver);
1522 if (Result != Legalized)
1523 return Result;
1524 break;
1525 }
1526 case TargetOpcode::G_RESET_FPENV:
1527 case TargetOpcode::G_RESET_FPMODE: {
1528 LegalizeResult Result = createResetStateLibcall(MI, LocObserver);
1529 if (Result != Legalized)
1530 return Result;
1531 break;
1532 }
1533 }
1534
// Common exit for all cases that 'break': the original instruction has been
// replaced by the libcall sequence and can be removed.
1535 MI.eraseFromParent();
1536 return Legalized;
1537 }
1538
1540 unsigned TypeIdx,
1541 LLT NarrowTy) {
1542 uint64_t SizeOp0 = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
1543 uint64_t NarrowSize = NarrowTy.getSizeInBits();
1544
1545 switch (MI.getOpcode()) {
1546 default:
1547 return UnableToLegalize;
1548 case TargetOpcode::G_IMPLICIT_DEF: {
1549 Register DstReg = MI.getOperand(0).getReg();
1550 LLT DstTy = MRI.getType(DstReg);
1551
1552 // If SizeOp0 is not an exact multiple of NarrowSize, emit
1553 // G_ANYEXT(G_IMPLICIT_DEF). Cast result to vector if needed.
1554 // FIXME: Although this would also be legal for the general case, it causes
1555 // a lot of regressions in the emitted code (superfluous COPYs, artifact
1556 // combines not being hit). This seems to be a problem related to the
1557 // artifact combiner.
1558 if (SizeOp0 % NarrowSize != 0) {
1559 LLT ImplicitTy = DstTy.changeElementType(NarrowTy);
1560 Register ImplicitReg = MIRBuilder.buildUndef(ImplicitTy).getReg(0);
1561 MIRBuilder.buildAnyExt(DstReg, ImplicitReg);
1562
1563 MI.eraseFromParent();
1564 return Legalized;
1565 }
1566
1567 int NumParts = SizeOp0 / NarrowSize;
1568
1570 for (int i = 0; i < NumParts; ++i)
1571 DstRegs.push_back(MIRBuilder.buildUndef(NarrowTy).getReg(0));
1572
1573 if (DstTy.isVector())
1574 MIRBuilder.buildBuildVector(DstReg, DstRegs);
1575 else
1576 MIRBuilder.buildMergeLikeInstr(DstReg, DstRegs);
1577 MI.eraseFromParent();
1578 return Legalized;
1579 }
1580 case TargetOpcode::G_CONSTANT: {
1581 LLT Ty = MRI.getType(MI.getOperand(0).getReg());
1582 const APInt &Val = MI.getOperand(1).getCImm()->getValue();
1583 unsigned TotalSize = Ty.getSizeInBits();
1584 unsigned NarrowSize = NarrowTy.getSizeInBits();
1585 int NumParts = TotalSize / NarrowSize;
1586
1587 SmallVector<Register, 4> PartRegs;
1588 for (int I = 0; I != NumParts; ++I) {
1589 unsigned Offset = I * NarrowSize;
1590 auto K = MIRBuilder.buildConstant(NarrowTy,
1591 Val.lshr(Offset).trunc(NarrowSize));
1592 PartRegs.push_back(K.getReg(0));
1593 }
1594
1595 LLT LeftoverTy;
1596 unsigned LeftoverBits = TotalSize - NumParts * NarrowSize;
1597 SmallVector<Register, 1> LeftoverRegs;
1598 if (LeftoverBits != 0) {
1599 LeftoverTy = LLT::scalar(LeftoverBits);
1600 auto K = MIRBuilder.buildConstant(
1601 LeftoverTy,
1602 Val.lshr(NumParts * NarrowSize).trunc(LeftoverBits));
1603 LeftoverRegs.push_back(K.getReg(0));
1604 }
1605
1606 insertParts(MI.getOperand(0).getReg(),
1607 Ty, NarrowTy, PartRegs, LeftoverTy, LeftoverRegs);
1608
1609 MI.eraseFromParent();
1610 return Legalized;
1611 }
1612 case TargetOpcode::G_SEXT:
1613 case TargetOpcode::G_ZEXT:
1614 case TargetOpcode::G_ANYEXT:
1615 return narrowScalarExt(MI, TypeIdx, NarrowTy);
1616 case TargetOpcode::G_TRUNC: {
1617 if (TypeIdx != 1)
1618 return UnableToLegalize;
1619
1620 uint64_t SizeOp1 = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
1621 if (NarrowTy.getSizeInBits() * 2 != SizeOp1) {
1622 LLVM_DEBUG(dbgs() << "Can't narrow trunc to type " << NarrowTy << "\n");
1623 return UnableToLegalize;
1624 }
1625
1626 auto Unmerge = MIRBuilder.buildUnmerge(NarrowTy, MI.getOperand(1));
1627 MIRBuilder.buildCopy(MI.getOperand(0), Unmerge.getReg(0));
1628 MI.eraseFromParent();
1629 return Legalized;
1630 }
1631 case TargetOpcode::G_CONSTANT_FOLD_BARRIER:
1632 case TargetOpcode::G_FREEZE: {
1633 if (TypeIdx != 0)
1634 return UnableToLegalize;
1635
1636 LLT Ty = MRI.getType(MI.getOperand(0).getReg());
1637 // Should widen scalar first
1638 if (Ty.getSizeInBits() % NarrowTy.getSizeInBits() != 0)
1639 return UnableToLegalize;
1640
1641 auto Unmerge = MIRBuilder.buildUnmerge(NarrowTy, MI.getOperand(1).getReg());
1643 for (unsigned i = 0; i < Unmerge->getNumDefs(); ++i) {
1644 Parts.push_back(
1645 MIRBuilder.buildInstr(MI.getOpcode(), {NarrowTy}, {Unmerge.getReg(i)})
1646 .getReg(0));
1647 }
1648
1649 MIRBuilder.buildMergeLikeInstr(MI.getOperand(0).getReg(), Parts);
1650 MI.eraseFromParent();
1651 return Legalized;
1652 }
1653 case TargetOpcode::G_ADD:
1654 case TargetOpcode::G_SUB:
1655 case TargetOpcode::G_SADDO:
1656 case TargetOpcode::G_SSUBO:
1657 case TargetOpcode::G_SADDE:
1658 case TargetOpcode::G_SSUBE:
1659 case TargetOpcode::G_UADDO:
1660 case TargetOpcode::G_USUBO:
1661 case TargetOpcode::G_UADDE:
1662 case TargetOpcode::G_USUBE:
1663 return narrowScalarAddSub(MI, TypeIdx, NarrowTy);
1664 case TargetOpcode::G_MUL:
1665 case TargetOpcode::G_UMULH:
1666 return narrowScalarMul(MI, NarrowTy);
1667 case TargetOpcode::G_EXTRACT:
1668 return narrowScalarExtract(MI, TypeIdx, NarrowTy);
1669 case TargetOpcode::G_INSERT:
1670 return narrowScalarInsert(MI, TypeIdx, NarrowTy);
1671 case TargetOpcode::G_LOAD: {
1672 auto &LoadMI = cast<GLoad>(MI);
1673 Register DstReg = LoadMI.getDstReg();
1674 LLT DstTy = MRI.getType(DstReg);
1675 if (DstTy.isVector())
1676 return UnableToLegalize;
1677
1678 if (8 * LoadMI.getMemSize().getValue() != DstTy.getSizeInBits()) {
1679 Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy);
1680 MIRBuilder.buildLoad(TmpReg, LoadMI.getPointerReg(), LoadMI.getMMO());
1681 MIRBuilder.buildAnyExt(DstReg, TmpReg);
1682 LoadMI.eraseFromParent();
1683 return Legalized;
1684 }
1685
1686 return reduceLoadStoreWidth(LoadMI, TypeIdx, NarrowTy);
1687 }
1688 case TargetOpcode::G_ZEXTLOAD:
1689 case TargetOpcode::G_SEXTLOAD:
1690 case TargetOpcode::G_FPEXTLOAD: {
1691 auto &LoadMI = cast<GExtLoad>(MI);
1692 Register DstReg = LoadMI.getDstReg();
1693 Register PtrReg = LoadMI.getPointerReg();
1694
1695 Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy);
1696 auto &MMO = LoadMI.getMMO();
1697 unsigned MemSize = MMO.getSizeInBits().getValue();
1698
1699 if (MemSize == NarrowSize) {
1700 MIRBuilder.buildLoad(TmpReg, PtrReg, MMO);
1701 } else if (MemSize < NarrowSize) {
1702 MIRBuilder.buildLoadInstr(LoadMI.getOpcode(), TmpReg, PtrReg, MMO);
1703 } else if (MemSize > NarrowSize) {
1704 // FIXME: Need to split the load.
1705 return UnableToLegalize;
1706 }
1707
1708 if (isa<GZExtLoad>(LoadMI))
1709 MIRBuilder.buildZExt(DstReg, TmpReg);
1710 else if (isa<GSExtLoad>(LoadMI))
1711 MIRBuilder.buildSExt(DstReg, TmpReg);
1712 else
1713 MIRBuilder.buildFPExt(DstReg, TmpReg);
1714
1715 LoadMI.eraseFromParent();
1716 return Legalized;
1717 }
1718 case TargetOpcode::G_STORE: {
1719 auto &StoreMI = cast<GStore>(MI);
1720
1721 Register SrcReg = StoreMI.getValueReg();
1722 LLT SrcTy = MRI.getType(SrcReg);
1723 if (SrcTy.isVector())
1724 return UnableToLegalize;
1725
1726 int NumParts = SizeOp0 / NarrowSize;
1727 unsigned HandledSize = NumParts * NarrowTy.getSizeInBits();
1728 unsigned LeftoverBits = SrcTy.getSizeInBits() - HandledSize;
1729 if (SrcTy.isVector() && LeftoverBits != 0)
1730 return UnableToLegalize;
1731
1732 if (8 * StoreMI.getMemSize().getValue() != SrcTy.getSizeInBits()) {
1733 Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy);
1734 MIRBuilder.buildTrunc(TmpReg, SrcReg);
1735 MIRBuilder.buildStore(TmpReg, StoreMI.getPointerReg(), StoreMI.getMMO());
1736 StoreMI.eraseFromParent();
1737 return Legalized;
1738 }
1739
1740 return reduceLoadStoreWidth(StoreMI, 0, NarrowTy);
1741 }
1742 case TargetOpcode::G_FPTRUNCSTORE: {
1743 auto &StoreMI = cast<GFPTruncStore>(MI);
1744 Register SrcReg = StoreMI.getValueReg();
1745 Register PtrReg = StoreMI.getPointerReg();
1746
1747 auto &MMO = StoreMI.getMMO();
1748 unsigned MemSize = MMO.getSizeInBits().getValue();
1749 if (MemSize > NarrowSize) {
1750 return UnableToLegalize;
1751 }
1752
1753 auto TmpReg = MIRBuilder.buildFPTrunc(NarrowTy, SrcReg);
1754 if (MemSize == NarrowSize) {
1755 MIRBuilder.buildStore(TmpReg, PtrReg, MMO);
1756 } else if (MemSize < NarrowSize) {
1757 MIRBuilder.buildStoreInstr(TargetOpcode::G_FPTRUNCSTORE, TmpReg, PtrReg,
1758 MMO);
1759 }
1760
1761 StoreMI.eraseFromParent();
1762 return Legalized;
1763 }
1764 case TargetOpcode::G_SELECT:
1765 return narrowScalarSelect(MI, TypeIdx, NarrowTy);
1766 case TargetOpcode::G_AND:
1767 case TargetOpcode::G_OR:
1768 case TargetOpcode::G_XOR: {
1769 // Legalize bitwise operation:
1770 // A = BinOp<Ty> B, C
1771 // into:
1772 // B1, ..., BN = G_UNMERGE_VALUES B
1773 // C1, ..., CN = G_UNMERGE_VALUES C
1774 // A1 = BinOp<Ty/N> B1, C2
1775 // ...
1776 // AN = BinOp<Ty/N> BN, CN
1777 // A = G_MERGE_VALUES A1, ..., AN
1778 return narrowScalarBasic(MI, TypeIdx, NarrowTy);
1779 }
1780 case TargetOpcode::G_SHL:
1781 case TargetOpcode::G_LSHR:
1782 case TargetOpcode::G_ASHR:
1783 return narrowScalarShift(MI, TypeIdx, NarrowTy);
1784 case TargetOpcode::G_CTLZ:
1785 case TargetOpcode::G_CTLZ_ZERO_POISON:
1786 case TargetOpcode::G_CTTZ:
1787 case TargetOpcode::G_CTTZ_ZERO_POISON:
1788 case TargetOpcode::G_CTLS:
1789 case TargetOpcode::G_CTPOP:
1790 if (TypeIdx == 1)
1791 switch (MI.getOpcode()) {
1792 case TargetOpcode::G_CTLZ:
1793 case TargetOpcode::G_CTLZ_ZERO_POISON:
1794 return narrowScalarCTLZ(MI, TypeIdx, NarrowTy);
1795 case TargetOpcode::G_CTTZ:
1796 case TargetOpcode::G_CTTZ_ZERO_POISON:
1797 return narrowScalarCTTZ(MI, TypeIdx, NarrowTy);
1798 case TargetOpcode::G_CTPOP:
1799 return narrowScalarCTPOP(MI, TypeIdx, NarrowTy);
1800 case TargetOpcode::G_CTLS:
1801 return narrowScalarCTLS(MI, TypeIdx, NarrowTy);
1802 default:
1803 return UnableToLegalize;
1804 }
1805
1806 Observer.changingInstr(MI);
1807 narrowScalarDst(MI, NarrowTy, 0, TargetOpcode::G_ZEXT);
1808 Observer.changedInstr(MI);
1809 return Legalized;
1810 case TargetOpcode::G_INTTOPTR:
1811 if (TypeIdx != 1)
1812 return UnableToLegalize;
1813
1814 Observer.changingInstr(MI);
1815 narrowScalarSrc(MI, NarrowTy, 1);
1816 Observer.changedInstr(MI);
1817 return Legalized;
1818 case TargetOpcode::G_PTRTOINT:
1819 if (TypeIdx != 0)
1820 return UnableToLegalize;
1821
1822 Observer.changingInstr(MI);
1823 narrowScalarDst(MI, NarrowTy, 0, TargetOpcode::G_ZEXT);
1824 Observer.changedInstr(MI);
1825 return Legalized;
1826 case TargetOpcode::G_PHI: {
1827 // FIXME: add support for when SizeOp0 isn't an exact multiple of
1828 // NarrowSize.
1829 if (SizeOp0 % NarrowSize != 0)
1830 return UnableToLegalize;
1831
1832 unsigned NumParts = SizeOp0 / NarrowSize;
1833 SmallVector<Register, 2> DstRegs(NumParts);
1834 SmallVector<SmallVector<Register, 2>, 2> SrcRegs(MI.getNumOperands() / 2);
1835 Observer.changingInstr(MI);
1836 for (unsigned i = 1; i < MI.getNumOperands(); i += 2) {
1837 MachineBasicBlock &OpMBB = *MI.getOperand(i + 1).getMBB();
1838 MIRBuilder.setInsertPt(OpMBB, OpMBB.getFirstTerminatorForward());
1839 extractParts(MI.getOperand(i).getReg(), NarrowTy, NumParts,
1840 SrcRegs[i / 2], MIRBuilder, MRI);
1841 }
1842 MachineBasicBlock &MBB = *MI.getParent();
1843 MIRBuilder.setInsertPt(MBB, MI);
1844 for (unsigned i = 0; i < NumParts; ++i) {
1845 DstRegs[i] = MRI.createGenericVirtualRegister(NarrowTy);
1847 MIRBuilder.buildInstr(TargetOpcode::G_PHI).addDef(DstRegs[i]);
1848 for (unsigned j = 1; j < MI.getNumOperands(); j += 2)
1849 MIB.addUse(SrcRegs[j / 2][i]).add(MI.getOperand(j + 1));
1850 }
1851 MIRBuilder.setInsertPt(MBB, MBB.getFirstNonPHI());
1852 MIRBuilder.buildMergeLikeInstr(MI.getOperand(0), DstRegs);
1853 Observer.changedInstr(MI);
1854 MI.eraseFromParent();
1855 return Legalized;
1856 }
1857 case TargetOpcode::G_EXTRACT_VECTOR_ELT:
1858 case TargetOpcode::G_INSERT_VECTOR_ELT: {
1859 if (TypeIdx != 2)
1860 return UnableToLegalize;
1861
1862 int OpIdx = MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT ? 2 : 3;
1863 Observer.changingInstr(MI);
1864 narrowScalarSrc(MI, NarrowTy, OpIdx);
1865 Observer.changedInstr(MI);
1866 return Legalized;
1867 }
1868 case TargetOpcode::G_ICMP: {
1869 Register LHS = MI.getOperand(2).getReg();
1870 LLT SrcTy = MRI.getType(LHS);
1871 CmpInst::Predicate Pred =
1872 static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
1873
1874 LLT LeftoverTy; // Example: s88 -> s64 (NarrowTy) + s24 (leftover)
1875 SmallVector<Register, 4> LHSPartRegs, LHSLeftoverRegs;
1876 if (!extractParts(LHS, SrcTy, NarrowTy, LeftoverTy, LHSPartRegs,
1877 LHSLeftoverRegs, MIRBuilder, MRI))
1878 return UnableToLegalize;
1879
1880 LLT Unused; // Matches LeftoverTy; G_ICMP LHS and RHS are the same type.
1881 SmallVector<Register, 4> RHSPartRegs, RHSLeftoverRegs;
1882 if (!extractParts(MI.getOperand(3).getReg(), SrcTy, NarrowTy, Unused,
1883 RHSPartRegs, RHSLeftoverRegs, MIRBuilder, MRI))
1884 return UnableToLegalize;
1885
1886 // We now have the LHS and RHS of the compare split into narrow-type
1887 // registers, plus potentially some leftover type.
1888 Register Dst = MI.getOperand(0).getReg();
1889 LLT ResTy = MRI.getType(Dst);
1890 if (ICmpInst::isEquality(Pred)) {
1891 // For each part on the LHS and RHS, keep track of the result of XOR-ing
1892 // them together. For each equal part, the result should be all 0s. For
1893 // each non-equal part, we'll get at least one 1.
1894 auto Zero = MIRBuilder.buildConstant(NarrowTy, 0);
1896 for (auto LHSAndRHS : zip(LHSPartRegs, RHSPartRegs)) {
1897 auto LHS = std::get<0>(LHSAndRHS);
1898 auto RHS = std::get<1>(LHSAndRHS);
1899 auto Xor = MIRBuilder.buildXor(NarrowTy, LHS, RHS).getReg(0);
1900 Xors.push_back(Xor);
1901 }
1902
1903 // Build a G_XOR for each leftover register. Each G_XOR must be widened
1904 // to the desired narrow type so that we can OR them together later.
1905 SmallVector<Register, 4> WidenedXors;
1906 for (auto LHSAndRHS : zip(LHSLeftoverRegs, RHSLeftoverRegs)) {
1907 auto LHS = std::get<0>(LHSAndRHS);
1908 auto RHS = std::get<1>(LHSAndRHS);
1909 auto Xor = MIRBuilder.buildXor(LeftoverTy, LHS, RHS).getReg(0);
1910 LLT GCDTy = extractGCDType(WidenedXors, NarrowTy, LeftoverTy, Xor);
1911 buildLCMMergePieces(LeftoverTy, NarrowTy, GCDTy, WidenedXors,
1912 /* PadStrategy = */ TargetOpcode::G_ZEXT);
1913 llvm::append_range(Xors, WidenedXors);
1914 }
1915
1916 // Now, for each part we broke up, we know if they are equal/not equal
1917 // based off the G_XOR. We can OR these all together and compare against
1918 // 0 to get the result.
1919 assert(Xors.size() >= 2 && "Should have gotten at least two Xors?");
1920 auto Or = MIRBuilder.buildOr(NarrowTy, Xors[0], Xors[1]);
1921 for (unsigned I = 2, E = Xors.size(); I < E; ++I)
1922 Or = MIRBuilder.buildOr(NarrowTy, Or, Xors[I]);
1923 MIRBuilder.buildICmp(Pred, Dst, Or, Zero);
1924 } else {
1925 Register CmpIn;
1926 for (unsigned I = 0, E = LHSPartRegs.size(); I != E; ++I) {
1927 Register CmpOut;
1928 CmpInst::Predicate PartPred;
1929
1930 if (I == E - 1 && LHSLeftoverRegs.empty()) {
1931 PartPred = Pred;
1932 CmpOut = Dst;
1933 } else {
1934 PartPred = ICmpInst::getUnsignedPredicate(Pred);
1935 CmpOut = MRI.createGenericVirtualRegister(ResTy);
1936 }
1937
1938 if (!CmpIn) {
1939 MIRBuilder.buildICmp(PartPred, CmpOut, LHSPartRegs[I],
1940 RHSPartRegs[I]);
1941 } else {
1942 auto Cmp = MIRBuilder.buildICmp(PartPred, ResTy, LHSPartRegs[I],
1943 RHSPartRegs[I]);
1944 auto CmpEq = MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_EQ, ResTy,
1945 LHSPartRegs[I], RHSPartRegs[I]);
1946 MIRBuilder.buildSelect(CmpOut, CmpEq, CmpIn, Cmp);
1947 }
1948
1949 CmpIn = CmpOut;
1950 }
1951
1952 for (unsigned I = 0, E = LHSLeftoverRegs.size(); I != E; ++I) {
1953 Register CmpOut;
1954 CmpInst::Predicate PartPred;
1955
1956 if (I == E - 1) {
1957 PartPred = Pred;
1958 CmpOut = Dst;
1959 } else {
1960 PartPred = ICmpInst::getUnsignedPredicate(Pred);
1961 CmpOut = MRI.createGenericVirtualRegister(ResTy);
1962 }
1963
1964 if (!CmpIn) {
1965 MIRBuilder.buildICmp(PartPred, CmpOut, LHSLeftoverRegs[I],
1966 RHSLeftoverRegs[I]);
1967 } else {
1968 auto Cmp = MIRBuilder.buildICmp(PartPred, ResTy, LHSLeftoverRegs[I],
1969 RHSLeftoverRegs[I]);
1970 auto CmpEq =
1971 MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_EQ, ResTy,
1972 LHSLeftoverRegs[I], RHSLeftoverRegs[I]);
1973 MIRBuilder.buildSelect(CmpOut, CmpEq, CmpIn, Cmp);
1974 }
1975
1976 CmpIn = CmpOut;
1977 }
1978 }
1979 MI.eraseFromParent();
1980 return Legalized;
1981 }
1982 case TargetOpcode::G_FCMP:
1983 if (TypeIdx != 0)
1984 return UnableToLegalize;
1985
1986 Observer.changingInstr(MI);
1987 narrowScalarDst(MI, NarrowTy, 0, TargetOpcode::G_ZEXT);
1988 Observer.changedInstr(MI);
1989 return Legalized;
1990
1991 case TargetOpcode::G_SEXT_INREG: {
1992 if (TypeIdx != 0)
1993 return UnableToLegalize;
1994
1995 int64_t SizeInBits = MI.getOperand(2).getImm();
1996
1997 // So long as the new type has more bits than the bits we're extending we
1998 // don't need to break it apart.
1999 if (NarrowTy.getScalarSizeInBits() > SizeInBits) {
2000 Observer.changingInstr(MI);
2001 // We don't lose any non-extension bits by truncating the src and
2002 // sign-extending the dst.
2003 MachineOperand &MO1 = MI.getOperand(1);
2004 auto TruncMIB = MIRBuilder.buildTrunc(NarrowTy, MO1);
2005 MO1.setReg(TruncMIB.getReg(0));
2006
2007 MachineOperand &MO2 = MI.getOperand(0);
2008 Register DstExt = MRI.createGenericVirtualRegister(NarrowTy);
2009 MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt());
2010 MIRBuilder.buildSExt(MO2, DstExt);
2011 MO2.setReg(DstExt);
2012 Observer.changedInstr(MI);
2013 return Legalized;
2014 }
2015
2016 // Break it apart. Components below the extension point are unmodified. The
2017 // component containing the extension point becomes a narrower SEXT_INREG.
2018 // Components above it are ashr'd from the component containing the
2019 // extension point.
2020 if (SizeOp0 % NarrowSize != 0)
2021 return UnableToLegalize;
2022 int NumParts = SizeOp0 / NarrowSize;
2023
2024 // List the registers where the destination will be scattered.
2026 // List the registers where the source will be split.
2028
2029 // Create all the temporary registers.
2030 for (int i = 0; i < NumParts; ++i) {
2031 Register SrcReg = MRI.createGenericVirtualRegister(NarrowTy);
2032
2033 SrcRegs.push_back(SrcReg);
2034 }
2035
2036 // Explode the big arguments into smaller chunks.
2037 MIRBuilder.buildUnmerge(SrcRegs, MI.getOperand(1));
2038
2039 Register AshrCstReg =
2040 MIRBuilder.buildConstant(NarrowTy, NarrowTy.getScalarSizeInBits() - 1)
2041 .getReg(0);
2042 Register FullExtensionReg;
2043 Register PartialExtensionReg;
2044
2045 // Do the operation on each small part.
2046 for (int i = 0; i < NumParts; ++i) {
2047 if ((i + 1) * NarrowTy.getScalarSizeInBits() <= SizeInBits) {
2048 DstRegs.push_back(SrcRegs[i]);
2049 PartialExtensionReg = DstRegs.back();
2050 } else if (i * NarrowTy.getScalarSizeInBits() >= SizeInBits) {
2051 assert(PartialExtensionReg &&
2052 "Expected to visit partial extension before full");
2053 if (FullExtensionReg) {
2054 DstRegs.push_back(FullExtensionReg);
2055 continue;
2056 }
2057 DstRegs.push_back(
2058 MIRBuilder.buildAShr(NarrowTy, PartialExtensionReg, AshrCstReg)
2059 .getReg(0));
2060 FullExtensionReg = DstRegs.back();
2061 } else {
2062 DstRegs.push_back(
2064 .buildInstr(
2065 TargetOpcode::G_SEXT_INREG, {NarrowTy},
2066 {SrcRegs[i], SizeInBits % NarrowTy.getScalarSizeInBits()})
2067 .getReg(0));
2068 PartialExtensionReg = DstRegs.back();
2069 }
2070 }
2071
2072 // Gather the destination registers into the final destination.
2073 Register DstReg = MI.getOperand(0).getReg();
2074 MIRBuilder.buildMergeLikeInstr(DstReg, DstRegs);
2075 MI.eraseFromParent();
2076 return Legalized;
2077 }
2078 case TargetOpcode::G_BSWAP:
2079 case TargetOpcode::G_BITREVERSE: {
2080 if (SizeOp0 % NarrowSize != 0)
2081 return UnableToLegalize;
2082
2083 Observer.changingInstr(MI);
2084 SmallVector<Register, 2> SrcRegs, DstRegs;
2085 unsigned NumParts = SizeOp0 / NarrowSize;
2086 extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, SrcRegs,
2087 MIRBuilder, MRI);
2088
2089 for (unsigned i = 0; i < NumParts; ++i) {
2090 auto DstPart = MIRBuilder.buildInstr(MI.getOpcode(), {NarrowTy},
2091 {SrcRegs[NumParts - 1 - i]});
2092 DstRegs.push_back(DstPart.getReg(0));
2093 }
2094
2095 MIRBuilder.buildMergeLikeInstr(MI.getOperand(0), DstRegs);
2096
2097 Observer.changedInstr(MI);
2098 MI.eraseFromParent();
2099 return Legalized;
2100 }
2101 case TargetOpcode::G_PTR_ADD:
2102 case TargetOpcode::G_PTRMASK: {
2103 if (TypeIdx != 1)
2104 return UnableToLegalize;
2105 Observer.changingInstr(MI);
2106 narrowScalarSrc(MI, NarrowTy, 2);
2107 Observer.changedInstr(MI);
2108 return Legalized;
2109 }
2110 case TargetOpcode::G_FPTOUI:
2111 case TargetOpcode::G_FPTOSI:
2112 case TargetOpcode::G_FPTOUI_SAT:
2113 case TargetOpcode::G_FPTOSI_SAT:
2114 return narrowScalarFPTOI(MI, TypeIdx, NarrowTy);
2115 case TargetOpcode::G_FPEXT:
2116 if (TypeIdx != 0)
2117 return UnableToLegalize;
2118 Observer.changingInstr(MI);
2119 narrowScalarDst(MI, NarrowTy, 0, TargetOpcode::G_FPEXT);
2120 Observer.changedInstr(MI);
2121 return Legalized;
2122 case TargetOpcode::G_FLDEXP:
2123 case TargetOpcode::G_STRICT_FLDEXP:
2124 return narrowScalarFLDEXP(MI, TypeIdx, NarrowTy);
2125 case TargetOpcode::G_VSCALE: {
2126 Register Dst = MI.getOperand(0).getReg();
2127 LLT Ty = MRI.getType(Dst);
2128
2129 // Assume VSCALE(1) fits into a legal integer
2130 const APInt One(NarrowTy.getSizeInBits(), 1);
2131 auto VScaleBase = MIRBuilder.buildVScale(NarrowTy, One);
2132 auto ZExt = MIRBuilder.buildZExt(Ty, VScaleBase);
2133 auto C = MIRBuilder.buildConstant(Ty, *MI.getOperand(1).getCImm());
2134 MIRBuilder.buildMul(Dst, ZExt, C);
2135
2136 MI.eraseFromParent();
2137 return Legalized;
2138 }
2139 }
2140}
2141
  // Coerce \p Val to an integer scalar register of the same total bit width:
  //  - scalars are returned unchanged,
  //  - pointers become a G_PTRTOINT (or an invalid Register() for non-integral
  //    address spaces, where ptrtoint is not meaningful),
  //  - vectors (with pointer elements first converted via G_PTRTOINT) are
  //    bitcast to a single scalar.
2143 LLT Ty = MRI.getType(Val);
2144 if (Ty.isScalar())
2145 return Val;
2146
2147 const DataLayout &DL = MIRBuilder.getDataLayout();
2148 LLT NewTy = LLT::scalar(Ty.getSizeInBits());
2149 if (Ty.isPointer()) {
  // Non-integral address spaces cannot be losslessly round-tripped through
  // an integer, so signal failure with an invalid register.
2150 if (DL.isNonIntegralAddressSpace(Ty.getAddressSpace()))
2151 return Register();
2152 return MIRBuilder.buildPtrToInt(NewTy, Val).getReg(0);
2153 }
2154
2155 Register NewVal = Val;
2156
2157 assert(Ty.isVector());
2158 if (Ty.isPointerVector())
2159 NewVal = MIRBuilder.buildPtrToInt(NewTy, NewVal).getReg(0);
2160 return MIRBuilder.buildBitcast(NewTy, NewVal).getReg(0);
2161}
2162
2164 unsigned OpIdx, unsigned ExtOpcode) {
  // Widen source operand \p OpIdx: emit \p ExtOpcode (e.g. G_ANYEXT / G_SEXT /
  // G_ZEXT) from the operand's current value to WideTy, then rewrite the
  // operand in place to use the extended register.
2165 MachineOperand &MO = MI.getOperand(OpIdx);
2166 auto ExtB = MIRBuilder.buildInstr(ExtOpcode, {WideTy}, {MO});
2167 MO.setReg(ExtB.getReg(0));
2168}
2169
2171 unsigned OpIdx) {
  // Narrow source operand \p OpIdx: truncate its current value to NarrowTy and
  // point the operand at the truncated register.
2172 MachineOperand &MO = MI.getOperand(OpIdx);
2173 auto ExtB = MIRBuilder.buildTrunc(NarrowTy, MO);
2174 MO.setReg(ExtB.getReg(0));
2175}
2176
2178 unsigned OpIdx, unsigned TruncOpcode) {
  // Widen def operand \p OpIdx: MI is rewritten to define a fresh WideTy
  // register, and a \p TruncOpcode instruction inserted *after* MI (note the
  // insert-point bump) narrows that value back into the original destination.
2179 MachineOperand &MO = MI.getOperand(OpIdx);
2180 Register DstExt = MRI.createGenericVirtualRegister(WideTy);
  // Place the conversion after MI so it consumes MI's new wide def.
2181 MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt());
2182 MIRBuilder.buildInstr(TruncOpcode, {MO}, {DstExt});
2183 MO.setReg(DstExt);
2184}
2185
2187 unsigned OpIdx, unsigned ExtOpcode) {
  // Narrow def operand \p OpIdx: MI is rewritten to define a fresh NarrowTy
  // register, and an \p ExtOpcode instruction inserted *after* MI widens that
  // value back into the original destination register.
2188 MachineOperand &MO = MI.getOperand(OpIdx);
2189 Register DstTrunc = MRI.createGenericVirtualRegister(NarrowTy);
  // Place the extension after MI so it consumes MI's new narrow def.
2190 MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt());
2191 MIRBuilder.buildInstr(ExtOpcode, {MO}, {DstTrunc});
2192 MO.setReg(DstTrunc);
2193}
2194
2196 unsigned OpIdx) {
  // Grow def operand \p OpIdx to the wider vector type WideTy; after MI, the
  // original destination is recovered by deleting the extra trailing elements
  // from the widened result.
2197 MachineOperand &MO = MI.getOperand(OpIdx);
2198 MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt());
2199 Register Dst = MO.getReg();
2200 Register DstExt = MRI.createGenericVirtualRegister(WideTy);
2201 MO.setReg(DstExt);
2202 MIRBuilder.buildDeleteTrailingVectorElements(Dst, DstExt);
2203}
2204
2206 unsigned OpIdx) {
  // Pad source operand \p OpIdx out to MoreTy with undef elements, rewriting
  // the operand to use the padded register.
2207 MachineOperand &MO = MI.getOperand(OpIdx);
2208 MO.setReg(MIRBuilder.buildPadVectorWithUndefElements(MoreTy, MO).getReg(0));
2209}
2210
  // Rewrite source operand \p OpIdx as a bitcast of its current value to
  // CastTy (same total size, different type interpretation).
2212 MachineOperand &Op = MI.getOperand(OpIdx);
2213 Op.setReg(MIRBuilder.buildBitcast(CastTy, Op).getReg(0));
2214}
2215
  // Rewrite def operand \p OpIdx so MI defines a fresh CastTy register; a
  // bitcast inserted after MI converts that value back into the original
  // destination register.
2217 MachineOperand &MO = MI.getOperand(OpIdx);
2218 Register CastDst = MRI.createGenericVirtualRegister(CastTy);
  // Place the bitcast after MI so it consumes MI's new def.
2219 MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt());
2220 MIRBuilder.buildBitcast(MO, CastDst);
2221 MO.setReg(CastDst);
2222}
2223
2225LegalizerHelper::widenScalarMergeValues(MachineInstr &MI, unsigned TypeIdx,
2226 LLT WideTy) {
  // Widen the source-operand type (TypeIdx 1) of a G_MERGE_VALUES. Two
  // strategies: if WideTy can hold the whole result, shift-and-OR the
  // zero-extended pieces directly into it; otherwise re-split the sources at
  // the GCD granularity and rebuild WideTy-sized merges (see the worked
  // examples below).
2227 if (TypeIdx != 1)
2228 return UnableToLegalize;
2229
2230 auto [DstReg, DstTy, Src1Reg, Src1Ty] = MI.getFirst2RegLLTs();
2231 if (DstTy.isVector())
2232 return UnableToLegalize;
2233
2234 LLT SrcTy = MRI.getType(Src1Reg);
2235 const int DstSize = DstTy.getSizeInBits();
2236 const int SrcSize = SrcTy.getSizeInBits();
2237 const int WideSize = WideTy.getSizeInBits();
  // Number of WideTy pieces needed to cover the destination (rounded up).
2238 const int NumMerge = (DstSize + WideSize - 1) / WideSize;
2239
2240 unsigned NumOps = MI.getNumOperands();
2241 unsigned NumSrc = MI.getNumOperands() - 1;
2242 unsigned PartSize = DstTy.getSizeInBits() / NumSrc;
2243
2244 if (WideSize >= DstSize) {
2245 // Directly pack the bits in the target type.
  // Seed the accumulator with the low part; each subsequent source is
  // zero-extended, shifted to its bit offset, and OR'd in.
2246 Register ResultReg = MIRBuilder.buildZExt(WideTy, Src1Reg).getReg(0);
2247
2248 for (unsigned I = 2; I != NumOps; ++I) {
2249 const unsigned Offset = (I - 1) * PartSize;
2250
2251 Register SrcReg = MI.getOperand(I).getReg();
2252 assert(MRI.getType(SrcReg) == LLT::scalar(PartSize));
2253
2254 auto ZextInput = MIRBuilder.buildZExt(WideTy, SrcReg);
2255
  // Write the final OR directly into DstReg when no trunc is needed.
2256 Register NextResult = I + 1 == NumOps && WideTy == DstTy ? DstReg :
2257 MRI.createGenericVirtualRegister(WideTy);
2258
2259 auto ShiftAmt = MIRBuilder.buildConstant(WideTy, Offset);
2260 auto Shl = MIRBuilder.buildShl(WideTy, ZextInput, ShiftAmt);
2261 MIRBuilder.buildOr(NextResult, ResultReg, Shl);
2262 ResultReg = NextResult;
2263 }
2264
2265 if (WideSize > DstSize)
2266 MIRBuilder.buildTrunc(DstReg, ResultReg);
2267 else if (DstTy.isPointer())
2268 MIRBuilder.buildIntToPtr(DstReg, ResultReg);
2269
2270 MI.eraseFromParent();
2271 return Legalized;
2272 }
2273
2274 // Unmerge the original values to the GCD type, and recombine to the next
2275 // multiple greater than the original type.
2276 //
2277 // %3:_(s12) = G_MERGE_VALUES %0:_(s4), %1:_(s4), %2:_(s4) -> s6
2278 // %4:_(s2), %5:_(s2) = G_UNMERGE_VALUES %0
2279 // %6:_(s2), %7:_(s2) = G_UNMERGE_VALUES %1
2280 // %8:_(s2), %9:_(s2) = G_UNMERGE_VALUES %2
2281 // %10:_(s6) = G_MERGE_VALUES %4, %5, %6
2282 // %11:_(s6) = G_MERGE_VALUES %7, %8, %9
2283 // %12:_(s12) = G_MERGE_VALUES %10, %11
2284 //
2285 // Padding with undef if necessary:
2286 //
2287 // %2:_(s8) = G_MERGE_VALUES %0:_(s4), %1:_(s4) -> s6
2288 // %3:_(s2), %4:_(s2) = G_UNMERGE_VALUES %0
2289 // %5:_(s2), %6:_(s2) = G_UNMERGE_VALUES %1
2290 // %7:_(s2) = G_IMPLICIT_DEF
2291 // %8:_(s6) = G_MERGE_VALUES %3, %4, %5
2292 // %9:_(s6) = G_MERGE_VALUES %6, %7, %7
2293 // %10:_(s12) = G_MERGE_VALUES %8, %9
2294
2295 const int GCD = std::gcd(SrcSize, WideSize);
2296 LLT GCDTy = LLT::scalar(GCD);
2297
2298 SmallVector<Register, 8> NewMergeRegs;
2299 SmallVector<Register, 8> Unmerges;
2300 LLT WideDstTy = LLT::scalar(NumMerge * WideSize);
2301
2302 // Decompose the original operands if they don't evenly divide.
2303 for (const MachineOperand &MO : llvm::drop_begin(MI.operands())) {
2304 Register SrcReg = MO.getReg();
2305 if (GCD == SrcSize) {
2306 Unmerges.push_back(SrcReg);
2307 } else {
2308 auto Unmerge = MIRBuilder.buildUnmerge(GCDTy, SrcReg);
2309 for (int J = 0, JE = Unmerge->getNumOperands() - 1; J != JE; ++J)
2310 Unmerges.push_back(Unmerge.getReg(J));
2311 }
2312 }
2313
2314 // Pad with undef to the next size that is a multiple of the requested size.
  // NOTE(review): this compares a count of GCD-sized parts against
  // NumMerge * WideSize (a bit count); NumMerge * PartsPerGCD (a part count)
  // looks like the intended bound. Over-padding is harmless here because the
  // merge loop below only consumes the first NumMerge * PartsPerGCD entries,
  // and WideSize >= PartsPerGCD guarantees enough padding — confirm upstream.
2315 if (static_cast<int>(Unmerges.size()) != NumMerge * WideSize) {
2316 Register UndefReg = MIRBuilder.buildUndef(GCDTy).getReg(0);
2317 for (int I = Unmerges.size(); I != NumMerge * WideSize; ++I)
2318 Unmerges.push_back(UndefReg);
2319 }
2320
2321 const int PartsPerGCD = WideSize / GCD;
2322
2323 // Build merges of each piece.
2324 ArrayRef<Register> Slicer(Unmerges);
2325 for (int I = 0; I != NumMerge; ++I, Slicer = Slicer.drop_front(PartsPerGCD)) {
2326 auto Merge =
2327 MIRBuilder.buildMergeLikeInstr(WideTy, Slicer.take_front(PartsPerGCD));
2328 NewMergeRegs.push_back(Merge.getReg(0));
2329 }
2330
2331 // A truncate may be necessary if the requested type doesn't evenly divide the
2332 // original result type.
2333 if (DstTy.getSizeInBits() == WideDstTy.getSizeInBits()) {
2334 MIRBuilder.buildMergeLikeInstr(DstReg, NewMergeRegs);
2335 } else {
2336 auto FinalMerge = MIRBuilder.buildMergeLikeInstr(WideDstTy, NewMergeRegs);
2337 MIRBuilder.buildTrunc(DstReg, FinalMerge.getReg(0));
2338 }
2339
2340 MI.eraseFromParent();
2341 return Legalized;
2342}
2343
2345LegalizerHelper::widenScalarUnmergeValues(MachineInstr &MI, unsigned TypeIdx,
2346 LLT WideTy) {
  // Widen the destination type (TypeIdx 0) of a G_UNMERGE_VALUES. If WideTy
  // covers the whole source, each piece is produced by shift + trunc;
  // otherwise the source is extended to the LCM type, unmerged at WideTy, and
  // the original results are reassembled (padding with dead defs as needed).
2347 if (TypeIdx != 0)
2348 return UnableToLegalize;
2349
2350 int NumDst = MI.getNumOperands() - 1;
  // The source is the last operand; all earlier operands are defs.
2351 Register SrcReg = MI.getOperand(NumDst).getReg();
2352 LLT SrcTy = MRI.getType(SrcReg);
2353 if (SrcTy.isVector())
2354 return UnableToLegalize;
2355
2356 Register Dst0Reg = MI.getOperand(0).getReg();
2357 LLT DstTy = MRI.getType(Dst0Reg);
2358 if (!DstTy.isScalar())
2359 return UnableToLegalize;
2360
2361 if (WideTy.getSizeInBits() >= SrcTy.getSizeInBits()) {
2362 if (SrcTy.isPointer()) {
2363 const DataLayout &DL = MIRBuilder.getDataLayout();
2364 if (DL.isNonIntegralAddressSpace(SrcTy.getAddressSpace())) {
2365 LLVM_DEBUG(
2366 dbgs() << "Not casting non-integral address space integer\n");
2367 return UnableToLegalize;
2368 }
2369
2370 SrcTy = LLT::scalar(SrcTy.getSizeInBits());
2371 SrcReg = MIRBuilder.buildPtrToInt(SrcTy, SrcReg).getReg(0);
2372 }
2373
2374 // Widen SrcTy to WideTy. This does not affect the result, but since the
2375 // user requested this size, it is probably better handled than SrcTy and
2376 // should reduce the total number of legalization artifacts.
2377 if (WideTy.getSizeInBits() > SrcTy.getSizeInBits()) {
2378 SrcTy = WideTy;
2379 SrcReg = MIRBuilder.buildAnyExt(WideTy, SrcReg).getReg(0);
2380 }
2381
2382 // There's no unmerge type to target. Directly extract the bits from the
2383 // source type.
2384 unsigned DstSize = DstTy.getSizeInBits();
2385
  // Piece 0 is the low bits; piece I is the bits at offset DstSize * I.
2386 MIRBuilder.buildTrunc(Dst0Reg, SrcReg);
2387 for (int I = 1; I != NumDst; ++I) {
2388 auto ShiftAmt = MIRBuilder.buildConstant(SrcTy, DstSize * I);
2389 auto Shr = MIRBuilder.buildLShr(SrcTy, SrcReg, ShiftAmt);
2390 MIRBuilder.buildTrunc(MI.getOperand(I), Shr);
2391 }
2392
2393 MI.eraseFromParent();
2394 return Legalized;
2395 }
2396
2397 // Extend the source to a wider type.
2398 LLT LCMTy = getLCMType(SrcTy, WideTy);
2399
2400 Register WideSrc = SrcReg;
2401 if (LCMTy.getSizeInBits() != SrcTy.getSizeInBits()) {
2402 // TODO: If this is an integral address space, cast to integer and anyext.
2403 if (SrcTy.isPointer()) {
2404 LLVM_DEBUG(dbgs() << "Widening pointer source types not implemented\n");
2405 return UnableToLegalize;
2406 }
2407
2408 WideSrc = MIRBuilder.buildAnyExt(LCMTy, WideSrc).getReg(0);
2409 }
2410
2411 auto Unmerge = MIRBuilder.buildUnmerge(WideTy, WideSrc);
2412
2413 // Create a sequence of unmerges and merges to the original results. Since we
2414 // may have widened the source, we will need to pad the results with dead defs
2415 // to cover the source register.
2416 // e.g. widen s48 to s64:
2417 // %1:_(s48), %2:_(s48) = G_UNMERGE_VALUES %0:_(s96)
2418 //
2419 // =>
2420 // %4:_(s192) = G_ANYEXT %0:_(s96)
2421 // %5:_(s64), %6, %7 = G_UNMERGE_VALUES %4 ; Requested unmerge
2422 // ; unpack to GCD type, with extra dead defs
2423 // %8:_(s16), %9, %10, %11 = G_UNMERGE_VALUES %5:_(s64)
2424 // %12:_(s16), %13, dead %14, dead %15 = G_UNMERGE_VALUES %6:_(s64)
2425 // dead %16:_(s16), dead %17, dead %18, dead %18 = G_UNMERGE_VALUES %7:_(s64)
2426 // %1:_(s48) = G_MERGE_VALUES %8:_(s16), %9, %10 ; Remerge to destination
2427 // %2:_(s48) = G_MERGE_VALUES %11:_(s16), %12, %13 ; Remerge to destination
2428 const LLT GCDTy = getGCDType(WideTy, DstTy);
2429 const int NumUnmerge = Unmerge->getNumOperands() - 1;
2430 const int PartsPerRemerge = DstTy.getSizeInBits() / GCDTy.getSizeInBits();
2431
2432 // Directly unmerge to the destination without going through a GCD type
2433 // if possible
2434 if (PartsPerRemerge == 1) {
2435 const int PartsPerUnmerge = WideTy.getSizeInBits() / DstTy.getSizeInBits();
2436
2437 for (int I = 0; I != NumUnmerge; ++I) {
2438 auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_UNMERGE_VALUES);
2439
2440 for (int J = 0; J != PartsPerUnmerge; ++J) {
2441 int Idx = I * PartsPerUnmerge + J;
2442 if (Idx < NumDst)
2443 MIB.addDef(MI.getOperand(Idx).getReg());
2444 else {
2445 // Create dead def for excess components.
2446 MIB.addDef(MRI.createGenericVirtualRegister(DstTy));
2447 }
2448 }
2449
2450 MIB.addUse(Unmerge.getReg(I));
2451 }
2452 } else {
  // Unpack everything to the GCD type, then remerge PartsPerRemerge pieces
  // per original destination.
2453 SmallVector<Register, 16> Parts;
2454 for (int J = 0; J != NumUnmerge; ++J)
2455 extractGCDType(Parts, GCDTy, Unmerge.getReg(J));
2456
2457 SmallVector<Register, 8> RemergeParts;
2458 for (int I = 0; I != NumDst; ++I) {
2459 for (int J = 0; J < PartsPerRemerge; ++J) {
2460 const int Idx = I * PartsPerRemerge + J;
2461 RemergeParts.emplace_back(Parts[Idx]);
2462 }
2463
2464 MIRBuilder.buildMergeLikeInstr(MI.getOperand(I).getReg(), RemergeParts);
2465 RemergeParts.clear();
2466 }
2467 }
2468
2469 MI.eraseFromParent();
2470 return Legalized;
2471}
2472
2474LegalizerHelper::widenScalarExtract(MachineInstr &MI, unsigned TypeIdx,
2475 LLT WideTy) {
  // Widen a G_EXTRACT. TypeIdx 0 widens the result (scalar extracts become a
  // shift + trunc in the source/wide type); TypeIdx 1 widens the source via
  // anyext, scaling the bit offset for element extracts from vectors.
2476 auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();
2477 unsigned Offset = MI.getOperand(2).getImm();
2478
2479 if (TypeIdx == 0) {
2480 if (SrcTy.isVector() || DstTy.isVector())
2481 return UnableToLegalize;
2482
2483 SrcOp Src(SrcReg);
2484 if (SrcTy.isPointer()) {
2485 // Extracts from pointers can be handled only if they are really just
2486 // simple integers.
2487 const DataLayout &DL = MIRBuilder.getDataLayout();
2488 if (DL.isNonIntegralAddressSpace(SrcTy.getAddressSpace()))
2489 return UnableToLegalize;
2490
2491 LLT SrcAsIntTy = LLT::scalar(SrcTy.getSizeInBits());
2492 Src = MIRBuilder.buildPtrToInt(SrcAsIntTy, Src);
2493 SrcTy = SrcAsIntTy;
2494 }
2495
2496 if (DstTy.isPointer())
2497 return UnableToLegalize;
2498
2499 if (Offset == 0) {
2500 // Avoid a shift in the degenerate case.
2501 MIRBuilder.buildTrunc(DstReg,
2502 MIRBuilder.buildAnyExtOrTrunc(WideTy, Src));
2503 MI.eraseFromParent();
2504 return Legalized;
2505 }
2506
2507 // Do a shift in the source type.
2508 LLT ShiftTy = SrcTy;
2509 if (WideTy.getSizeInBits() > SrcTy.getSizeInBits()) {
2510 Src = MIRBuilder.buildAnyExt(WideTy, Src);
2511 ShiftTy = WideTy;
2512 }
2513
  // Shift the requested bits down to position 0, then truncate them out.
2514 auto LShr = MIRBuilder.buildLShr(
2515 ShiftTy, Src, MIRBuilder.buildConstant(ShiftTy, Offset));
2516 MIRBuilder.buildTrunc(DstReg, LShr);
2517 MI.eraseFromParent();
2518 return Legalized;
2519 }
2520
2521 if (SrcTy.isScalar()) {
2522 Observer.changingInstr(MI);
2523 widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
2524 Observer.changedInstr(MI);
2525 return Legalized;
2526 }
2527
2528 if (!SrcTy.isVector())
2529 return UnableToLegalize;
2530
  // Only element extracts at element-aligned offsets are supported here.
2531 if (DstTy != SrcTy.getElementType())
2532 return UnableToLegalize;
2533
2534 if (Offset % SrcTy.getScalarSizeInBits() != 0)
2535 return UnableToLegalize;
2536
2537 Observer.changingInstr(MI);
2538 widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
2539
  // Rescale the bit offset to account for the wider element size.
2540 MI.getOperand(2).setImm((WideTy.getSizeInBits() / SrcTy.getSizeInBits()) *
2541 Offset);
2542 widenScalarDst(MI, WideTy.getScalarType(), 0);
2543 Observer.changedInstr(MI);
2544 return Legalized;
2545}
2546
2548LegalizerHelper::widenScalarInsert(MachineInstr &MI, unsigned TypeIdx,
2549 LLT WideTy) {
  // Widen the result type (TypeIdx 0 only, scalar WideTy only) of a G_INSERT:
  // anyext the container source and widen the def, leaving the inserted value
  // and offset untouched.
2550 if (TypeIdx != 0 || WideTy.isVector())
2551 return UnableToLegalize;
2552 Observer.changingInstr(MI);
2553 widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
2554 widenScalarDst(MI, WideTy);
2555 Observer.changedInstr(MI);
2556 return Legalized;
2557}
2558
2560LegalizerHelper::widenScalarAddSubOverflow(MachineInstr &MI, unsigned TypeIdx,
2561 LLT WideTy) {
  // Widen overflowing add/sub (G_[SU]ADDO/G_[SU]SUBO and the carry-in
  // G_[SU]ADDE/G_[SU]SUBE forms). TypeIdx 1 widens only the carry bits.
  // TypeIdx 0 performs the arithmetic in WideTy on sign-/zero-extended
  // operands and detects overflow by checking that the wide result still
  // round-trips through trunc + re-extend.
2562 unsigned Opcode;
2563 unsigned ExtOpcode;
2564 std::optional<Register> CarryIn;
2565 switch (MI.getOpcode()) {
2566 default:
2567 llvm_unreachable("Unexpected opcode!");
2568 case TargetOpcode::G_SADDO:
2569 Opcode = TargetOpcode::G_ADD;
2570 ExtOpcode = TargetOpcode::G_SEXT;
2571 break;
2572 case TargetOpcode::G_SSUBO:
2573 Opcode = TargetOpcode::G_SUB;
2574 ExtOpcode = TargetOpcode::G_SEXT;
2575 break;
2576 case TargetOpcode::G_UADDO:
2577 Opcode = TargetOpcode::G_ADD;
2578 ExtOpcode = TargetOpcode::G_ZEXT;
2579 break;
2580 case TargetOpcode::G_USUBO:
2581 Opcode = TargetOpcode::G_SUB;
2582 ExtOpcode = TargetOpcode::G_ZEXT;
2583 break;
  // The signed carry forms are lowered to their unsigned counterparts on
  // sign-extended operands; the carry-in operand (index 4) is preserved.
2584 case TargetOpcode::G_SADDE:
2585 Opcode = TargetOpcode::G_UADDE;
2586 ExtOpcode = TargetOpcode::G_SEXT;
2587 CarryIn = MI.getOperand(4).getReg();
2588 break;
2589 case TargetOpcode::G_SSUBE:
2590 Opcode = TargetOpcode::G_USUBE;
2591 ExtOpcode = TargetOpcode::G_SEXT;
2592 CarryIn = MI.getOperand(4).getReg();
2593 break;
2594 case TargetOpcode::G_UADDE:
2595 Opcode = TargetOpcode::G_UADDE;
2596 ExtOpcode = TargetOpcode::G_ZEXT;
2597 CarryIn = MI.getOperand(4).getReg();
2598 break;
2599 case TargetOpcode::G_USUBE:
2600 Opcode = TargetOpcode::G_USUBE;
2601 ExtOpcode = TargetOpcode::G_ZEXT;
2602 CarryIn = MI.getOperand(4).getReg();
2603 break;
2604 }
2605
2606 if (TypeIdx == 1) {
  // Only the boolean carry operands change; use the target's boolean
  // extension for the carry-in and widen the carry-out def.
2607 unsigned BoolExtOp = MIRBuilder.getBoolExtOp(WideTy.isVector(), false);
2608
2609 Observer.changingInstr(MI);
2610 if (CarryIn)
2611 widenScalarSrc(MI, WideTy, 4, BoolExtOp);
2612 widenScalarDst(MI, WideTy, 1);
2613
2614 Observer.changedInstr(MI);
2615 return Legalized;
2616 }
2617
2618 auto LHSExt = MIRBuilder.buildInstr(ExtOpcode, {WideTy}, {MI.getOperand(2)});
2619 auto RHSExt = MIRBuilder.buildInstr(ExtOpcode, {WideTy}, {MI.getOperand(3)});
2620 // Do the arithmetic in the larger type.
2621 Register NewOp;
2622 if (CarryIn) {
2623 LLT CarryOutTy = MRI.getType(MI.getOperand(1).getReg());
2624 NewOp = MIRBuilder
2625 .buildInstr(Opcode, {WideTy, CarryOutTy},
2626 {LHSExt, RHSExt, *CarryIn})
2627 .getReg(0);
2628 } else {
2629 NewOp = MIRBuilder.buildInstr(Opcode, {WideTy}, {LHSExt, RHSExt}).getReg(0);
2630 }
2631 LLT OrigTy = MRI.getType(MI.getOperand(0).getReg());
2632 auto TruncOp = MIRBuilder.buildTrunc(OrigTy, NewOp);
2633 auto ExtOp = MIRBuilder.buildInstr(ExtOpcode, {WideTy}, {TruncOp});
2634 // There is no overflow if the ExtOp is the same as NewOp.
2635 MIRBuilder.buildICmp(CmpInst::ICMP_NE, MI.getOperand(1), NewOp, ExtOp);
2636 // Now trunc the NewOp to the original result.
2637 MIRBuilder.buildTrunc(MI.getOperand(0), NewOp);
2638 MI.eraseFromParent();
2639 return Legalized;
2640}
2641
2643LegalizerHelper::widenScalarAddSubShlSat(MachineInstr &MI, unsigned TypeIdx,
2644 LLT WideTy) {
  // Widen a saturating add/sub/shift-left ([SU]{ADD,SUB,SHL}SAT) by aligning
  // the narrow value to the top of WideTy so the wide saturating operation
  // saturates at the same points, then shifting back down.
2645 bool IsSigned = MI.getOpcode() == TargetOpcode::G_SADDSAT ||
2646 MI.getOpcode() == TargetOpcode::G_SSUBSAT ||
2647 MI.getOpcode() == TargetOpcode::G_SSHLSAT;
2648 bool IsShift = MI.getOpcode() == TargetOpcode::G_SSHLSAT ||
2649 MI.getOpcode() == TargetOpcode::G_USHLSAT;
2650 // We can convert this to:
2651 // 1. Any extend iN to iM
2652 // 2. SHL by M-N
2653 // 3. [US][ADD|SUB|SHL]SAT
2654 // 4. L/ASHR by M-N
2655 //
2656 // It may be more efficient to lower this to a min and a max operation in
2657 // the higher precision arithmetic if the promoted operation isn't legal,
2658 // but this decision is up to the target's lowering request.
2659 Register DstReg = MI.getOperand(0).getReg();
2660
2661 unsigned NewBits = WideTy.getScalarSizeInBits();
2662 unsigned SHLAmount = NewBits - MRI.getType(DstReg).getScalarSizeInBits();
2663
2664 // Shifts must zero-extend the RHS to preserve the unsigned quantity, and
2665 // must not left shift the RHS to preserve the shift amount.
2666 auto LHS = MIRBuilder.buildAnyExt(WideTy, MI.getOperand(1));
2667 auto RHS = IsShift ? MIRBuilder.buildZExt(WideTy, MI.getOperand(2))
2668 : MIRBuilder.buildAnyExt(WideTy, MI.getOperand(2));
2669 auto ShiftK = MIRBuilder.buildConstant(WideTy, SHLAmount);
2670 auto ShiftL = MIRBuilder.buildShl(WideTy, LHS, ShiftK);
2671 auto ShiftR = IsShift ? RHS : MIRBuilder.buildShl(WideTy, RHS, ShiftK);
2672
2673 auto WideInst = MIRBuilder.buildInstr(MI.getOpcode(), {WideTy},
2674 {ShiftL, ShiftR}, MI.getFlags());
2675
2676 // Use a shift that will preserve the number of sign bits when the trunc is
2677 // folded away.
2678 auto Result = IsSigned ? MIRBuilder.buildAShr(WideTy, WideInst, ShiftK)
2679 : MIRBuilder.buildLShr(WideTy, WideInst, ShiftK);
2680
2681 MIRBuilder.buildTrunc(DstReg, Result);
2682 MI.eraseFromParent();
2683 return Legalized;
2684}
2685
2687LegalizerHelper::widenScalarMulo(MachineInstr &MI, unsigned TypeIdx,
2688 LLT WideTy) {
  // Widen G_UMULO / G_SMULO. TypeIdx 1 just widens the overflow def. For
  // TypeIdx 0, multiply the sign-/zero-extended operands in WideTy and detect
  // overflow by checking that the high bits of the wide product properly
  // extend the low (original-width) part — combined with the wide multiply's
  // own overflow flag when WideTy is less than twice the source width.
2689 if (TypeIdx == 1) {
2690 Observer.changingInstr(MI);
2691 widenScalarDst(MI, WideTy, 1);
2692 Observer.changedInstr(MI);
2693 return Legalized;
2694 }
2695
2696 bool IsSigned = MI.getOpcode() == TargetOpcode::G_SMULO;
2697 auto [Result, OriginalOverflow, LHS, RHS] = MI.getFirst4Regs();
2698 LLT SrcTy = MRI.getType(LHS);
2699 LLT OverflowTy = MRI.getType(OriginalOverflow);
2700 unsigned SrcBitWidth = SrcTy.getScalarSizeInBits();
2701
2702 // To determine if the result overflowed in the larger type, we extend the
2703 // input to the larger type, do the multiply (checking if it overflows),
2704 // then also check the high bits of the result to see if overflow happened
2705 // there.
2706 unsigned ExtOp = IsSigned ? TargetOpcode::G_SEXT : TargetOpcode::G_ZEXT;
2707 auto LeftOperand = MIRBuilder.buildInstr(ExtOp, {WideTy}, {LHS});
2708 auto RightOperand = MIRBuilder.buildInstr(ExtOp, {WideTy}, {RHS});
2709
2710 // Multiplication cannot overflow if the WideTy is >= 2 * original width,
2711 // so we don't need to check the overflow result of larger type Mulo.
2712 bool WideMulCanOverflow = WideTy.getScalarSizeInBits() < 2 * SrcBitWidth;
2713
2714 unsigned MulOpc =
2715 WideMulCanOverflow ? MI.getOpcode() : (unsigned)TargetOpcode::G_MUL;
2716
2717 MachineInstrBuilder Mulo;
2718 if (WideMulCanOverflow)
2719 Mulo = MIRBuilder.buildInstr(MulOpc, {WideTy, OverflowTy},
2720 {LeftOperand, RightOperand});
2721 else
2722 Mulo = MIRBuilder.buildInstr(MulOpc, {WideTy}, {LeftOperand, RightOperand});
2723
2724 auto Mul = Mulo->getOperand(0);
2725 MIRBuilder.buildTrunc(Result, Mul);
2726
2727 MachineInstrBuilder ExtResult;
2728 // Overflow occurred if it occurred in the larger type, or if the high part
2729 // of the result does not zero/sign-extend the low part. Check this second
2730 // possibility first.
2731 if (IsSigned) {
2732 // For signed, overflow occurred when the high part does not sign-extend
2733 // the low part.
2734 ExtResult = MIRBuilder.buildSExtInReg(WideTy, Mul, SrcBitWidth);
2735 } else {
2736 // Unsigned overflow occurred when the high part does not zero-extend the
2737 // low part.
2738 ExtResult = MIRBuilder.buildZExtInReg(WideTy, Mul, SrcBitWidth);
2739 }
2740
2741 if (WideMulCanOverflow) {
2742 auto Overflow =
2743 MIRBuilder.buildICmp(CmpInst::ICMP_NE, OverflowTy, Mul, ExtResult);
2744 // Finally check if the multiplication in the larger type itself overflowed.
2745 MIRBuilder.buildOr(OriginalOverflow, Mulo->getOperand(1), Overflow);
2746 } else {
2747 MIRBuilder.buildICmp(CmpInst::ICMP_NE, OriginalOverflow, Mul, ExtResult);
2748 }
2749 MI.eraseFromParent();
2750 return Legalized;
2751}
2752
2755 unsigned Opcode = MI.getOpcode();
2756 switch (Opcode) {
2757 default:
2758 return UnableToLegalize;
2759 case TargetOpcode::G_ATOMICRMW_XCHG:
2760 case TargetOpcode::G_ATOMICRMW_ADD:
2761 case TargetOpcode::G_ATOMICRMW_SUB:
2762 case TargetOpcode::G_ATOMICRMW_AND:
2763 case TargetOpcode::G_ATOMICRMW_OR:
2764 case TargetOpcode::G_ATOMICRMW_XOR:
2765 case TargetOpcode::G_ATOMICRMW_MIN:
2766 case TargetOpcode::G_ATOMICRMW_MAX:
2767 case TargetOpcode::G_ATOMICRMW_UMIN:
2768 case TargetOpcode::G_ATOMICRMW_UMAX:
2769 assert(TypeIdx == 0 && "atomicrmw with second scalar type");
2770 Observer.changingInstr(MI);
2771 widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ANYEXT);
2772 widenScalarDst(MI, WideTy, 0);
2773 Observer.changedInstr(MI);
2774 return Legalized;
2775 case TargetOpcode::G_ATOMIC_CMPXCHG:
2776 assert(TypeIdx == 0 && "G_ATOMIC_CMPXCHG with second scalar type");
2777 Observer.changingInstr(MI);
2778 widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ANYEXT);
2779 widenScalarSrc(MI, WideTy, 3, TargetOpcode::G_ANYEXT);
2780 widenScalarDst(MI, WideTy, 0);
2781 Observer.changedInstr(MI);
2782 return Legalized;
2783 case TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS:
2784 if (TypeIdx == 0) {
2785 Observer.changingInstr(MI);
2786 widenScalarSrc(MI, WideTy, 3, TargetOpcode::G_ANYEXT);
2787 widenScalarSrc(MI, WideTy, 4, TargetOpcode::G_ANYEXT);
2788 widenScalarDst(MI, WideTy, 0);
2789 Observer.changedInstr(MI);
2790 return Legalized;
2791 }
2792 assert(TypeIdx == 1 &&
2793 "G_ATOMIC_CMPXCHG_WITH_SUCCESS with third scalar type");
2794 Observer.changingInstr(MI);
2795 widenScalarDst(MI, WideTy, 1);
2796 Observer.changedInstr(MI);
2797 return Legalized;
2798 case TargetOpcode::G_EXTRACT:
2799 return widenScalarExtract(MI, TypeIdx, WideTy);
2800 case TargetOpcode::G_INSERT:
2801 return widenScalarInsert(MI, TypeIdx, WideTy);
2802 case TargetOpcode::G_MERGE_VALUES:
2803 return widenScalarMergeValues(MI, TypeIdx, WideTy);
2804 case TargetOpcode::G_UNMERGE_VALUES:
2805 return widenScalarUnmergeValues(MI, TypeIdx, WideTy);
2806 case TargetOpcode::G_SADDO:
2807 case TargetOpcode::G_SSUBO:
2808 case TargetOpcode::G_UADDO:
2809 case TargetOpcode::G_USUBO:
2810 case TargetOpcode::G_SADDE:
2811 case TargetOpcode::G_SSUBE:
2812 case TargetOpcode::G_UADDE:
2813 case TargetOpcode::G_USUBE:
2814 return widenScalarAddSubOverflow(MI, TypeIdx, WideTy);
2815 case TargetOpcode::G_UMULO:
2816 case TargetOpcode::G_SMULO:
2817 return widenScalarMulo(MI, TypeIdx, WideTy);
2818 case TargetOpcode::G_SADDSAT:
2819 case TargetOpcode::G_SSUBSAT:
2820 case TargetOpcode::G_SSHLSAT:
2821 case TargetOpcode::G_UADDSAT:
2822 case TargetOpcode::G_USUBSAT:
2823 case TargetOpcode::G_USHLSAT:
2824 return widenScalarAddSubShlSat(MI, TypeIdx, WideTy);
2825 case TargetOpcode::G_CTTZ:
2826 case TargetOpcode::G_CTTZ_ZERO_POISON:
2827 case TargetOpcode::G_CTLZ:
2828 case TargetOpcode::G_CTLZ_ZERO_POISON:
2829 case TargetOpcode::G_CTLS:
2830 case TargetOpcode::G_CTPOP: {
2831 if (TypeIdx == 0) {
2832 Observer.changingInstr(MI);
2833 widenScalarDst(MI, WideTy, 0);
2834 Observer.changedInstr(MI);
2835 return Legalized;
2836 }
2837
2838 Register SrcReg = MI.getOperand(1).getReg();
2839
2840 // First extend the input.
2841 unsigned ExtOpc;
2842 switch (Opcode) {
2843 case TargetOpcode::G_CTTZ:
2844 case TargetOpcode::G_CTTZ_ZERO_POISON:
2845 case TargetOpcode::G_CTLZ_ZERO_POISON: // poison shifted out below
2846 ExtOpc = TargetOpcode::G_ANYEXT;
2847 break;
2848 case TargetOpcode::G_CTLS:
2849 ExtOpc = TargetOpcode::G_SEXT;
2850 break;
2851 default:
2852 ExtOpc = TargetOpcode::G_ZEXT;
2853 }
2854
2855 auto MIBSrc = MIRBuilder.buildInstr(ExtOpc, {WideTy}, {SrcReg});
2856 LLT CurTy = MRI.getType(SrcReg);
2857 unsigned NewOpc = Opcode;
2858 if (NewOpc == TargetOpcode::G_CTTZ) {
2859 // The count is the same in the larger type except if the original
2860 // value was zero. This can be handled by setting the bit just off
2861 // the top of the original type.
2862 auto TopBit = APInt::getOneBitSet(WideTy.getScalarSizeInBits(),
2863 CurTy.getScalarSizeInBits());
2864 MIBSrc = MIRBuilder.buildOr(
2865 WideTy, MIBSrc, MIRBuilder.buildConstant(WideTy, TopBit));
2866 // Now we know the operand is non-zero, use the more relaxed opcode.
2867 NewOpc = TargetOpcode::G_CTTZ_ZERO_POISON;
2868 }
2869
2870 unsigned SizeDiff =
2871 WideTy.getScalarSizeInBits() - CurTy.getScalarSizeInBits();
2872
2873 if (Opcode == TargetOpcode::G_CTLZ_ZERO_POISON) {
2874 // An optimization where the result is the CTLZ after the left shift by
2875 // (Difference in widety and current ty), that is,
2876 // MIBSrc = MIBSrc << (sizeinbits(WideTy) - sizeinbits(CurTy))
2877 // Result = ctlz MIBSrc
2878 MIBSrc = MIRBuilder.buildShl(WideTy, MIBSrc,
2879 MIRBuilder.buildConstant(WideTy, SizeDiff));
2880 }
2881
2882 // Perform the operation at the larger size.
2883 auto MIBNewOp = MIRBuilder.buildInstr(NewOpc, {WideTy}, {MIBSrc});
2884 // This is already the correct result for CTPOP and CTTZs
2885 if (Opcode == TargetOpcode::G_CTLZ || Opcode == TargetOpcode::G_CTLS) {
2886 // The correct result is NewOp - (Difference in widety and current ty).
2887 // At this stage the SUB is guaranteed to be positive with no unsigned wrap,
2888 // which later KnownBits-based optimizations for CTLZ can rely on.
2889 MIBNewOp = MIRBuilder.buildSub(
2890 WideTy, MIBNewOp, MIRBuilder.buildConstant(WideTy, SizeDiff),
2891 Opcode == TargetOpcode::G_CTLZ
2892 ? std::optional<unsigned>(MachineInstr::NoUWrap)
2893 : std::nullopt);
2894 }
2895
2896 MIRBuilder.buildZExtOrTrunc(MI.getOperand(0), MIBNewOp);
2897 MI.eraseFromParent();
2898 return Legalized;
2899 }
2900 case TargetOpcode::G_BSWAP: {
2901 Observer.changingInstr(MI);
2902 Register DstReg = MI.getOperand(0).getReg();
2903
2904 Register ShrReg = MRI.createGenericVirtualRegister(WideTy);
2905 Register DstExt = MRI.createGenericVirtualRegister(WideTy);
2906 Register ShiftAmtReg = MRI.createGenericVirtualRegister(WideTy);
2907 widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
2908
2909 MI.getOperand(0).setReg(DstExt);
2910
2911 MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt());
2912
2913 LLT Ty = MRI.getType(DstReg);
2914 unsigned DiffBits = WideTy.getScalarSizeInBits() - Ty.getScalarSizeInBits();
2915 MIRBuilder.buildConstant(ShiftAmtReg, DiffBits);
2916 MIRBuilder.buildLShr(ShrReg, DstExt, ShiftAmtReg);
2917
2918 MIRBuilder.buildTrunc(DstReg, ShrReg);
2919 Observer.changedInstr(MI);
2920 return Legalized;
2921 }
2922 case TargetOpcode::G_BITREVERSE: {
2923 Observer.changingInstr(MI);
2924
2925 Register DstReg = MI.getOperand(0).getReg();
2926 LLT Ty = MRI.getType(DstReg);
2927 unsigned DiffBits = WideTy.getScalarSizeInBits() - Ty.getScalarSizeInBits();
2928
2929 Register DstExt = MRI.createGenericVirtualRegister(WideTy);
2930 widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
2931 MI.getOperand(0).setReg(DstExt);
2932 MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt());
2933
2934 auto ShiftAmt = MIRBuilder.buildConstant(WideTy, DiffBits);
2935 auto Shift = MIRBuilder.buildLShr(WideTy, DstExt, ShiftAmt);
2936 MIRBuilder.buildTrunc(DstReg, Shift);
2937 Observer.changedInstr(MI);
2938 return Legalized;
2939 }
2940 case TargetOpcode::G_FREEZE:
2941 case TargetOpcode::G_CONSTANT_FOLD_BARRIER:
2942 Observer.changingInstr(MI);
2943 widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
2944 widenScalarDst(MI, WideTy);
2945 Observer.changedInstr(MI);
2946 return Legalized;
2947
2948 case TargetOpcode::G_ABS:
2949 Observer.changingInstr(MI);
2950 widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_SEXT);
2951 widenScalarDst(MI, WideTy);
2952 Observer.changedInstr(MI);
2953 return Legalized;
2954
2955 case TargetOpcode::G_ADD:
2956 case TargetOpcode::G_AND:
2957 case TargetOpcode::G_MUL:
2958 case TargetOpcode::G_OR:
2959 case TargetOpcode::G_XOR:
2960 case TargetOpcode::G_SUB:
2961 case TargetOpcode::G_SHUFFLE_VECTOR:
2962 // Perform operation at larger width (any extension is fine here, high bits
2963 // don't affect the result) and then truncate the result back to the
2964 // original type.
2965 Observer.changingInstr(MI);
2966 widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
2967 widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ANYEXT);
2968 widenScalarDst(MI, WideTy);
2969 Observer.changedInstr(MI);
2970 return Legalized;
2971
2972 case TargetOpcode::G_SBFX:
2973 case TargetOpcode::G_UBFX:
2974 Observer.changingInstr(MI);
2975
2976 if (TypeIdx == 0) {
2977 widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
2978 widenScalarDst(MI, WideTy);
2979 } else {
2980 widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ZEXT);
2981 widenScalarSrc(MI, WideTy, 3, TargetOpcode::G_ZEXT);
2982 }
2983
2984 Observer.changedInstr(MI);
2985 return Legalized;
2986
2987 case TargetOpcode::G_SHL:
2988 Observer.changingInstr(MI);
2989
2990 if (TypeIdx == 0) {
2991 widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
2992 widenScalarDst(MI, WideTy);
2993 } else {
2994 assert(TypeIdx == 1);
2995 // The "number of bits to shift" operand must preserve its value as an
2996 // unsigned integer:
2997 widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ZEXT);
2998 }
2999
3000 Observer.changedInstr(MI);
3001 return Legalized;
3002
3003 case TargetOpcode::G_ROTR:
3004 case TargetOpcode::G_ROTL:
3005 if (TypeIdx != 1)
3006 return UnableToLegalize;
3007
3008 Observer.changingInstr(MI);
3009 widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ZEXT);
3010 Observer.changedInstr(MI);
3011 return Legalized;
3012
3013 case TargetOpcode::G_SDIV:
3014 case TargetOpcode::G_SREM:
3015 case TargetOpcode::G_SMIN:
3016 case TargetOpcode::G_SMAX:
3017 case TargetOpcode::G_ABDS:
3018 Observer.changingInstr(MI);
3019 widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_SEXT);
3020 widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_SEXT);
3021 widenScalarDst(MI, WideTy);
3022 Observer.changedInstr(MI);
3023 return Legalized;
3024
3025 case TargetOpcode::G_SDIVREM:
3026 Observer.changingInstr(MI);
3027 widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_SEXT);
3028 widenScalarSrc(MI, WideTy, 3, TargetOpcode::G_SEXT);
3029 widenScalarDst(MI, WideTy);
3030 MIRBuilder.setInsertPt(MIRBuilder.getMBB(), --MIRBuilder.getInsertPt());
3031 widenScalarDst(MI, WideTy, 1);
3032 Observer.changedInstr(MI);
3033 return Legalized;
3034
3035 case TargetOpcode::G_ASHR:
3036 case TargetOpcode::G_LSHR:
3037 Observer.changingInstr(MI);
3038
3039 if (TypeIdx == 0) {
3040 unsigned CvtOp = Opcode == TargetOpcode::G_ASHR ? TargetOpcode::G_SEXT
3041 : TargetOpcode::G_ZEXT;
3042
3043 widenScalarSrc(MI, WideTy, 1, CvtOp);
3044 widenScalarDst(MI, WideTy);
3045 } else {
3046 assert(TypeIdx == 1);
3047 // The "number of bits to shift" operand must preserve its value as an
3048 // unsigned integer:
3049 widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ZEXT);
3050 }
3051
3052 Observer.changedInstr(MI);
3053 return Legalized;
3054 case TargetOpcode::G_UDIV:
3055 case TargetOpcode::G_UREM:
3056 case TargetOpcode::G_ABDU:
3057 Observer.changingInstr(MI);
3058 widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ZEXT);
3059 widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ZEXT);
3060 widenScalarDst(MI, WideTy);
3061 Observer.changedInstr(MI);
3062 return Legalized;
3063 case TargetOpcode::G_UDIVREM:
3064 Observer.changingInstr(MI);
3065 widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ZEXT);
3066 widenScalarSrc(MI, WideTy, 3, TargetOpcode::G_ZEXT);
3067 widenScalarDst(MI, WideTy);
3068 MIRBuilder.setInsertPt(MIRBuilder.getMBB(), --MIRBuilder.getInsertPt());
3069 widenScalarDst(MI, WideTy, 1);
3070 Observer.changedInstr(MI);
3071 return Legalized;
3072 case TargetOpcode::G_UMIN:
3073 case TargetOpcode::G_UMAX: {
3074 LLT Ty = MRI.getType(MI.getOperand(0).getReg());
3075
3076 auto &Ctx = MIRBuilder.getMF().getFunction().getContext();
3077 unsigned ExtOpc =
3078 TLI.isSExtCheaperThanZExt(getApproximateEVTForLLT(Ty, Ctx),
3079 getApproximateEVTForLLT(WideTy, Ctx))
3080 ? TargetOpcode::G_SEXT
3081 : TargetOpcode::G_ZEXT;
3082
3083 Observer.changingInstr(MI);
3084 widenScalarSrc(MI, WideTy, 1, ExtOpc);
3085 widenScalarSrc(MI, WideTy, 2, ExtOpc);
3086 widenScalarDst(MI, WideTy);
3087 Observer.changedInstr(MI);
3088 return Legalized;
3089 }
3090
3091 case TargetOpcode::G_SELECT:
3092 Observer.changingInstr(MI);
3093 if (TypeIdx == 0) {
3094 // Perform operation at larger width (any extension is fine here, high
3095 // bits don't affect the result) and then truncate the result back to the
3096 // original type.
3097 widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ANYEXT);
3098 widenScalarSrc(MI, WideTy, 3, TargetOpcode::G_ANYEXT);
3099 widenScalarDst(MI, WideTy);
3100 } else {
3101 bool IsVec = MRI.getType(MI.getOperand(1).getReg()).isVector();
3102 // Explicit extension is required here since high bits affect the result.
3103 widenScalarSrc(MI, WideTy, 1, MIRBuilder.getBoolExtOp(IsVec, false));
3104 }
3105 Observer.changedInstr(MI);
3106 return Legalized;
3107
3108 case TargetOpcode::G_FPEXT:
3109 if (TypeIdx != 1)
3110 return UnableToLegalize;
3111
3112 Observer.changingInstr(MI);
3113 widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_FPEXT);
3114 Observer.changedInstr(MI);
3115 return Legalized;
3116 case TargetOpcode::G_FPTOSI:
3117 case TargetOpcode::G_FPTOUI:
3118 case TargetOpcode::G_INTRINSIC_LRINT:
3119 case TargetOpcode::G_INTRINSIC_LLRINT:
3120 case TargetOpcode::G_IS_FPCLASS:
3121 Observer.changingInstr(MI);
3122
3123 if (TypeIdx == 0)
3124 widenScalarDst(MI, WideTy);
3125 else
3126 widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_FPEXT);
3127
3128 Observer.changedInstr(MI);
3129 return Legalized;
3130 case TargetOpcode::G_SITOFP:
3131 Observer.changingInstr(MI);
3132
3133 if (TypeIdx == 0)
3134 widenScalarDst(MI, WideTy, 0, TargetOpcode::G_FPTRUNC);
3135 else
3136 widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_SEXT);
3137
3138 Observer.changedInstr(MI);
3139 return Legalized;
3140 case TargetOpcode::G_UITOFP:
3141 Observer.changingInstr(MI);
3142
3143 if (TypeIdx == 0)
3144 widenScalarDst(MI, WideTy, 0, TargetOpcode::G_FPTRUNC);
3145 else
3146 widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ZEXT);
3147
3148 Observer.changedInstr(MI);
3149 return Legalized;
3150 case TargetOpcode::G_FPTOSI_SAT:
3151 case TargetOpcode::G_FPTOUI_SAT:
3152 Observer.changingInstr(MI);
3153
3154 if (TypeIdx == 0) {
3155 Register OldDst = MI.getOperand(0).getReg();
3156 LLT Ty = MRI.getType(OldDst);
3157 Register ExtReg = MRI.createGenericVirtualRegister(WideTy);
3158 Register NewDst;
3159 MI.getOperand(0).setReg(ExtReg);
3160 uint64_t ShortBits = Ty.getScalarSizeInBits();
3161 uint64_t WideBits = WideTy.getScalarSizeInBits();
3162 MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt());
3163 if (Opcode == TargetOpcode::G_FPTOSI_SAT) {
3164 // z = i16 fptosi_sat(a)
3165 // ->
3166 // x = i32 fptosi_sat(a)
3167 // y = smin(x, 32767)
3168 // z = smax(y, -32768)
3169 auto MaxVal = MIRBuilder.buildConstant(
3170 WideTy, APInt::getSignedMaxValue(ShortBits).sext(WideBits));
3171 auto MinVal = MIRBuilder.buildConstant(
3172 WideTy, APInt::getSignedMinValue(ShortBits).sext(WideBits));
3173 Register MidReg =
3174 MIRBuilder.buildSMin(WideTy, ExtReg, MaxVal).getReg(0);
3175 NewDst = MIRBuilder.buildSMax(WideTy, MidReg, MinVal).getReg(0);
3176 } else {
3177 // z = i16 fptoui_sat(a)
3178 // ->
3179 // x = i32 fptoui_sat(a)
3180 // y = smin(x, 65535)
3181 auto MaxVal = MIRBuilder.buildConstant(
3182 WideTy, APInt::getAllOnes(ShortBits).zext(WideBits));
3183 NewDst = MIRBuilder.buildUMin(WideTy, ExtReg, MaxVal).getReg(0);
3184 }
3185 MIRBuilder.buildTrunc(OldDst, NewDst);
3186 } else
3187 widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_FPEXT);
3188
3189 Observer.changedInstr(MI);
3190 return Legalized;
3191 case TargetOpcode::G_LOAD:
3192 case TargetOpcode::G_SEXTLOAD:
3193 case TargetOpcode::G_ZEXTLOAD:
3194 case TargetOpcode::G_FPEXTLOAD:
3195 Observer.changingInstr(MI);
3196 widenScalarDst(MI, WideTy);
3197 Observer.changedInstr(MI);
3198 return Legalized;
3199
3200 case TargetOpcode::G_STORE: {
3201 if (TypeIdx != 0)
3202 return UnableToLegalize;
3203
3204 LLT Ty = MRI.getType(MI.getOperand(0).getReg());
3205 assert(!Ty.isPointerOrPointerVector() && "Can't widen type");
3206 if (!Ty.isScalar()) {
3207 // We need to widen the vector element type.
3208 Observer.changingInstr(MI);
3209 widenScalarSrc(MI, WideTy, 0, TargetOpcode::G_ANYEXT);
3210 // We also need to adjust the MMO to turn this into a truncating store.
3211 MachineMemOperand &MMO = **MI.memoperands_begin();
3212 MachineFunction &MF = MIRBuilder.getMF();
3213 auto *NewMMO = MF.getMachineMemOperand(&MMO, MMO.getPointerInfo(), Ty);
3214 MI.setMemRefs(MF, {NewMMO});
3215 Observer.changedInstr(MI);
3216 return Legalized;
3217 }
3218
3219 Observer.changingInstr(MI);
3220
3221 unsigned ExtType = Ty.getScalarSizeInBits() == 1 ?
3222 TargetOpcode::G_ZEXT : TargetOpcode::G_ANYEXT;
3223 widenScalarSrc(MI, WideTy, 0, ExtType);
3224
3225 Observer.changedInstr(MI);
3226 return Legalized;
3227 }
3228 case TargetOpcode::G_FPTRUNCSTORE:
3229 if (TypeIdx != 0)
3230 return UnableToLegalize;
3231 Observer.changingInstr(MI);
3232 widenScalarSrc(MI, WideTy, 0, TargetOpcode::G_FPEXT);
3233 Observer.changedInstr(MI);
3234 return Legalized;
3235 case TargetOpcode::G_CONSTANT: {
3236 MachineOperand &SrcMO = MI.getOperand(1);
3237 LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext();
3238 unsigned ExtOpc = LI.getExtOpcodeForWideningConstant(
3239 MRI.getType(MI.getOperand(0).getReg()));
3240 assert((ExtOpc == TargetOpcode::G_ZEXT || ExtOpc == TargetOpcode::G_SEXT ||
3241 ExtOpc == TargetOpcode::G_ANYEXT) &&
3242 "Illegal Extend");
3243 const APInt &SrcVal = SrcMO.getCImm()->getValue();
3244 const APInt &Val = (ExtOpc == TargetOpcode::G_SEXT)
3245 ? SrcVal.sext(WideTy.getSizeInBits())
3246 : SrcVal.zext(WideTy.getSizeInBits());
3247 Observer.changingInstr(MI);
3248 SrcMO.setCImm(ConstantInt::get(Ctx, Val));
3249
3250 widenScalarDst(MI, WideTy);
3251 Observer.changedInstr(MI);
3252 return Legalized;
3253 }
3254 case TargetOpcode::G_FCONSTANT: {
3255 // To avoid changing the bits of the constant due to extension to a larger
3256 // type and then using G_FPTRUNC, we simply convert to a G_CONSTANT.
3257 MachineOperand &SrcMO = MI.getOperand(1);
3258 APInt Val = SrcMO.getFPImm()->getValueAPF().bitcastToAPInt();
3259 MIRBuilder.setInstrAndDebugLoc(MI);
3260 auto IntCst = MIRBuilder.buildConstant(MI.getOperand(0).getReg(), Val);
3261 widenScalarDst(*IntCst, WideTy, 0, TargetOpcode::G_TRUNC);
3262 MI.eraseFromParent();
3263 return Legalized;
3264 }
3265 case TargetOpcode::G_IMPLICIT_DEF: {
3266 Observer.changingInstr(MI);
3267 widenScalarDst(MI, WideTy);
3268 Observer.changedInstr(MI);
3269 return Legalized;
3270 }
3271 case TargetOpcode::G_BRCOND:
3272 Observer.changingInstr(MI);
3273 widenScalarSrc(MI, WideTy, 0, MIRBuilder.getBoolExtOp(false, false));
3274 Observer.changedInstr(MI);
3275 return Legalized;
3276
3277 case TargetOpcode::G_FCMP:
3278 Observer.changingInstr(MI);
3279 if (TypeIdx == 0)
3280 widenScalarDst(MI, WideTy);
3281 else {
3282 widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_FPEXT);
3283 widenScalarSrc(MI, WideTy, 3, TargetOpcode::G_FPEXT);
3284 }
3285 Observer.changedInstr(MI);
3286 return Legalized;
3287
3288 case TargetOpcode::G_ICMP:
3289 Observer.changingInstr(MI);
3290 if (TypeIdx == 0)
3291 widenScalarDst(MI, WideTy);
3292 else {
3293 LLT SrcTy = MRI.getType(MI.getOperand(2).getReg());
3294 CmpInst::Predicate Pred =
3295 static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
3296
3297 auto &Ctx = MIRBuilder.getMF().getFunction().getContext();
3298 unsigned ExtOpcode =
3299 (CmpInst::isSigned(Pred) ||
3300 TLI.isSExtCheaperThanZExt(getApproximateEVTForLLT(SrcTy, Ctx),
3301 getApproximateEVTForLLT(WideTy, Ctx)))
3302 ? TargetOpcode::G_SEXT
3303 : TargetOpcode::G_ZEXT;
3304 widenScalarSrc(MI, WideTy, 2, ExtOpcode);
3305 widenScalarSrc(MI, WideTy, 3, ExtOpcode);
3306 }
3307 Observer.changedInstr(MI);
3308 return Legalized;
3309
3310 case TargetOpcode::G_PTR_ADD:
3311 assert(TypeIdx == 1 && "unable to legalize pointer of G_PTR_ADD");
3312 Observer.changingInstr(MI);
3313 widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_SEXT);
3314 Observer.changedInstr(MI);
3315 return Legalized;
3316
3317 case TargetOpcode::G_PHI: {
3318 assert(TypeIdx == 0 && "Expecting only Idx 0");
3319
3320 Observer.changingInstr(MI);
3321 for (unsigned I = 1; I < MI.getNumOperands(); I += 2) {
3322 MachineBasicBlock &OpMBB = *MI.getOperand(I + 1).getMBB();
3323 MIRBuilder.setInsertPt(OpMBB, OpMBB.getFirstTerminatorForward());
3324 widenScalarSrc(MI, WideTy, I, TargetOpcode::G_ANYEXT);
3325 }
3326
3327 MachineBasicBlock &MBB = *MI.getParent();
3328 MIRBuilder.setInsertPt(MBB, --MBB.getFirstNonPHI());
3329 widenScalarDst(MI, WideTy);
3330 Observer.changedInstr(MI);
3331 return Legalized;
3332 }
3333 case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
3334 if (TypeIdx == 0) {
3335 Register VecReg = MI.getOperand(1).getReg();
3336 LLT VecTy = MRI.getType(VecReg);
3337 Observer.changingInstr(MI);
3338
3339 widenScalarSrc(MI, LLT::vector(VecTy.getElementCount(), WideTy), 1,
3340 TargetOpcode::G_ANYEXT);
3341
3342 widenScalarDst(MI, WideTy, 0);
3343 Observer.changedInstr(MI);
3344 return Legalized;
3345 }
3346
3347 if (TypeIdx != 2)
3348 return UnableToLegalize;
3349 Observer.changingInstr(MI);
3350 widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ZEXT);
3351 Observer.changedInstr(MI);
3352 return Legalized;
3353 }
3354 case TargetOpcode::G_INSERT_VECTOR_ELT: {
3355 if (TypeIdx == 0) {
3356 Observer.changingInstr(MI);
3357 const LLT WideEltTy = WideTy.getElementType();
3358
3359 widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
3360 widenScalarSrc(MI, WideEltTy, 2, TargetOpcode::G_ANYEXT);
3361 widenScalarDst(MI, WideTy, 0);
3362 Observer.changedInstr(MI);
3363 return Legalized;
3364 }
3365
3366 if (TypeIdx == 1) {
3367 Observer.changingInstr(MI);
3368
3369 Register VecReg = MI.getOperand(1).getReg();
3370 LLT VecTy = MRI.getType(VecReg);
3371 LLT WideVecTy = VecTy.changeVectorElementType(WideTy);
3372
3373 widenScalarSrc(MI, WideVecTy, 1, TargetOpcode::G_ANYEXT);
3374 widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ANYEXT);
3375 widenScalarDst(MI, WideVecTy, 0);
3376 Observer.changedInstr(MI);
3377 return Legalized;
3378 }
3379
3380 if (TypeIdx == 2) {
3381 Observer.changingInstr(MI);
3382 widenScalarSrc(MI, WideTy, 3, TargetOpcode::G_ZEXT);
3383 Observer.changedInstr(MI);
3384 return Legalized;
3385 }
3386
3387 return UnableToLegalize;
3388 }
3389 case TargetOpcode::G_FADD:
3390 case TargetOpcode::G_FMUL:
3391 case TargetOpcode::G_FSUB:
3392 case TargetOpcode::G_FMA:
3393 case TargetOpcode::G_FMAD:
3394 case TargetOpcode::G_FNEG:
3395 case TargetOpcode::G_FABS:
3396 case TargetOpcode::G_FCANONICALIZE:
3397 case TargetOpcode::G_FMINNUM:
3398 case TargetOpcode::G_FMAXNUM:
3399 case TargetOpcode::G_FMINNUM_IEEE:
3400 case TargetOpcode::G_FMAXNUM_IEEE:
3401 case TargetOpcode::G_FMINIMUM:
3402 case TargetOpcode::G_FMAXIMUM:
3403 case TargetOpcode::G_FMINIMUMNUM:
3404 case TargetOpcode::G_FMAXIMUMNUM:
3405 case TargetOpcode::G_FDIV:
3406 case TargetOpcode::G_FREM:
3407 case TargetOpcode::G_FCEIL:
3408 case TargetOpcode::G_FFLOOR:
3409 case TargetOpcode::G_FCOS:
3410 case TargetOpcode::G_FSIN:
3411 case TargetOpcode::G_FTAN:
3412 case TargetOpcode::G_FACOS:
3413 case TargetOpcode::G_FASIN:
3414 case TargetOpcode::G_FATAN:
3415 case TargetOpcode::G_FATAN2:
3416 case TargetOpcode::G_FCOSH:
3417 case TargetOpcode::G_FSINH:
3418 case TargetOpcode::G_FTANH:
3419 case TargetOpcode::G_FLOG10:
3420 case TargetOpcode::G_FLOG:
3421 case TargetOpcode::G_FLOG2:
3422 case TargetOpcode::G_FRINT:
3423 case TargetOpcode::G_FNEARBYINT:
3424 case TargetOpcode::G_FSQRT:
3425 case TargetOpcode::G_FEXP:
3426 case TargetOpcode::G_FEXP2:
3427 case TargetOpcode::G_FEXP10:
3428 case TargetOpcode::G_FPOW:
3429 case TargetOpcode::G_INTRINSIC_TRUNC:
3430 case TargetOpcode::G_INTRINSIC_ROUND:
3431 case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
3432 assert(TypeIdx == 0);
3433 Observer.changingInstr(MI);
3434
3435 for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I)
3436 widenScalarSrc(MI, WideTy, I, TargetOpcode::G_FPEXT);
3437
3438 widenScalarDst(MI, WideTy, 0, TargetOpcode::G_FPTRUNC);
3439 Observer.changedInstr(MI);
3440 return Legalized;
3441 case TargetOpcode::G_FMODF: {
3442 Observer.changingInstr(MI);
3443 widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_FPEXT);
3444
3445 widenScalarDst(MI, WideTy, 1, TargetOpcode::G_FPTRUNC);
3446 MIRBuilder.setInsertPt(MIRBuilder.getMBB(), --MIRBuilder.getInsertPt());
3447 widenScalarDst(MI, WideTy, 0, TargetOpcode::G_FPTRUNC);
3448 Observer.changedInstr(MI);
3449 return Legalized;
3450 }
3451 case TargetOpcode::G_FPOWI:
3452 case TargetOpcode::G_FLDEXP:
3453 case TargetOpcode::G_STRICT_FLDEXP: {
3454 if (TypeIdx == 0) {
3455 if (Opcode == TargetOpcode::G_STRICT_FLDEXP)
3456 return UnableToLegalize;
3457
3458 Observer.changingInstr(MI);
3459 widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_FPEXT);
3460 widenScalarDst(MI, WideTy, 0, TargetOpcode::G_FPTRUNC);
3461 Observer.changedInstr(MI);
3462 return Legalized;
3463 }
3464
3465 if (TypeIdx == 1) {
3466 // For some reason SelectionDAG tries to promote to a libcall without
3467 // actually changing the integer type for promotion.
3468 Observer.changingInstr(MI);
3469 widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_SEXT);
3470 Observer.changedInstr(MI);
3471 return Legalized;
3472 }
3473
3474 return UnableToLegalize;
3475 }
3476 case TargetOpcode::G_FFREXP: {
3477 Observer.changingInstr(MI);
3478
3479 if (TypeIdx == 0) {
3480 widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_FPEXT);
3481 widenScalarDst(MI, WideTy, 0, TargetOpcode::G_FPTRUNC);
3482 } else {
3483 widenScalarDst(MI, WideTy, 1);
3484 }
3485
3486 Observer.changedInstr(MI);
3487 return Legalized;
3488 }
3489 case TargetOpcode::G_LROUND:
3490 case TargetOpcode::G_LLROUND:
3491 Observer.changingInstr(MI);
3492
3493 if (TypeIdx == 0)
3494 widenScalarDst(MI, WideTy);
3495 else
3496 widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_FPEXT);
3497
3498 Observer.changedInstr(MI);
3499 return Legalized;
3500
3501 case TargetOpcode::G_INTTOPTR:
3502 if (TypeIdx != 1)
3503 return UnableToLegalize;
3504
3505 Observer.changingInstr(MI);
3506 widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ZEXT);
3507 Observer.changedInstr(MI);
3508 return Legalized;
3509 case TargetOpcode::G_PTRTOINT:
3510 if (TypeIdx != 0)
3511 return UnableToLegalize;
3512
3513 Observer.changingInstr(MI);
3514 widenScalarDst(MI, WideTy, 0);
3515 Observer.changedInstr(MI);
3516 return Legalized;
3517 case TargetOpcode::G_BUILD_VECTOR: {
3518 Observer.changingInstr(MI);
3519
3520 const LLT WideEltTy = TypeIdx == 1 ? WideTy : WideTy.getElementType();
3521 for (int I = 1, E = MI.getNumOperands(); I != E; ++I)
3522 widenScalarSrc(MI, WideEltTy, I, TargetOpcode::G_ANYEXT);
3523
3524 // Avoid changing the result vector type if the source element type was
3525 // requested.
3526 if (TypeIdx == 1) {
3527 MI.setDesc(MIRBuilder.getTII().get(TargetOpcode::G_BUILD_VECTOR_TRUNC));
3528 } else {
3529 widenScalarDst(MI, WideTy, 0);
3530 }
3531
3532 Observer.changedInstr(MI);
3533 return Legalized;
3534 }
3535 case TargetOpcode::G_SEXT_INREG:
3536 if (TypeIdx != 0)
3537 return UnableToLegalize;
3538
3539 Observer.changingInstr(MI);
3540 widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
3541 widenScalarDst(MI, WideTy, 0, TargetOpcode::G_TRUNC);
3542 Observer.changedInstr(MI);
3543 return Legalized;
3544 case TargetOpcode::G_PTRMASK: {
3545 if (TypeIdx != 1)
3546 return UnableToLegalize;
3547 Observer.changingInstr(MI);
3548 widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ZEXT);
3549 Observer.changedInstr(MI);
3550 return Legalized;
3551 }
3552 case TargetOpcode::G_VECREDUCE_ADD: {
3553 if (TypeIdx != 1)
3554 return UnableToLegalize;
3555 Observer.changingInstr(MI);
3556 widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
3557 widenScalarDst(MI, WideTy.getScalarType(), 0, TargetOpcode::G_TRUNC);
3558 Observer.changedInstr(MI);
3559 return Legalized;
3560 }
3561 case TargetOpcode::G_VECREDUCE_FADD:
3562 case TargetOpcode::G_VECREDUCE_FMUL:
3563 case TargetOpcode::G_VECREDUCE_FMIN:
3564 case TargetOpcode::G_VECREDUCE_FMAX:
3565 case TargetOpcode::G_VECREDUCE_FMINIMUM:
3566 case TargetOpcode::G_VECREDUCE_FMAXIMUM: {
3567 if (TypeIdx != 0)
3568 return UnableToLegalize;
3569 Observer.changingInstr(MI);
3570 Register VecReg = MI.getOperand(1).getReg();
3571 LLT VecTy = MRI.getType(VecReg);
3572 LLT WideVecTy = VecTy.changeElementType(WideTy);
3573 widenScalarSrc(MI, WideVecTy, 1, TargetOpcode::G_FPEXT);
3574 widenScalarDst(MI, WideTy, 0, TargetOpcode::G_FPTRUNC);
3575 Observer.changedInstr(MI);
3576 return Legalized;
3577 }
3578 case TargetOpcode::G_VSCALE: {
3579 MachineOperand &SrcMO = MI.getOperand(1);
3580 LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext();
3581 const APInt &SrcVal = SrcMO.getCImm()->getValue();
3582 // The CImm is always a signed value
3583 const APInt Val = SrcVal.sext(WideTy.getSizeInBits());
3584 Observer.changingInstr(MI);
3585 SrcMO.setCImm(ConstantInt::get(Ctx, Val));
3586 widenScalarDst(MI, WideTy);
3587 Observer.changedInstr(MI);
3588 return Legalized;
3589 }
3590 case TargetOpcode::G_SPLAT_VECTOR: {
3591 if (TypeIdx != 1)
3592 return UnableToLegalize;
3593
3594 Observer.changingInstr(MI);
3595 widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
3596 Observer.changedInstr(MI);
3597 return Legalized;
3598 }
3599 case TargetOpcode::G_INSERT_SUBVECTOR: {
3600 if (TypeIdx != 0)
3601 return UnableToLegalize;
3602
3604 Register BigVec = IS.getBigVec();
3605 Register SubVec = IS.getSubVec();
3606
3607 LLT SubVecTy = MRI.getType(SubVec);
3608 LLT SubVecWideTy = SubVecTy.changeElementType(WideTy.getElementType());
3609
3610 // Widen the G_INSERT_SUBVECTOR
3611 auto BigZExt = MIRBuilder.buildZExt(WideTy, BigVec);
3612 auto SubZExt = MIRBuilder.buildZExt(SubVecWideTy, SubVec);
3613 auto WideInsert = MIRBuilder.buildInsertSubvector(WideTy, BigZExt, SubZExt,
3614 IS.getIndexImm());
3615
3616 // Truncate back down
3617 auto SplatZero = MIRBuilder.buildSplatVector(
3618 WideTy, MIRBuilder.buildConstant(WideTy.getElementType(), 0));
3619 MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_NE, IS.getReg(0), WideInsert,
3620 SplatZero);
3621
3622 MI.eraseFromParent();
3623
3624 return Legalized;
3625 }
3626 }
3627}
3628
3630 MachineIRBuilder &B, Register Src, LLT Ty) {
3631 auto Unmerge = B.buildUnmerge(Ty, Src);
3632 for (int I = 0, E = Unmerge->getNumOperands() - 1; I != E; ++I)
3633 Pieces.push_back(Unmerge.getReg(I));
3634}
3635
/// Materialize \p ConstVal into \p DstReg by placing it in the function's
/// constant pool and emitting a G_LOAD of the pool entry's address.
///
/// Used when a G_CONSTANT / G_FCONSTANT should be lowered to a memory load
/// rather than encoded inline.
3636 static void emitLoadFromConstantPool(Register DstReg, const Constant *ConstVal,
3637 MachineIRBuilder &MIRBuilder) {
3638 MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
3639 MachineFunction &MF = MIRBuilder.getMF();
3640 const DataLayout &DL = MIRBuilder.getDataLayout();
// Constant-pool entries are addressed in the default globals address space;
// build a pointer type of the matching width for the address.
3641 unsigned AddrSpace = DL.getDefaultGlobalsAddressSpace();
3642 LLT AddrPtrTy = LLT::pointer(AddrSpace, DL.getPointerSizeInBits(AddrSpace));
3643 LLT DstLLT = MRI.getType(DstReg);
3644
// The pool slot is aligned to the ABI alignment of the constant's IR type.
3645 Align Alignment(DL.getABITypeAlign(ConstVal->getType()));
3646
// getConstantPoolIndex uniques the constant, so identical constants share a
// single pool entry.
3647 auto Addr = MIRBuilder.buildConstantPool(
3648 AddrPtrTy,
3649 MF.getConstantPool()->getConstantPoolIndex(ConstVal, Alignment));
3650
// Memory operand describing a plain load of the whole destination type.
// NOTE(review): the rendered listing dropped original line 3652 here — the
// MF.getMachineMemOperand(...) call (presumably taking the constant-pool
// MachinePointerInfo); verify against the upstream source.
3651 MachineMemOperand *MMO =
3653 MachineMemOperand::MOLoad, DstLLT, Alignment);
3654
3655 MIRBuilder.buildLoadInstr(TargetOpcode::G_LOAD, DstReg, Addr, *MMO);
3656}
3657
3660 const MachineOperand &ConstOperand = MI.getOperand(1);
3661 const Constant *ConstantVal = ConstOperand.getCImm();
3662
3663 emitLoadFromConstantPool(MI.getOperand(0).getReg(), ConstantVal, MIRBuilder);
3664 MI.eraseFromParent();
3665
3666 return Legalized;
3667}
3668
3671 const MachineOperand &ConstOperand = MI.getOperand(1);
3672 const Constant *ConstantVal = ConstOperand.getFPImm();
3673
3674 emitLoadFromConstantPool(MI.getOperand(0).getReg(), ConstantVal, MIRBuilder);
3675 MI.eraseFromParent();
3676
3677 return Legalized;
3678}
3679
3682 auto [Dst, DstTy, Src, SrcTy] = MI.getFirst2RegLLTs();
3683 if (SrcTy.isVector()) {
3684 LLT SrcEltTy = SrcTy.getElementType();
3686
3687 if (DstTy.isVector()) {
3688 int NumDstElt = DstTy.getNumElements();
3689 int NumSrcElt = SrcTy.getNumElements();
3690
3691 LLT DstEltTy = DstTy.getElementType();
3692 LLT DstCastTy = DstEltTy; // Intermediate bitcast result type
3693 LLT SrcPartTy = SrcEltTy; // Original unmerge result type.
3694
3695 // If there's an element size mismatch, insert intermediate casts to match
3696 // the result element type.
3697 if (NumSrcElt < NumDstElt) { // Source element type is larger.
3698 // %1:_(<4 x s8>) = G_BITCAST %0:_(<2 x s16>)
3699 //
3700 // =>
3701 //
3702 // %2:_(s16), %3:_(s16) = G_UNMERGE_VALUES %0
3703 // %4:_(<2 x s8>) = G_BITCAST %2
3704 // %5:_(<2 x s8>) = G_BITCAST %3
3705 // %1:_(<4 x s8>) = G_CONCAT_VECTORS %4, %5
3706 DstCastTy = DstTy.changeVectorElementCount(
3707 ElementCount::getFixed(NumDstElt / NumSrcElt));
3708 SrcPartTy = SrcEltTy;
3709 } else if (NumSrcElt > NumDstElt) { // Source element type is smaller.
3710 //
3711 // %1:_(<2 x s16>) = G_BITCAST %0:_(<4 x s8>)
3712 //
3713 // =>
3714 //
3715 // %2:_(<2 x s8>), %3:_(<2 x s8>) = G_UNMERGE_VALUES %0
3716 // %4:_(s16) = G_BITCAST %2
3717 // %5:_(s16) = G_BITCAST %3
3718 // %1:_(<2 x s16>) = G_BUILD_VECTOR %4, %5
3719 SrcPartTy = SrcTy.changeVectorElementCount(
3720 ElementCount::getFixed(NumSrcElt / NumDstElt));
3721 DstCastTy = DstEltTy;
3722 }
3723
3724 getUnmergePieces(SrcRegs, MIRBuilder, Src, SrcPartTy);
3725 for (Register &SrcReg : SrcRegs)
3726 SrcReg = MIRBuilder.buildBitcast(DstCastTy, SrcReg).getReg(0);
3727 } else
3728 getUnmergePieces(SrcRegs, MIRBuilder, Src, SrcEltTy);
3729
3730 MIRBuilder.buildMergeLikeInstr(Dst, SrcRegs);
3731 MI.eraseFromParent();
3732 return Legalized;
3733 }
3734
3735 if (DstTy.isVector()) {
3737 getUnmergePieces(SrcRegs, MIRBuilder, Src, DstTy.getElementType());
3738 MIRBuilder.buildMergeLikeInstr(Dst, SrcRegs);
3739 MI.eraseFromParent();
3740 return Legalized;
3741 }
3742
3743 return UnableToLegalize;
3744}
3745
3746/// Figure out the bit offset into a register when coercing a vector index for
3747/// the wide element type. This is only for the case when promoting vector to
3748/// one with larger elements.
3749//
3750///
3751/// %offset_idx = G_AND %idx, ~(-1 << Log2(DstEltSize / SrcEltSize))
3752/// %offset_bits = G_SHL %offset_idx, Log2(SrcEltSize)
3754 Register Idx,
3755 unsigned NewEltSize,
3756 unsigned OldEltSize) {
3757 const unsigned Log2EltRatio = Log2_32(NewEltSize / OldEltSize);
3758 LLT IdxTy = B.getMRI()->getType(Idx);
3759
3760 // Now figure out the amount we need to shift to get the target bits.
3761 auto OffsetMask = B.buildConstant(
3762 IdxTy, ~(APInt::getAllOnes(IdxTy.getSizeInBits()) << Log2EltRatio));
3763 auto OffsetIdx = B.buildAnd(IdxTy, Idx, OffsetMask);
3764 return B.buildShl(IdxTy, OffsetIdx,
3765 B.buildConstant(IdxTy, Log2_32(OldEltSize))).getReg(0);
3766}
3767
3768/// Perform a G_EXTRACT_VECTOR_ELT in a different sized vector element. If this
3769/// is casting to a vector with a smaller element size, perform multiple element
3770/// extracts and merge the results. If this is coercing to a vector with larger
3771/// elements, index the bitcasted vector and extract the target element with bit
3772/// operations. This is intended to force the indexing in the native register
3773/// size for architectures that can dynamically index the register file.
3776 LLT CastTy) {
  // Only the source vector type (type index 1) may be reinterpreted here.
3777 if (TypeIdx != 1)
3778 return UnableToLegalize;
3779
3780 auto [Dst, DstTy, SrcVec, SrcVecTy, Idx, IdxTy] = MI.getFirst3RegLLTs();
3781
3782 LLT SrcEltTy = SrcVecTy.getElementType();
  // Treat a scalar CastTy as a one-element vector.
3783 unsigned NewNumElts = CastTy.isVector() ? CastTy.getNumElements() : 1;
3784 unsigned OldNumElts = SrcVecTy.getNumElements();
3785
3786 LLT NewEltTy = CastTy.getScalarType();
3787 Register CastVec = MIRBuilder.buildBitcast(CastTy, SrcVec).getReg(0);
3788
3789 const unsigned NewEltSize = NewEltTy.getSizeInBits();
3790 const unsigned OldEltSize = SrcEltTy.getSizeInBits();
3791 if (NewNumElts > OldNumElts) {
3792 // Decreasing the vector element size
3793 //
3794 // e.g. i64 = extract_vector_elt x:v2i64, y:i32
3795 // =>
3796 // v4i32:castx = bitcast x:v2i64
3797 //
3798 // i64 = bitcast
3799 // (v2i32 build_vector (i32 (extract_vector_elt castx, (2 * y))),
3800 // (i32 (extract_vector_elt castx, (2 * y + 1)))
3801 //
  // Each old element must map onto a whole number of new elements.
3802 if (NewNumElts % OldNumElts != 0)
3803 return UnableToLegalize;
3804
3805 // Type of the intermediate result vector.
3806 const unsigned NewEltsPerOldElt = NewNumElts / OldNumElts;
3807 LLT MidTy =
3808 CastTy.changeElementCount(ElementCount::getFixed(NewEltsPerOldElt));
3809
3810 auto NewEltsPerOldEltK = MIRBuilder.buildConstant(IdxTy, NewEltsPerOldElt);
3811
3812 SmallVector<Register, 8> NewOps(NewEltsPerOldElt);
  // First narrow element of the requested wide element lives at
  // Idx * NewEltsPerOldElt in the cast vector.
3813 auto NewBaseIdx = MIRBuilder.buildMul(IdxTy, Idx, NewEltsPerOldEltK);
3814
3815 for (unsigned I = 0; I < NewEltsPerOldElt; ++I) {
3816 auto IdxOffset = MIRBuilder.buildConstant(IdxTy, I);
3817 auto TmpIdx = MIRBuilder.buildAdd(IdxTy, NewBaseIdx, IdxOffset);
3818 auto Elt = MIRBuilder.buildExtractVectorElement(NewEltTy, CastVec, TmpIdx);
3819 NewOps[I] = Elt.getReg(0);
3820 }
3821
3822 auto NewVec = MIRBuilder.buildBuildVector(MidTy, NewOps);
3823 MIRBuilder.buildBitcast(Dst, NewVec);
3824 MI.eraseFromParent();
3825 return Legalized;
3826 }
3827
3828 if (NewNumElts < OldNumElts) {
  // The wide element size must be a whole multiple of the old one.
3829 if (NewEltSize % OldEltSize != 0)
3830 return UnableToLegalize;
3831
3832 // This only depends on powers of 2 because we use bit tricks to figure out
3833 // the bit offset we need to shift to get the target element. A general
3834 // expansion could emit division/multiply.
3835 if (!isPowerOf2_32(NewEltSize / OldEltSize))
3836 return UnableToLegalize;
3837
3838 // Increasing the vector element size.
3839 // %elt:_(small_elt) = G_EXTRACT_VECTOR_ELT %vec:_(<N x small_elt>), %idx
3840 //
3841 // =>
3842 //
3843 // %cast = G_BITCAST %vec
3844 // %scaled_idx = G_LSHR %idx, Log2(DstEltSize / SrcEltSize)
3845 // %wide_elt = G_EXTRACT_VECTOR_ELT %cast, %scaled_idx
3846 // %offset_idx = G_AND %idx, ~(-1 << Log2(DstEltSize / SrcEltSize))
3847 // %offset_bits = G_SHL %offset_idx, Log2(SrcEltSize)
3848 // %elt_bits = G_LSHR %wide_elt, %offset_bits
3849 // %elt = G_TRUNC %elt_bits
3850
3851 const unsigned Log2EltRatio = Log2_32(NewEltSize / OldEltSize);
3852 auto Log2Ratio = MIRBuilder.buildConstant(IdxTy, Log2EltRatio);
3853
3854 // Divide to get the index in the wider element type.
3855 auto ScaledIdx = MIRBuilder.buildLShr(IdxTy, Idx, Log2Ratio);
3856
  // If the cast collapsed the whole vector to a scalar, that scalar already
  // is the wide element; no dynamic indexing is needed.
3857 Register WideElt = CastVec;
3858 if (CastTy.isVector()) {
3859 WideElt = MIRBuilder.buildExtractVectorElement(NewEltTy, CastVec,
3860 ScaledIdx).getReg(0);
3861 }
3862
3863 // Compute the bit offset into the register of the target element.
3865 MIRBuilder, Idx, NewEltSize, OldEltSize);
3866
3867 // Shift the wide element to get the target element.
3868 auto ExtractedBits = MIRBuilder.buildLShr(NewEltTy, WideElt, OffsetBits);
3869 MIRBuilder.buildTrunc(Dst, ExtractedBits);
3870 MI.eraseFromParent();
3871 return Legalized;
3872 }
3873
  // Same element count with a different element size cannot be handled here.
3874 return UnableToLegalize;
3875}
3876
3877/// Emit code to insert \p InsertReg into \p TargetReg at \p OffsetBits in \p
3878/// TargetReg, while preserving other bits in \p TargetReg.
3879///
3880/// (InsertReg << Offset) | (TargetReg & ~(-1 >> InsertReg.size()) << Offset)
3882 Register TargetReg, Register InsertReg,
3883 Register OffsetBits) {
3884 LLT TargetTy = B.getMRI()->getType(TargetReg);
3885 LLT InsertTy = B.getMRI()->getType(InsertReg);
  // Zero-extend so the inserted value occupies only the low bits of TargetTy,
  // then move it into position.
3886 auto ZextVal = B.buildZExt(TargetTy, InsertReg);
3887 auto ShiftedInsertVal = B.buildShl(TargetTy, ZextVal, OffsetBits);
3888
3889 // Produce a bitmask of the value to insert
3890 auto EltMask = B.buildConstant(
3891 TargetTy, APInt::getLowBitsSet(TargetTy.getSizeInBits(),
3892 InsertTy.getSizeInBits()));
3893 // Shift it into position
3894 auto ShiftedMask = B.buildShl(TargetTy, EltMask, OffsetBits);
3895 auto InvShiftedMask = B.buildNot(TargetTy, ShiftedMask);
3896
3897 // Clear out the bits in the wide element
3898 auto MaskedOldElt = B.buildAnd(TargetTy, TargetReg, InvShiftedMask);
3899
3900 // The value to insert has all zeros already, so stick it into the masked
3901 // wide element.
3902 return B.buildOr(TargetTy, MaskedOldElt, ShiftedInsertVal).getReg(0);
3903}
3904
3905/// Perform a G_INSERT_VECTOR_ELT in a different sized vector element. If this
3906/// is increasing the element size, perform the indexing in the target element
3907/// type, and use bit operations to insert at the element position. This is
3908/// intended for architectures that can dynamically index the register file and
3909/// want to force indexing in the native register size.
3912 LLT CastTy) {
  // Only the vector type (type index 0) may be reinterpreted here.
3913 if (TypeIdx != 0)
3914 return UnableToLegalize;
3915
3916 auto [Dst, DstTy, SrcVec, SrcVecTy, Val, ValTy, Idx, IdxTy] =
3917 MI.getFirst4RegLLTs();
3918 LLT VecTy = DstTy;
3919
3920 LLT VecEltTy = VecTy.getElementType();
  // Treat a scalar CastTy as a one-element vector.
3921 LLT NewEltTy = CastTy.isVector() ? CastTy.getElementType() : CastTy;
3922 const unsigned NewEltSize = NewEltTy.getSizeInBits();
3923 const unsigned OldEltSize = VecEltTy.getSizeInBits();
3924
3925 unsigned NewNumElts = CastTy.isVector() ? CastTy.getNumElements() : 1;
3926 unsigned OldNumElts = VecTy.getNumElements();
3927
3928 Register CastVec = MIRBuilder.buildBitcast(CastTy, SrcVec).getReg(0);
  // Only the wider-element case is implemented: extract the containing wide
  // element, splice the value in with bit operations, and put it back.
3929 if (NewNumElts < OldNumElts) {
  // The wide element size must be a whole multiple of the old one.
3930 if (NewEltSize % OldEltSize != 0)
3931 return UnableToLegalize;
3932
3933 // This only depends on powers of 2 because we use bit tricks to figure out
3934 // the bit offset we need to shift to get the target element. A general
3935 // expansion could emit division/multiply.
3936 if (!isPowerOf2_32(NewEltSize / OldEltSize))
3937 return UnableToLegalize;
3938
3939 const unsigned Log2EltRatio = Log2_32(NewEltSize / OldEltSize);
3940 auto Log2Ratio = MIRBuilder.buildConstant(IdxTy, Log2EltRatio);
3941
3942 // Divide to get the index in the wider element type.
3943 auto ScaledIdx = MIRBuilder.buildLShr(IdxTy, Idx, Log2Ratio);
3944
  // A scalar CastTy means the whole vector collapsed into one wide element.
3945 Register ExtractedElt = CastVec;
3946 if (CastTy.isVector()) {
3947 ExtractedElt = MIRBuilder.buildExtractVectorElement(NewEltTy, CastVec,
3948 ScaledIdx).getReg(0);
3949 }
3950
3951 // Compute the bit offset into the register of the target element.
3953 MIRBuilder, Idx, NewEltSize, OldEltSize);
3954
  // Overwrite the target element's bits while keeping its neighbors.
3955 Register InsertedElt = buildBitFieldInsert(MIRBuilder, ExtractedElt,
3956 Val, OffsetBits);
3957 if (CastTy.isVector()) {
3958 InsertedElt = MIRBuilder.buildInsertVectorElement(
3959 CastTy, CastVec, InsertedElt, ScaledIdx).getReg(0);
3960 }
3961
3962 MIRBuilder.buildBitcast(Dst, InsertedElt);
3963 MI.eraseFromParent();
3964 return Legalized;
3965 }
3966
3967 return UnableToLegalize;
3968}
3969
3970// This attempts to handle G_CONCAT_VECTORS with illegal operands, particularly
3971// those that have smaller than legal operands.
3972//
3973// <16 x s8> = G_CONCAT_VECTORS <4 x s8>, <4 x s8>, <4 x s8>, <4 x s8>
3974//
3975// ===>
3976//
3977// s32 = G_BITCAST <4 x s8>
3978// s32 = G_BITCAST <4 x s8>
3979// s32 = G_BITCAST <4 x s8>
3980// s32 = G_BITCAST <4 x s8>
3981// <4 x s32> = G_BUILD_VECTOR s32, s32, s32, s32
3982// <16 x s8> = G_BITCAST <4 x s32>
3985 LLT CastTy) {
3986 // Convert it to CONCAT instruction
3987 auto ConcatMI = dyn_cast<GConcatVectors>(&MI);
3988 if (!ConcatMI) {
3989 return UnableToLegalize;
3990 }
3991
3992 // Check if bitcast is Legal
3993 auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();
3994 LLT SrcScalTy = CastTy.getScalarType();
3995
3996 // Check if the build vector is Legal
3997 if (!LI.isLegal({TargetOpcode::G_BUILD_VECTOR, {CastTy, SrcScalTy}})) {
3998 return UnableToLegalize;
3999 }
4000
4001 // Bitcast the sources
4002 SmallVector<Register> BitcastRegs;
4003 for (unsigned i = 0; i < ConcatMI->getNumSources(); i++) {
4004 BitcastRegs.push_back(
4005 MIRBuilder.buildBitcast(SrcScalTy, ConcatMI->getSourceReg(i))
4006 .getReg(0));
4007 }
4008
4009 // Build the scalar values into a vector
4010 Register BuildReg =
4011 MIRBuilder.buildBuildVector(CastTy, BitcastRegs).getReg(0);
4012 MIRBuilder.buildBitcast(DstReg, BuildReg);
4013
4014 MI.eraseFromParent();
4015 return Legalized;
4016}
4017
4018// This bitcasts a shuffle vector to a different type currently of the same
4019// element size. Mostly used to legalize ptr vectors, where ptrtoint/inttoptr
4020// will be used instead.
4021//
4022// <16 x p0> = G_CONCAT_VECTORS <4 x p0>, <4 x p0>, mask
4023// ===>
4024// <4 x s64> = G_PTRTOINT <4 x p0>
4025// <4 x s64> = G_PTRTOINT <4 x p0>
4026// <16 x s64> = G_CONCAT_VECTORS <4 x s64>, <4 x s64>, mask
4027// <16 x p0> = G_INTTOPTR <16 x s64>
4030 LLT CastTy) {
4031 auto ShuffleMI = cast<GShuffleVector>(&MI);
4032 LLT DstTy = MRI.getType(ShuffleMI->getReg(0));
4033 LLT SrcTy = MRI.getType(ShuffleMI->getReg(1));
4034
4035 // We currently only handle vectors of the same size.
4036 if (TypeIdx != 0 ||
4037 CastTy.getScalarSizeInBits() != DstTy.getScalarSizeInBits() ||
4038 CastTy.getElementCount() != DstTy.getElementCount())
4039 return UnableToLegalize;
4040
4041 LLT NewSrcTy = SrcTy.changeElementType(CastTy.getScalarType());
4042
4043 auto Inp1 = MIRBuilder.buildCast(NewSrcTy, ShuffleMI->getReg(1));
4044 auto Inp2 = MIRBuilder.buildCast(NewSrcTy, ShuffleMI->getReg(2));
4045 auto Shuf =
4046 MIRBuilder.buildShuffleVector(CastTy, Inp1, Inp2, ShuffleMI->getMask());
4047 MIRBuilder.buildCast(ShuffleMI->getReg(0), Shuf);
4048
4049 MI.eraseFromParent();
4050 return Legalized;
4051}
4052
4053/// This attempts to bitcast G_EXTRACT_SUBVECTOR to CastTy.
4054///
4055/// <vscale x 8 x i1> = G_EXTRACT_SUBVECTOR <vscale x 16 x i1>, N
4056///
4057/// ===>
4058///
4059/// <vscale x 2 x i1> = G_BITCAST <vscale x 16 x i1>
4060/// <vscale x 1 x i8> = G_EXTRACT_SUBVECTOR <vscale x 2 x i1>, N / 8
4061/// <vscale x 8 x i1> = G_BITCAST <vscale x 1 x i8>
4064 LLT CastTy) {
4065 auto ES = cast<GExtractSubvector>(&MI);
4066
4067 if (!CastTy.isVector())
4068 return UnableToLegalize;
4069
4070 if (TypeIdx != 0)
4071 return UnableToLegalize;
4072
4073 Register Dst = ES->getReg(0);
4074 Register Src = ES->getSrcVec();
4075 uint64_t Idx = ES->getIndexImm();
4076
4077 MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
4078
4079 LLT DstTy = MRI.getType(Dst);
4080 LLT SrcTy = MRI.getType(Src);
4081 ElementCount DstTyEC = DstTy.getElementCount();
4082 ElementCount SrcTyEC = SrcTy.getElementCount();
4083 auto DstTyMinElts = DstTyEC.getKnownMinValue();
4084 auto SrcTyMinElts = SrcTyEC.getKnownMinValue();
4085
4086 if (DstTy == CastTy)
4087 return Legalized;
4088
4089 if (DstTy.getSizeInBits() != CastTy.getSizeInBits())
4090 return UnableToLegalize;
4091
4092 unsigned CastEltSize = CastTy.getElementType().getSizeInBits();
4093 unsigned DstEltSize = DstTy.getElementType().getSizeInBits();
4094 if (CastEltSize < DstEltSize)
4095 return UnableToLegalize;
4096
4097 auto AdjustAmt = CastEltSize / DstEltSize;
4098 if (Idx % AdjustAmt != 0 || DstTyMinElts % AdjustAmt != 0 ||
4099 SrcTyMinElts % AdjustAmt != 0)
4100 return UnableToLegalize;
4101
4102 Idx /= AdjustAmt;
4103 SrcTy = LLT::vector(SrcTyEC.divideCoefficientBy(AdjustAmt), AdjustAmt);
4104 auto CastVec = MIRBuilder.buildBitcast(SrcTy, Src);
4105 auto PromotedES = MIRBuilder.buildExtractSubvector(CastTy, CastVec, Idx);
4106 MIRBuilder.buildBitcast(Dst, PromotedES);
4107
4108 ES->eraseFromParent();
4109 return Legalized;
4110}
4111
4112/// This attempts to bitcast G_INSERT_SUBVECTOR to CastTy.
4113///
4114/// <vscale x 16 x i1> = G_INSERT_SUBVECTOR <vscale x 16 x i1>,
4115/// <vscale x 8 x i1>,
4116/// N
4117///
4118/// ===>
4119///
4120/// <vscale x 2 x i8> = G_BITCAST <vscale x 16 x i1>
4121/// <vscale x 1 x i8> = G_BITCAST <vscale x 8 x i1>
4122/// <vscale x 2 x i8> = G_INSERT_SUBVECTOR <vscale x 2 x i8>,
4123/// <vscale x 1 x i8>, N / 8
4124/// <vscale x 16 x i1> = G_BITCAST <vscale x 2 x i8>
4127 LLT CastTy) {
4128 auto ES = cast<GInsertSubvector>(&MI);
4129
4130 if (!CastTy.isVector())
4131 return UnableToLegalize;
4132
4133 if (TypeIdx != 0)
4134 return UnableToLegalize;
4135
4136 Register Dst = ES->getReg(0);
4137 Register BigVec = ES->getBigVec();
4138 Register SubVec = ES->getSubVec();
4139 uint64_t Idx = ES->getIndexImm();
4140
4141 MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
4142
4143 LLT DstTy = MRI.getType(Dst);
4144 LLT BigVecTy = MRI.getType(BigVec);
4145 LLT SubVecTy = MRI.getType(SubVec);
4146
4147 if (DstTy == CastTy)
4148 return Legalized;
4149
4150 if (DstTy.getSizeInBits() != CastTy.getSizeInBits())
4151 return UnableToLegalize;
4152
4153 ElementCount DstTyEC = DstTy.getElementCount();
4154 ElementCount BigVecTyEC = BigVecTy.getElementCount();
4155 ElementCount SubVecTyEC = SubVecTy.getElementCount();
4156 auto DstTyMinElts = DstTyEC.getKnownMinValue();
4157 auto BigVecTyMinElts = BigVecTyEC.getKnownMinValue();
4158 auto SubVecTyMinElts = SubVecTyEC.getKnownMinValue();
4159
4160 unsigned CastEltSize = CastTy.getElementType().getSizeInBits();
4161 unsigned DstEltSize = DstTy.getElementType().getSizeInBits();
4162 if (CastEltSize < DstEltSize)
4163 return UnableToLegalize;
4164
4165 auto AdjustAmt = CastEltSize / DstEltSize;
4166 if (Idx % AdjustAmt != 0 || DstTyMinElts % AdjustAmt != 0 ||
4167 BigVecTyMinElts % AdjustAmt != 0 || SubVecTyMinElts % AdjustAmt != 0)
4168 return UnableToLegalize;
4169
4170 Idx /= AdjustAmt;
4171 BigVecTy = LLT::vector(BigVecTyEC.divideCoefficientBy(AdjustAmt), AdjustAmt);
4172 SubVecTy = LLT::vector(SubVecTyEC.divideCoefficientBy(AdjustAmt), AdjustAmt);
4173 auto CastBigVec = MIRBuilder.buildBitcast(BigVecTy, BigVec);
4174 auto CastSubVec = MIRBuilder.buildBitcast(SubVecTy, SubVec);
4175 auto PromotedIS =
4176 MIRBuilder.buildInsertSubvector(CastTy, CastBigVec, CastSubVec, Idx);
4177 MIRBuilder.buildBitcast(Dst, PromotedIS);
4178
4179 ES->eraseFromParent();
4180 return Legalized;
4181}
4182
4184 // Lower to a memory-width G_LOAD and a G_SEXT/G_ZEXT/G_ANYEXT
4185 Register DstReg = LoadMI.getDstReg();
4186 Register PtrReg = LoadMI.getPointerReg();
4187 LLT DstTy = MRI.getType(DstReg);
4188 MachineMemOperand &MMO = LoadMI.getMMO();
4189 LLT MemTy = MMO.getMemoryType();
4190 MachineFunction &MF = MIRBuilder.getMF();
4191
4192 LLT EltTy = MemTy.getScalarType();
4193
4194 unsigned MemSizeInBits = MemTy.getSizeInBits();
4195 unsigned MemStoreSizeInBits = 8 * MemTy.getSizeInBytes();
4196
  // Case 1: memory size is not a whole number of bytes (e.g. s20).
4197 if (MemSizeInBits != MemStoreSizeInBits) {
4198 if (MemTy.isVector())
4199 return UnableToLegalize;
4200
4201 // Promote to a byte-sized load if not loading an integral number of
4202 // bytes. For example, promote EXTLOAD:i20 -> EXTLOAD:i24.
4203 LLT WideMemTy = EltTy.changeElementSize(MemStoreSizeInBits);
4204 MachineMemOperand *NewMMO =
4205 MF.getMachineMemOperand(&MMO, MMO.getPointerInfo(), WideMemTy);
4206
4207 Register LoadReg = DstReg;
4208 LLT LoadTy = DstTy;
4209
4210 // If this wasn't already an extending load, we need to widen the result
4211 // register to avoid creating a load with a narrower result than the source.
4212 if (MemStoreSizeInBits > DstTy.getSizeInBits()) {
4213 LoadTy = WideMemTy;
4214 LoadReg = MRI.createGenericVirtualRegister(WideMemTy);
4215 }
4216
4217 if (isa<GSExtLoad>(LoadMI)) {
4218 auto NewLoad = MIRBuilder.buildLoad(LoadTy, PtrReg, *NewMMO);
4219 MIRBuilder.buildSExtInReg(LoadReg, NewLoad, MemSizeInBits);
4220 } else if (isa<GZExtLoad>(LoadMI) || WideMemTy == LoadTy) {
4221 auto NewLoad = MIRBuilder.buildLoad(LoadTy, PtrReg, *NewMMO);
4222 // The extra bits are guaranteed to be zero, since we stored them that
4223 // way. A zext load from Wide thus automatically gives zext from MemVT.
4224 MIRBuilder.buildAssertZExt(LoadReg, NewLoad, MemSizeInBits);
4225 } else {
4226 MIRBuilder.buildLoad(LoadReg, PtrReg, *NewMMO);
4227 }
4228
4229 if (DstTy != LoadTy)
4230 MIRBuilder.buildTrunc(DstReg, LoadReg);
4231
4232 LoadMI.eraseFromParent();
4233 return Legalized;
4234 }
4235
4236 // Big endian lowering not implemented.
4237 if (MIRBuilder.getDataLayout().isBigEndian())
4238 return UnableToLegalize;
4239
4240 // This load needs splitting into power of 2 sized loads.
4241 //
4242 // Our strategy here is to generate anyextending loads for the smaller
4243 // types up to next power-2 result type, and then combine the two larger
4244 // result values together, before truncating back down to the non-pow-2
4245 // type.
4246 // E.g. v1 = i24 load =>
4247 // v2 = i32 zextload (2 byte)
4248 // v3 = i32 load (1 byte)
4249 // v4 = i32 shl v3, 16
4250 // v5 = i32 or v4, v2
4251 // v1 = i24 trunc v5
4252 // By doing this we generate the correct truncate which should get
4253 // combined away as an artifact with a matching extend.
4254
4255 uint64_t LargeSplitSize, SmallSplitSize;
4256
4257 if (!isPowerOf2_32(MemSizeInBits)) {
4258 // This load needs splitting into power of 2 sized loads.
4259 LargeSplitSize = llvm::bit_floor(MemSizeInBits);
4260 SmallSplitSize = MemSizeInBits - LargeSplitSize;
4261 } else {
4262 // This is already a power of 2, but we still need to split this in half.
4263 //
4264 // Assume we're being asked to decompose an unaligned load.
4265 // TODO: If this requires multiple splits, handle them all at once.
4266 auto &Ctx = MF.getFunction().getContext();
4267 if (TLI.allowsMemoryAccess(Ctx, MIRBuilder.getDataLayout(), MemTy, MMO))
4268 return UnableToLegalize;
4269
4270 SmallSplitSize = LargeSplitSize = MemSizeInBits / 2;
4271 }
4272
4273 if (MemTy.isVector()) {
4274 // TODO: Handle vector extloads
4275 if (MemTy != DstTy)
4276 return UnableToLegalize;
4277
4278 Align Alignment = LoadMI.getAlign();
4279 // Given an alignment larger than the size of the memory, we can increase
4280 // the size of the load without needing to scalarize it.
4281 if (Alignment.value() * 8 > MemSizeInBits &&
4283 LLT MoreTy = DstTy.changeVectorElementCount(
4285 MachineMemOperand *NewMMO = MF.getMachineMemOperand(&MMO, 0, MoreTy);
4286 auto NewLoad = MIRBuilder.buildLoad(MoreTy, PtrReg, *NewMMO);
4287 MIRBuilder.buildDeleteTrailingVectorElements(LoadMI.getReg(0),
4288 NewLoad.getReg(0));
4289 LoadMI.eraseFromParent();
4290 return Legalized;
4291 }
4292
4293 // TODO: We can do better than scalarizing the vector and at least split it
4294 // in half.
4295 return reduceLoadStoreWidth(LoadMI, 0, DstTy.getElementType());
4296 }
4297
4298 MachineMemOperand *LargeMMO =
4299 MF.getMachineMemOperand(&MMO, 0, LargeSplitSize / 8);
4300 MachineMemOperand *SmallMMO =
4301 MF.getMachineMemOperand(&MMO, LargeSplitSize / 8, SmallSplitSize / 8);
4302
4303 LLT PtrTy = MRI.getType(PtrReg);
  // Both halves are combined in the next power-of-2 width of the result.
4304 unsigned AnyExtSize = PowerOf2Ceil(DstTy.getSizeInBits());
4305
  // Pointer element types do the shift/or arithmetic in an equivalently
  // sized integer instead.
4306 LLT AnyExtTy;
4307 LLT OffsetCstRes;
4308 if (EltTy.isPointer()) {
4309 AnyExtTy = LLT::scalar(AnyExtSize);
4310 OffsetCstRes = LLT::scalar(PtrTy.getSizeInBits());
4311 } else {
4312 AnyExtTy = EltTy.changeElementSize(AnyExtSize);
4313 OffsetCstRes = EltTy.changeElementSize(PtrTy.getSizeInBits());
4314 }
4315
  // Low part: zextload so the upper bits are known zero for the OR below.
4316 auto LargeLoad = MIRBuilder.buildLoadInstr(TargetOpcode::G_ZEXTLOAD, AnyExtTy,
4317 PtrReg, *LargeMMO);
4318
4319 auto OffsetCst = MIRBuilder.buildConstant(OffsetCstRes, LargeSplitSize / 8);
4320 Register PtrAddReg = MRI.createGenericVirtualRegister(PtrTy);
4321 auto SmallPtr = MIRBuilder.buildObjectPtrOffset(PtrAddReg, PtrReg, OffsetCst);
  // High part: keep the original load's extension kind.
4322 auto SmallLoad = MIRBuilder.buildLoadInstr(LoadMI.getOpcode(), AnyExtTy,
4323 SmallPtr, *SmallMMO);
4324
4325 auto ShiftAmt = MIRBuilder.buildConstant(AnyExtTy, LargeSplitSize);
4326 auto Shift = MIRBuilder.buildShl(AnyExtTy, SmallLoad, ShiftAmt);
4327
4328 if (AnyExtTy == DstTy)
4329 MIRBuilder.buildOr(DstReg, Shift, LargeLoad);
4330 else if (AnyExtTy.getSizeInBits() != DstTy.getSizeInBits()) {
4331 auto Or = MIRBuilder.buildOr(AnyExtTy, Shift, LargeLoad);
4332 MIRBuilder.buildTrunc(DstReg, {Or});
4333 } else {
4334 assert(DstTy.isPointer() && "expected pointer");
4335 auto Or = MIRBuilder.buildOr(AnyExtTy, Shift, LargeLoad);
4336
4337 // FIXME: We currently consider this to be illegal for non-integral address
4338 // spaces, but we need still need a way to reinterpret the bits.
4339 MIRBuilder.buildIntToPtr(DstReg, Or);
4340 }
4341
4342 LoadMI.eraseFromParent();
4343 return Legalized;
4344}
4345
4347 // Lower a non-power of 2 store into multiple pow-2 stores.
4348 // E.g. split an i24 store into an i16 store + i8 store.
4349 // We do this by first extending the stored value to the next largest power
4350 // of 2 type, and then using truncating stores to store the components.
4351 // By doing this, likewise with G_LOAD, generate an extend that can be
4352 // artifact-combined away instead of leaving behind extracts.
4353 Register SrcReg = StoreMI.getValueReg();
4354 Register PtrReg = StoreMI.getPointerReg();
4355 LLT SrcTy = MRI.getType(SrcReg);
4356 MachineFunction &MF = MIRBuilder.getMF();
4357 MachineMemOperand &MMO = **StoreMI.memoperands_begin();
4358 LLT MemTy = MMO.getMemoryType();
4359
4360 unsigned StoreWidth = MemTy.getSizeInBits();
4361 unsigned StoreSizeInBits = 8 * MemTy.getSizeInBytes();
4362
4363 if (StoreWidth != StoreSizeInBits && !SrcTy.isVector()) {
4364 // Promote to a byte-sized store with upper bits zero if not
4365 // storing an integral number of bytes. For example, promote
4366 // TRUNCSTORE:i1 X -> TRUNCSTORE:i8 (and X, 1)
4367 LLT WideTy = LLT::integer(StoreSizeInBits);
4368
4369 if (StoreSizeInBits > SrcTy.getSizeInBits()) {
4370 // Avoid creating a store with a narrower source than result.
4371 SrcReg = MIRBuilder.buildAnyExt(WideTy, SrcReg).getReg(0);
4372 SrcTy = WideTy;
4373 }
4374
4375 auto ZextInReg = MIRBuilder.buildZExtInReg(SrcTy, SrcReg, StoreWidth);
4376
4377 MachineMemOperand *NewMMO =
4378 MF.getMachineMemOperand(&MMO, MMO.getPointerInfo(), WideTy);
4379 MIRBuilder.buildStore(ZextInReg, PtrReg, *NewMMO);
4380 StoreMI.eraseFromParent();
4381 return Legalized;
4382 }
4383
4384 if (MemTy.isVector()) {
4385 if (MemTy != SrcTy)
4386 return scalarizeVectorBooleanStore(StoreMI);
4387
4388 // TODO: We can do better than scalarizing the vector and at least split it
4389 // in half.
4390 return reduceLoadStoreWidth(StoreMI, 0, SrcTy.getElementType());
4391 }
4392
4393 unsigned MemSizeInBits = MemTy.getSizeInBits();
4394 uint64_t LargeSplitSize, SmallSplitSize;
4395
4396 if (!isPowerOf2_32(MemSizeInBits)) {
4397 LargeSplitSize = llvm::bit_floor<uint64_t>(MemTy.getSizeInBits());
4398 SmallSplitSize = MemTy.getSizeInBits() - LargeSplitSize;
4399 } else {
4400 auto &Ctx = MF.getFunction().getContext();
4401 if (TLI.allowsMemoryAccess(Ctx, MIRBuilder.getDataLayout(), MemTy, MMO))
4402 return UnableToLegalize; // Don't know what we're being asked to do.
4403
4404 SmallSplitSize = LargeSplitSize = MemSizeInBits / 2;
4405 }
4406
4407 // Extend to the next pow-2. If this store was itself the result of lowering,
4408 // e.g. an s56 store being broken into s32 + s24, we might have a stored type
4409 // that's wider than the stored size.
4410 unsigned AnyExtSize = PowerOf2Ceil(MemTy.getSizeInBits());
4411 const LLT NewSrcTy = LLT::integer(AnyExtSize);
4412
4413 if (SrcTy.isPointer()) {
4414 const LLT IntPtrTy = LLT::integer(SrcTy.getSizeInBits());
4415 SrcReg = MIRBuilder.buildPtrToInt(IntPtrTy, SrcReg).getReg(0);
4416 }
4417
4418 auto ExtVal = MIRBuilder.buildAnyExtOrTrunc(NewSrcTy, SrcReg);
4419
4420 // Obtain the smaller value by shifting away the larger value.
4421 auto ShiftAmt = MIRBuilder.buildConstant(NewSrcTy, LargeSplitSize);
4422 auto SmallVal = MIRBuilder.buildLShr(NewSrcTy, ExtVal, ShiftAmt);
4423
4424 // Generate the PtrAdd and truncating stores.
4425 LLT PtrTy = MRI.getType(PtrReg);
4426 auto OffsetCst = MIRBuilder.buildConstant(LLT::integer(PtrTy.getSizeInBits()),
4427 LargeSplitSize / 8);
4428 auto SmallPtr = MIRBuilder.buildObjectPtrOffset(PtrTy, PtrReg, OffsetCst);
4429
4430 MachineMemOperand *LargeMMO =
4431 MF.getMachineMemOperand(&MMO, 0, LargeSplitSize / 8);
4432 MachineMemOperand *SmallMMO =
4433 MF.getMachineMemOperand(&MMO, LargeSplitSize / 8, SmallSplitSize / 8);
4434 MIRBuilder.buildStore(ExtVal, PtrReg, *LargeMMO);
4435 MIRBuilder.buildStore(SmallVal, SmallPtr, *SmallMMO);
4436 StoreMI.eraseFromParent();
4437 return Legalized;
4438}
4439
4442 Register SrcReg = StoreMI.getValueReg();
4443 Register PtrReg = StoreMI.getPointerReg();
4444 LLT SrcTy = MRI.getType(SrcReg);
4445 MachineMemOperand &MMO = **StoreMI.memoperands_begin();
4446 LLT MemTy = MMO.getMemoryType();
4447 LLT MemScalarTy = MemTy.getElementType();
4448 MachineFunction &MF = MIRBuilder.getMF();
4449
4450 assert(SrcTy.isVector() && "Expect a vector store type");
4451
4452 if (!MemScalarTy.isByteSized()) {
4453 // We need to build an integer scalar of the vector bit pattern.
4454 // It's not legal for us to add padding when storing a vector.
4455 unsigned NumBits = MemTy.getSizeInBits();
4456 LLT IntTy = LLT::integer(NumBits);
4457 auto CurrVal = MIRBuilder.buildConstant(IntTy, 0);
4458 LLT IdxTy = TLI.getVectorIdxLLT(MF.getDataLayout());
4459
4460 for (unsigned I = 0, E = MemTy.getNumElements(); I < E; ++I) {
4461 auto Elt = MIRBuilder.buildExtractVectorElement(
4462 SrcTy.getElementType(), SrcReg, MIRBuilder.buildConstant(IdxTy, I));
4463 auto Trunc = MIRBuilder.buildTrunc(MemScalarTy, Elt);
4464 auto ZExt = MIRBuilder.buildZExt(IntTy, Trunc);
4465 unsigned ShiftIntoIdx = MF.getDataLayout().isBigEndian()
4466 ? (MemTy.getNumElements() - 1) - I
4467 : I;
4468 auto ShiftAmt = MIRBuilder.buildConstant(
4469 IntTy, ShiftIntoIdx * MemScalarTy.getSizeInBits());
4470 auto Shifted = MIRBuilder.buildShl(IntTy, ZExt, ShiftAmt);
4471 CurrVal = MIRBuilder.buildOr(IntTy, CurrVal, Shifted);
4472 }
4473 auto PtrInfo = MMO.getPointerInfo();
4474 auto *NewMMO = MF.getMachineMemOperand(&MMO, PtrInfo, IntTy);
4475 MIRBuilder.buildStore(CurrVal, PtrReg, *NewMMO);
4476 StoreMI.eraseFromParent();
4477 return Legalized;
4478 }
4479
4480 // TODO: implement simple scalarization.
4481 return UnableToLegalize;
4482}
4483
4485LegalizerHelper::bitcast(MachineInstr &MI, unsigned TypeIdx, LLT CastTy) {
  // Dispatch by opcode. Simple cases rewrite the instruction in place under
  // Observer notifications; vector operations use dedicated helpers.
4486 switch (MI.getOpcode()) {
4487 case TargetOpcode::G_LOAD: {
4488 if (TypeIdx != 0)
4489 return UnableToLegalize;
4490 MachineMemOperand &MMO = **MI.memoperands_begin();
4491
4492 // Not sure how to interpret a bitcast of an extending load.
4493 if (MMO.getMemoryType().getSizeInBits() != CastTy.getSizeInBits())
4494 return UnableToLegalize;
4495
4496 Observer.changingInstr(MI);
4497 bitcastDst(MI, CastTy, 0);
  // The memory type must follow the new result type.
4498 MMO.setType(CastTy);
4499 // The range metadata is no longer valid when reinterpreted as a different
4500 // type.
4501 MMO.clearRanges();
4502 Observer.changedInstr(MI);
4503 return Legalized;
4504 }
4505 case TargetOpcode::G_STORE: {
4506 if (TypeIdx != 0)
4507 return UnableToLegalize;
4508
4509 MachineMemOperand &MMO = **MI.memoperands_begin();
4510
4511 // Not sure how to interpret a bitcast of a truncating store.
4512 if (MMO.getMemoryType().getSizeInBits() != CastTy.getSizeInBits())
4513 return UnableToLegalize;
4514
4515 Observer.changingInstr(MI);
4516 bitcastSrc(MI, CastTy, 0);
4517 MMO.setType(CastTy);
4518 Observer.changedInstr(MI);
4519 return Legalized;
4520 }
4521 case TargetOpcode::G_SELECT: {
4522 if (TypeIdx != 0)
4523 return UnableToLegalize;
4524
4525 if (MRI.getType(MI.getOperand(1).getReg()).isVector()) {
4526 LLVM_DEBUG(
4527 dbgs() << "bitcast action not implemented for vector select\n");
4528 return UnableToLegalize;
4529 }
4530
  // Cast both select values and the result; the condition is untouched.
4531 Observer.changingInstr(MI);
4532 bitcastSrc(MI, CastTy, 2);
4533 bitcastSrc(MI, CastTy, 3);
4534 bitcastDst(MI, CastTy, 0);
4535 Observer.changedInstr(MI);
4536 return Legalized;
4537 }
4538 case TargetOpcode::G_AND:
4539 case TargetOpcode::G_OR:
4540 case TargetOpcode::G_XOR: {
  // Bitwise logic is insensitive to the type interpretation; cast all
  // operands and the result uniformly.
4541 Observer.changingInstr(MI);
4542 bitcastSrc(MI, CastTy, 1);
4543 bitcastSrc(MI, CastTy, 2);
4544 bitcastDst(MI, CastTy, 0);
4545 Observer.changedInstr(MI);
4546 return Legalized;
4547 }
4548 case TargetOpcode::G_EXTRACT_VECTOR_ELT:
4549 return bitcastExtractVectorElt(MI, TypeIdx, CastTy);
4550 case TargetOpcode::G_INSERT_VECTOR_ELT:
4551 return bitcastInsertVectorElt(MI, TypeIdx, CastTy);
4552 case TargetOpcode::G_CONCAT_VECTORS:
4553 return bitcastConcatVector(MI, TypeIdx, CastTy);
4554 case TargetOpcode::G_SHUFFLE_VECTOR:
4555 return bitcastShuffleVector(MI, TypeIdx, CastTy);
4556 case TargetOpcode::G_EXTRACT_SUBVECTOR:
4557 return bitcastExtractSubvector(MI, TypeIdx, CastTy);
4558 case TargetOpcode::G_INSERT_SUBVECTOR:
4559 return bitcastInsertSubvector(MI, TypeIdx, CastTy);
4560 default:
4561 return UnableToLegalize;
4562 }
4563}
4564
4565// Legalize an instruction by changing the opcode in place.
4566void LegalizerHelper::changeOpcode(MachineInstr &MI, unsigned NewOpcode) {
  // Swap in the MCInstrDesc for NewOpcode; operands are reused as-is, so
  // NewOpcode must accept the same operand list.
4568 MI.setDesc(MIRBuilder.getTII().get(NewOpcode));
4570}
4571
4573LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT LowerHintTy) {
4574 using namespace TargetOpcode;
4575
4576 switch(MI.getOpcode()) {
4577 default:
4578 return UnableToLegalize;
4579 case TargetOpcode::G_FCONSTANT:
4580 return lowerFConstant(MI);
4581 case TargetOpcode::G_BITCAST:
4582 return lowerBitcast(MI);
4583 case TargetOpcode::G_SREM:
4584 case TargetOpcode::G_UREM: {
4585 LLT Ty = MRI.getType(MI.getOperand(0).getReg());
4586 auto Quot =
4587 MIRBuilder.buildInstr(MI.getOpcode() == G_SREM ? G_SDIV : G_UDIV, {Ty},
4588 {MI.getOperand(1), MI.getOperand(2)});
4589
4590 auto Prod = MIRBuilder.buildMul(Ty, Quot, MI.getOperand(2));
4591 MIRBuilder.buildSub(MI.getOperand(0), MI.getOperand(1), Prod);
4592 MI.eraseFromParent();
4593 return Legalized;
4594 }
4595 case TargetOpcode::G_SADDO:
4596 case TargetOpcode::G_SSUBO:
4597 return lowerSADDO_SSUBO(MI);
4598 case TargetOpcode::G_SADDE:
4599 return lowerSADDE(MI);
4600 case TargetOpcode::G_SSUBE:
4601 return lowerSSUBE(MI);
4602 case TargetOpcode::G_UMULH:
4603 case TargetOpcode::G_SMULH:
4604 return lowerSMULH_UMULH(MI);
4605 case TargetOpcode::G_SMULO:
4606 case TargetOpcode::G_UMULO: {
4607 // Generate G_UMULH/G_SMULH to check for overflow and a normal G_MUL for the
4608 // result.
4609 auto [Res, Overflow, LHS, RHS] = MI.getFirst4Regs();
4610 LLT Ty = MRI.getType(Res);
4611
4612 unsigned Opcode = MI.getOpcode() == TargetOpcode::G_SMULO
4613 ? TargetOpcode::G_SMULH
4614 : TargetOpcode::G_UMULH;
4615
4616 Observer.changingInstr(MI);
4617 const auto &TII = MIRBuilder.getTII();
4618 MI.setDesc(TII.get(TargetOpcode::G_MUL));
4619 MI.removeOperand(1);
4620 Observer.changedInstr(MI);
4621
4622 auto HiPart = MIRBuilder.buildInstr(Opcode, {Ty}, {LHS, RHS});
4623 auto Zero = MIRBuilder.buildConstant(Ty, 0);
4624
4625 // Move insert point forward so we can use the Res register if needed.
4626 MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt());
4627
4628 // For *signed* multiply, overflow is detected by checking:
4629 // (hi != (lo >> bitwidth-1))
4630 if (Opcode == TargetOpcode::G_SMULH) {
4631 auto ShiftAmt = MIRBuilder.buildConstant(Ty, Ty.getSizeInBits() - 1);
4632 auto Shifted = MIRBuilder.buildAShr(Ty, Res, ShiftAmt);
4633 MIRBuilder.buildICmp(CmpInst::ICMP_NE, Overflow, HiPart, Shifted);
4634 } else {
4635 MIRBuilder.buildICmp(CmpInst::ICMP_NE, Overflow, HiPart, Zero);
4636 }
4637 return Legalized;
4638 }
4639 case TargetOpcode::G_FNEG: {
4640 auto [Res, ResTy, SubByReg, SubByRegTy] = MI.getFirst2RegLLTs();
4641 LLT TyInt =
4642 ResTy.changeElementType(LLT::integer(ResTy.getScalarSizeInBits()));
4643 Register CastedSubByReg = SubByReg;
4644
4645 if (!SubByRegTy.getScalarType().isAnyScalar() &&
4646 !SubByRegTy.getScalarType().isInteger()) {
4647 auto BitcastDst = SubByRegTy.changeElementType(
4648 LLT::integer(SubByRegTy.getScalarSizeInBits()));
4649 CastedSubByReg = MIRBuilder.buildBitcast(BitcastDst, SubByReg).getReg(0);
4650 }
4651
4652 auto SignMask = MIRBuilder.buildConstant(
4653 TyInt, APInt::getSignMask(TyInt.getScalarSizeInBits()));
4654
4655 if (ResTy != TyInt) {
4656 Register NewDst =
4657 MIRBuilder.buildXor(TyInt, CastedSubByReg, SignMask).getReg(0);
4658 MIRBuilder.buildBitcast(Res, NewDst);
4659 } else
4660 MIRBuilder.buildXor(Res, CastedSubByReg, SignMask).getReg(0);
4661
4662 MI.eraseFromParent();
4663 return Legalized;
4664 }
4665 case TargetOpcode::G_FSUB:
4666 case TargetOpcode::G_STRICT_FSUB: {
4667 auto [Res, LHS, RHS] = MI.getFirst3Regs();
4668 LLT Ty = MRI.getType(Res);
4669
4670 // Lower (G_FSUB LHS, RHS) to (G_FADD LHS, (G_FNEG RHS)).
4671 auto Neg = MIRBuilder.buildFNeg(Ty, RHS);
4672
4673 if (MI.getOpcode() == TargetOpcode::G_STRICT_FSUB)
4674 MIRBuilder.buildStrictFAdd(Res, LHS, Neg, MI.getFlags());
4675 else
4676 MIRBuilder.buildFAdd(Res, LHS, Neg, MI.getFlags());
4677
4678 MI.eraseFromParent();
4679 return Legalized;
4680 }
4681 case TargetOpcode::G_FMAD:
4682 return lowerFMad(MI);
4683 case TargetOpcode::G_FFLOOR:
4684 return lowerFFloor(MI);
4685 case TargetOpcode::G_LROUND:
4686 case TargetOpcode::G_LLROUND: {
4687 Register DstReg = MI.getOperand(0).getReg();
4688 Register SrcReg = MI.getOperand(1).getReg();
4689 LLT SrcTy = MRI.getType(SrcReg);
4690 auto Round = MIRBuilder.buildInstr(TargetOpcode::G_INTRINSIC_ROUND, {SrcTy},
4691 {SrcReg});
4692 MIRBuilder.buildFPTOSI(DstReg, Round);
4693 MI.eraseFromParent();
4694 return Legalized;
4695 }
4696 case TargetOpcode::G_INTRINSIC_ROUND:
4697 return lowerIntrinsicRound(MI);
4698 case TargetOpcode::G_FRINT: {
4699 // Since round even is the assumed rounding mode for unconstrained FP
4700 // operations, rint and roundeven are the same operation.
4701 changeOpcode(MI, TargetOpcode::G_INTRINSIC_ROUNDEVEN);
4702 return Legalized;
4703 }
4704 case TargetOpcode::G_INTRINSIC_LRINT:
4705 case TargetOpcode::G_INTRINSIC_LLRINT: {
4706 Register DstReg = MI.getOperand(0).getReg();
4707 Register SrcReg = MI.getOperand(1).getReg();
4708 LLT SrcTy = MRI.getType(SrcReg);
4709 auto Round =
4710 MIRBuilder.buildInstr(TargetOpcode::G_FRINT, {SrcTy}, {SrcReg});
4711 MIRBuilder.buildFPTOSI(DstReg, Round);
4712 MI.eraseFromParent();
4713 return Legalized;
4714 }
4715 case TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS: {
4716 auto [OldValRes, SuccessRes, Addr, CmpVal, NewVal] = MI.getFirst5Regs();
4717 Register NewOldValRes = MRI.cloneVirtualRegister(OldValRes);
4718 MIRBuilder.buildAtomicCmpXchg(NewOldValRes, Addr, CmpVal, NewVal,
4719 **MI.memoperands_begin());
4720 MIRBuilder.buildICmp(CmpInst::ICMP_EQ, SuccessRes, NewOldValRes, CmpVal);
4721 MIRBuilder.buildCopy(OldValRes, NewOldValRes);
4722 MI.eraseFromParent();
4723 return Legalized;
4724 }
4725 case TargetOpcode::G_LOAD:
4726 case TargetOpcode::G_SEXTLOAD:
4727 case TargetOpcode::G_ZEXTLOAD:
4728 return lowerLoad(cast<GAnyLoad>(MI));
4729 case TargetOpcode::G_STORE:
4730 return lowerStore(cast<GStore>(MI));
4731 case TargetOpcode::G_CTLZ_ZERO_POISON:
4732 case TargetOpcode::G_CTTZ_ZERO_POISON:
4733 case TargetOpcode::G_CTLZ:
4734 case TargetOpcode::G_CTTZ:
4735 case TargetOpcode::G_CTPOP:
4736 case TargetOpcode::G_CTLS:
4737 return lowerBitCount(MI);
4738 case G_UADDO: {
4739 auto [Res, CarryOut, LHS, RHS] = MI.getFirst4Regs();
4740
4741 Register NewRes = MRI.cloneVirtualRegister(Res);
4742
4743 MIRBuilder.buildAdd(NewRes, LHS, RHS);
4744 MIRBuilder.buildICmp(CmpInst::ICMP_ULT, CarryOut, NewRes, RHS);
4745
4746 MIRBuilder.buildCopy(Res, NewRes);
4747
4748 MI.eraseFromParent();
4749 return Legalized;
4750 }
4751 case G_UADDE: {
4752 auto [Res, CarryOut, LHS, RHS, CarryIn] = MI.getFirst5Regs();
4753 const LLT CondTy = MRI.getType(CarryOut);
4754 const LLT Ty = MRI.getType(Res);
4755
4756 Register NewRes = MRI.cloneVirtualRegister(Res);
4757
4758 // Initial add of the two operands.
4759 auto TmpRes = MIRBuilder.buildAdd(Ty, LHS, RHS);
4760
4761 // Initial check for carry.
4762 auto Carry = MIRBuilder.buildICmp(CmpInst::ICMP_ULT, CondTy, TmpRes, LHS);
4763
4764 // Add the sum and the carry.
4765 auto ZExtCarryIn = MIRBuilder.buildZExt(Ty, CarryIn);
4766 MIRBuilder.buildAdd(NewRes, TmpRes, ZExtCarryIn);
4767
4768 // Second check for carry. We can only carry if the initial sum is all 1s
4769 // and the carry is set, resulting in a new sum of 0.
4770 auto Zero = MIRBuilder.buildConstant(Ty, 0);
4771 auto ResEqZero =
4772 MIRBuilder.buildICmp(CmpInst::ICMP_EQ, CondTy, NewRes, Zero);
4773 auto Carry2 = MIRBuilder.buildAnd(CondTy, ResEqZero, CarryIn);
4774 MIRBuilder.buildOr(CarryOut, Carry, Carry2);
4775
4776 MIRBuilder.buildCopy(Res, NewRes);
4777
4778 MI.eraseFromParent();
4779 return Legalized;
4780 }
4781 case G_USUBO: {
4782 auto [Res, BorrowOut, LHS, RHS] = MI.getFirst4Regs();
4783
4784 MIRBuilder.buildSub(Res, LHS, RHS);
4785 MIRBuilder.buildICmp(CmpInst::ICMP_ULT, BorrowOut, LHS, RHS);
4786
4787 MI.eraseFromParent();
4788 return Legalized;
4789 }
4790 case G_USUBE: {
4791 auto [Res, BorrowOut, LHS, RHS, BorrowIn] = MI.getFirst5Regs();
4792 const LLT CondTy = MRI.getType(BorrowOut);
4793 const LLT Ty = MRI.getType(Res);
4794
4795 // Initial subtract of the two operands.
4796 auto TmpRes = MIRBuilder.buildSub(Ty, LHS, RHS);
4797
4798 // Initial check for borrow.
4799 auto Borrow = MIRBuilder.buildICmp(CmpInst::ICMP_UGT, CondTy, TmpRes, LHS);
4800
4801 // Subtract the borrow from the first subtract.
4802 auto ZExtBorrowIn = MIRBuilder.buildZExt(Ty, BorrowIn);
4803 MIRBuilder.buildSub(Res, TmpRes, ZExtBorrowIn);
4804
4805 // Second check for borrow. We can only borrow if the initial difference is
4806 // 0 and the borrow is set, resulting in a new difference of all 1s.
4807 auto Zero = MIRBuilder.buildConstant(Ty, 0);
4808 auto TmpResEqZero =
4809 MIRBuilder.buildICmp(CmpInst::ICMP_EQ, CondTy, TmpRes, Zero);
4810 auto Borrow2 = MIRBuilder.buildAnd(CondTy, TmpResEqZero, BorrowIn);
4811 MIRBuilder.buildOr(BorrowOut, Borrow, Borrow2);
4812
4813 MI.eraseFromParent();
4814 return Legalized;
4815 }
4816 case G_UITOFP:
4817 return lowerUITOFP(MI);
4818 case G_SITOFP:
4819 return lowerSITOFP(MI);
4820 case G_FPTOUI:
4821 return lowerFPTOUI(MI);
4822 case G_FPTOSI:
4823 return lowerFPTOSI(MI);
4824 case G_FPTOUI_SAT:
4825 case G_FPTOSI_SAT:
4826 return lowerFPTOINT_SAT(MI);
4827 case G_FPEXT:
4828 return lowerFPExtAndTruncMem(MI);
4829 case G_FPTRUNC:
4830 return lowerFPTRUNC(MI);
4831 case G_FPOWI:
4832 return lowerFPOWI(MI);
4833 case G_FMODF:
4834 return lowerFMODF(MI);
4835 case G_SMIN:
4836 case G_SMAX:
4837 case G_UMIN:
4838 case G_UMAX:
4839 return lowerMinMax(MI);
4840 case G_SCMP:
4841 case G_UCMP:
4842 return lowerThreewayCompare(MI);
4843 case G_FCOPYSIGN:
4844 return lowerFCopySign(MI);
4845 case G_FMINNUM:
4846 case G_FMAXNUM:
4847 case G_FMINIMUMNUM:
4848 case G_FMAXIMUMNUM:
4849 return lowerFMinNumMaxNum(MI);
4850 case G_FMINIMUM:
4851 case G_FMAXIMUM:
4852 return lowerFMinimumMaximum(MI);
4853 case G_MERGE_VALUES:
4854 return lowerMergeValues(MI);
4855 case G_UNMERGE_VALUES:
4856 return lowerUnmergeValues(MI);
4857 case TargetOpcode::G_SEXT_INREG: {
4858 assert(MI.getOperand(2).isImm() && "Expected immediate");
4859 int64_t SizeInBits = MI.getOperand(2).getImm();
4860
4861 auto [DstReg, SrcReg] = MI.getFirst2Regs();
4862 LLT DstTy = MRI.getType(DstReg);
4863 Register TmpRes = MRI.createGenericVirtualRegister(DstTy);
4864
4865 auto MIBSz = MIRBuilder.buildConstant(DstTy, DstTy.getScalarSizeInBits() - SizeInBits);
4866 MIRBuilder.buildShl(TmpRes, SrcReg, MIBSz->getOperand(0));
4867 MIRBuilder.buildAShr(DstReg, TmpRes, MIBSz->getOperand(0));
4868 MI.eraseFromParent();
4869 return Legalized;
4870 }
4871 case G_EXTRACT_VECTOR_ELT:
4872 case G_INSERT_VECTOR_ELT:
4874 case G_SHUFFLE_VECTOR:
4875 return lowerShuffleVector(MI);
4876 case G_VECTOR_COMPRESS:
4877 return lowerVECTOR_COMPRESS(MI);
4878 case G_DYN_STACKALLOC:
4879 return lowerDynStackAlloc(MI);
4880 case G_STACKSAVE:
4881 return lowerStackSave(MI);
4882 case G_STACKRESTORE:
4883 return lowerStackRestore(MI);
4884 case G_EXTRACT:
4885 return lowerExtract(MI);
4886 case G_INSERT:
4887 return lowerInsert(MI);
4888 case G_BSWAP:
4889 return lowerBswap(MI);
4890 case G_BITREVERSE:
4891 return lowerBitreverse(MI);
4892 case G_READ_REGISTER:
4893 case G_WRITE_REGISTER:
4894 return lowerReadWriteRegister(MI);
4895 case G_UADDSAT:
4896 case G_USUBSAT: {
4897 // Try to make a reasonable guess about which lowering strategy to use. The
4898 // target can override this with custom lowering and calling the
4899 // implementation functions.
4900 LLT Ty = MRI.getType(MI.getOperand(0).getReg());
4901 if (LI.isLegalOrCustom({G_UMIN, Ty}))
4902 return lowerAddSubSatToMinMax(MI);
4904 }
4905 case G_SADDSAT:
4906 case G_SSUBSAT: {
4907 LLT Ty = MRI.getType(MI.getOperand(0).getReg());
4908
4909 // FIXME: It would probably make more sense to see if G_SADDO is preferred,
4910 // since it's a shorter expansion. However, we would need to figure out the
4911 // preferred boolean type for the carry out for the query.
4912 if (LI.isLegalOrCustom({G_SMIN, Ty}) && LI.isLegalOrCustom({G_SMAX, Ty}))
4913 return lowerAddSubSatToMinMax(MI);
4915 }
4916 case G_SSHLSAT:
4917 case G_USHLSAT:
4918 return lowerShlSat(MI);
4919 case G_ABS:
4920 return lowerAbsToAddXor(MI);
4921 case G_ABDS:
4922 case G_ABDU: {
4923 bool IsSigned = MI.getOpcode() == G_ABDS;
4924 LLT Ty = MRI.getType(MI.getOperand(0).getReg());
4925 if ((IsSigned && LI.isLegal({G_SMIN, Ty}) && LI.isLegal({G_SMAX, Ty})) ||
4926 (!IsSigned && LI.isLegal({G_UMIN, Ty}) && LI.isLegal({G_UMAX, Ty}))) {
4927 return lowerAbsDiffToMinMax(MI);
4928 }
4929 return lowerAbsDiffToSelect(MI);
4930 }
4931 case G_FABS:
4932 return lowerFAbs(MI);
4933 case G_SELECT:
4934 return lowerSelect(MI);
4935 case G_IS_FPCLASS:
4936 return lowerISFPCLASS(MI);
4937 case G_SDIVREM:
4938 case G_UDIVREM:
4939 return lowerDIVREM(MI);
4940 case G_FSHL:
4941 case G_FSHR:
4942 return lowerFunnelShift(MI);
4943 case G_ROTL:
4944 case G_ROTR:
4945 return lowerRotate(MI);
4946 case G_MEMSET:
4947 case G_MEMCPY:
4948 case G_MEMMOVE:
4949 return lowerMemCpyFamily(MI);
4950 case G_MEMCPY_INLINE:
4951 return lowerMemcpyInline(MI);
4952 case G_ZEXT:
4953 case G_SEXT:
4954 case G_ANYEXT:
4955 return lowerEXT(MI);
4956 case G_TRUNC:
4957 return lowerTRUNC(MI);
4959 return lowerVectorReduction(MI);
4960 case G_VAARG:
4961 return lowerVAArg(MI);
4962 case G_ATOMICRMW_SUB: {
4963 auto [Ret, Mem, Val] = MI.getFirst3Regs();
4964 const LLT ValTy = MRI.getType(Val);
4965 MachineMemOperand *MMO = *MI.memoperands_begin();
4966
4967 auto VNeg = MIRBuilder.buildNeg(ValTy, Val);
4968 MIRBuilder.buildAtomicRMW(G_ATOMICRMW_ADD, Ret, Mem, VNeg, *MMO);
4969 MI.eraseFromParent();
4970 return Legalized;
4971 }
4972 case G_SMULFIX:
4973 case G_UMULFIX:
4974 return lowerMulfix(MI);
4975 }
4976}
4977
// NOTE(review): the opening line of this definition (the function name and
// first parameter, original line 4978) was dropped by doxygen extraction —
// confirm against upstream LegalizerHelper.cpp. Visible behavior: returns
// the alignment to use for a stack temporary of type Ty — the power-of-2
// ceiling of the type's byte size, but never below MinAlign.
 4979 Align MinAlign) const {
 4980 // FIXME: We're missing a way to go back from LLT to llvm::Type to query the
 4981 // datalayout for the preferred alignment. Also there should be a target hook
 4982 // for this to allow targets to reduce the alignment and ignore the
 4983 // datalayout. e.g. AMDGPU should always use a 4-byte alignment, regardless of
 4984 // the type.
 4985 return std::max(Align(PowerOf2Ceil(Ty.getSizeInBytes())), MinAlign);
 4986}
4987
// NOTE(review): the signature's opening lines (original 4988-4989, carrying
// the function name and the size/alignment parameters) were dropped by
// doxygen extraction — confirm against upstream. Visible behavior: creates a
// fixed-size stack object, reports its MachinePointerInfo through the PtrInfo
// out-parameter, and returns a G_FRAME_INDEX addressing it.
 4990 MachinePointerInfo &PtrInfo) {
 4991 MachineFunction &MF = MIRBuilder.getMF();
 4992 const DataLayout &DL = MIRBuilder.getDataLayout();
 // Not a spill slot (third argument false): this is an ordinary stack object.
 4993 int FrameIdx = MF.getFrameInfo().CreateStackObject(Bytes, Alignment, false);
 4994
 // The frame-index pointer lives in the target's alloca address space.
 4995 unsigned AddrSpace = DL.getAllocaAddrSpace();
 4996 LLT FramePtrTy = LLT::pointer(AddrSpace, DL.getPointerSizeInBits(AddrSpace));
 4997
 4998 PtrInfo = MachinePointerInfo::getFixedStack(MF, FrameIdx);
 4999 return MIRBuilder.buildFrameIndex(FramePtrTy, FrameIdx);
 5000}
5001
// NOTE(review): the signature's opening line (original 5002, function name
// and the Res parameter) and the second argument of the std::max call
// (original line 5007 — presumably the stack alignment derived from Res's
// type) were dropped by doxygen extraction; verify against upstream before
// editing. Visible behavior: round-trips Val through a stack temporary —
// store Val, then load it back as Res — using a common alignment for both.
 5003 const SrcOp &Val) {
 5004 LLT SrcTy = Val.getLLTTy(MRI);
 5005 Align StackTypeAlign =
 5006 std::max(getStackTemporaryAlignment(SrcTy),
 5008 MachinePointerInfo PtrInfo;
 5009 auto StackTemp =
 5010 createStackTemporary(SrcTy.getSizeInBytes(), StackTypeAlign, PtrInfo);
 5011
 5012 MIRBuilder.buildStore(Val, StackTemp, PtrInfo, StackTypeAlign);
 5013 return MIRBuilder.buildLoad(Res, StackTemp, PtrInfo, StackTypeAlign);
 5014}
5015
// NOTE(review): the signature's opening line (original 5016, function name,
// builder and index-register parameters) was dropped by doxygen extraction —
// confirm against upstream. Visible behavior: returns IdxReg clamped so it
// cannot index past the end of a VecTy vector, emitting an AND (power-of-2
// element count) or a UMIN otherwise; known-in-range constants pass through.
 5017 LLT VecTy) {
 5018 LLT IdxTy = B.getMRI()->getType(IdxReg);
 5019 unsigned NElts = VecTy.getNumElements();
 5020
 5021 int64_t IdxVal;
 5022 if (mi_match(IdxReg, *B.getMRI(), m_ICst(IdxVal))) {
 // NOTE(review): int64_t vs unsigned comparison — a negative constant index
 // compares less-than here and is returned unclamped; presumably negative
 // indices cannot reach this point, but confirm against callers.
 5023 if (IdxVal < VecTy.getNumElements())
 5024 return IdxReg;
 5025 // If a constant index would be out of bounds, clamp it as well.
 5026 }
 5027
 // Power-of-2 element count: masking the low log2(NElts) bits is equivalent
 // to (and cheaper than) an unsigned min against NElts - 1.
 5028 if (isPowerOf2_32(NElts)) {
 5029 APInt Imm = APInt::getLowBitsSet(IdxTy.getSizeInBits(), Log2_32(NElts));
 5030 return B.buildAnd(IdxTy, IdxReg, B.buildConstant(IdxTy, Imm)).getReg(0);
 5031 }
 5032
 5033 return B.buildUMin(IdxTy, IdxReg, B.buildConstant(IdxTy, NElts - 1))
 5034 .getReg(0);
 5035}
5036
// NOTE(review): the signature's opening lines (original 5036-5037, function
// name plus the vector-pointer and vector-type parameters) were dropped by
// doxygen extraction — confirm against upstream. Visible behavior: computes
// a pointer to element Index of the vector at VecPtr by clamping the index,
// widening/truncating it to the address space's index width, scaling by the
// element byte size, and adding it to the base pointer.
 5038 Register Index) {
 5039 LLT EltTy = VecTy.getElementType();
 5040
 5041 // Calculate the element offset and add it to the pointer.
 5042 unsigned EltSize = EltTy.getSizeInBits() / 8; // FIXME: should be ABI size.
 5043 assert(EltSize * 8 == EltTy.getSizeInBits() &&
 5044 "Converting bits to bytes lost precision");
 5045
 // Keep the access in bounds of the vector, even for variable indices.
 5046 Index = clampVectorIndex(MIRBuilder, Index, VecTy);
 5047
 5048 // Convert index to the correct size for the address space.
 5049 const DataLayout &DL = MIRBuilder.getDataLayout();
 5050 unsigned AS = MRI.getType(VecPtr).getAddressSpace();
 5051 unsigned IndexSizeInBits = DL.getIndexSize(AS) * 8;
 5052 LLT IdxTy = MRI.getType(Index).changeElementSize(IndexSizeInBits);
 5053 if (IdxTy != MRI.getType(Index))
 5054 Index = MIRBuilder.buildSExtOrTrunc(IdxTy, Index).getReg(0);
 5055
 // Byte offset = index * element size.
 5056 auto Mul = MIRBuilder.buildMul(IdxTy, Index,
 5057 MIRBuilder.buildConstant(IdxTy, EltSize));
 5058
 5059 LLT PtrTy = MRI.getType(VecPtr);
 5060 return MIRBuilder.buildPtrAdd(PtrTy, VecPtr, Mul).getReg(0);
 5061}
5062
 5063#ifndef NDEBUG
 5064/// Check that all vector operands have same number of elements. Other operands
 5065/// should be listed in NonVecOp.
// NOTE(review): the signature's opening lines (original 5066-5067, function
// name and the MI/MRI parameters) were dropped by doxygen extraction —
// confirm against upstream. Assertion-only helper (compiled out in NDEBUG
// builds): returns true iff MI has no memory operands, its first def is a
// vector, and every register operand not listed in NonVecOpIndices is a
// vector with the same element count as that first def.
 5068 std::initializer_list<unsigned> NonVecOpIndices) {
 // Instructions touching memory are never valid for this splitting scheme.
 5069 if (MI.getNumMemOperands() != 0)
 5070 return false;
 5071
 5072 LLT VecTy = MRI.getType(MI.getReg(0));
 5073 if (!VecTy.isVector())
 5074 return false;
 5075 unsigned NumElts = VecTy.getNumElements();
 5076
 5077 for (unsigned OpIdx = 1; OpIdx < MI.getNumOperands(); ++OpIdx) {
 5078 MachineOperand &Op = MI.getOperand(OpIdx);
 // Non-register operands (immediates, predicates, ...) are only allowed if
 // the caller declared them in NonVecOpIndices.
 5079 if (!Op.isReg()) {
 5080 if (!is_contained(NonVecOpIndices, OpIdx))
 5081 return false;
 5082 continue;
 5083 }
 5084
 // Same for scalar register operands.
 5085 LLT Ty = MRI.getType(Op.getReg());
 5086 if (!Ty.isVector()) {
 5087 if (!is_contained(NonVecOpIndices, OpIdx))
 5088 return false;
 5089 continue;
 5090 }
 5091
 // All vector operands must agree on the element count.
 5092 if (Ty.getNumElements() != NumElts)
 5093 return false;
 5094 }
 5095
 5096 return true;
 5097}
 5098#endif
5099
5100/// Fill \p DstOps with DstOps that have same number of elements combined as
5101/// the Ty. These DstOps have either scalar type when \p NumElts = 1 or are
5102/// vectors with \p NumElts elements. When Ty.getNumElements() is not multiple
5103/// of \p NumElts last DstOp (leftover) has fewer then \p NumElts elements.
5104static void makeDstOps(SmallVectorImpl<DstOp> &DstOps, LLT Ty,
5105 unsigned NumElts) {
5106 LLT LeftoverTy;
5107 assert(Ty.isVector() && "Expected vector type");
5108 LLT NarrowTy = Ty.changeElementCount(ElementCount::getFixed(NumElts));
5109 int NumParts, NumLeftover;
5110 std::tie(NumParts, NumLeftover) =
5111 getNarrowTypeBreakDown(Ty, NarrowTy, LeftoverTy);
5112
5113 assert(NumParts > 0 && "Error in getNarrowTypeBreakDown");
5114 for (int i = 0; i < NumParts; ++i) {
5115 DstOps.push_back(NarrowTy);
5116 }
5117
5118 if (LeftoverTy.isValid()) {
5119 assert(NumLeftover == 1 && "expected exactly one leftover");
5120 DstOps.push_back(LeftoverTy);
5121 }
5122}
5123
 5124/// Operand \p Op is used on \p N sub-instructions. Fill \p Ops with \p N SrcOps
 5125/// made from \p Op depending on operand type.
// NOTE(review): the signature's opening line (original 5126, function name
// plus the Ops and N parameters) was dropped by doxygen extraction — confirm
// against upstream. Visible behavior: appends N copies of Op to Ops,
// converted to the matching SrcOp flavor (register, immediate, or predicate).
 5127 MachineOperand &Op) {
 5128 for (unsigned i = 0; i < N; ++i) {
 5129 if (Op.isReg())
 5130 Ops.push_back(Op.getReg());
 5131 else if (Op.isImm())
 5132 Ops.push_back(Op.getImm());
 5133 else if (Op.isPredicate())
 5134 Ops.push_back(static_cast<CmpInst::Predicate>(Op.getPredicate()));
 5135 else
 // Any other operand kind is unsupported for broadcasting.
 5136 llvm_unreachable("Unsupported type");
 5137 }
 5138}
5139
 5140// Handle splitting vector operations which need to have the same number of
 5141// elements in each type index, but each type index may have a different element
 5142// type.
 5143//
 5144// e.g. <4 x s64> = G_SHL <4 x s64>, <4 x s32> ->
 5145// <2 x s64> = G_SHL <2 x s64>, <2 x s32>
 5146// <2 x s64> = G_SHL <2 x s64>, <2 x s32>
 5147//
 5148// Also handles some irregular breakdown cases, e.g.
 5149// e.g. <3 x s64> = G_SHL <3 x s64>, <3 x s32> ->
 5150// <2 x s64> = G_SHL <2 x s64>, <2 x s32>
 5151// s64 = G_SHL s64, s32
// NOTE(review): the signature's opening lines (original 5152-5153, the return
// type and function name — upstream this is
// LegalizerHelper::fewerElementsVectorMultiEltType) were dropped by doxygen
// extraction; so were the local declarations on original lines 5196 and 5200
// (presumably the Defs and Uses SmallVectors used below) — confirm against
// upstream before editing.
 5154 GenericMachineInstr &MI, unsigned NumElts,
 5155 std::initializer_list<unsigned> NonVecOpIndices) {
 5156 assert(hasSameNumEltsOnAllVectorOperands(MI, MRI, NonVecOpIndices) &&
 5157 "Non-compatible opcode or not specified non-vector operands");
 5158 unsigned OrigNumElts = MRI.getType(MI.getReg(0)).getNumElements();
 5159
 5160 unsigned NumInputs = MI.getNumOperands() - MI.getNumDefs();
 5161 unsigned NumDefs = MI.getNumDefs();
 5162
 5163 // Create DstOps (sub-vectors with NumElts elts + Leftover) for each output.
 5164 // Build instructions with DstOps to use instruction found by CSE directly.
 5165 // CSE copies found instruction into given vreg when building with vreg dest.
 5166 SmallVector<SmallVector<DstOp, 8>, 2> OutputOpsPieces(NumDefs);
 5167 // Output registers will be taken from created instructions.
 5168 SmallVector<SmallVector<Register, 8>, 2> OutputRegs(NumDefs);
 5169 for (unsigned i = 0; i < NumDefs; ++i) {
 5170 makeDstOps(OutputOpsPieces[i], MRI.getType(MI.getReg(i)), NumElts);
 5171 }
 5172
 5173 // Split vector input operands into sub-vectors with NumElts elts + Leftover.
 5174 // Operands listed in NonVecOpIndices will be used as is without splitting;
 5175 // examples: compare predicate in icmp and fcmp (op 1), vector select with i1
 5176 // scalar condition (op 1), immediate in sext_inreg (op 2).
 5177 SmallVector<SmallVector<SrcOp, 8>, 3> InputOpsPieces(NumInputs);
 5178 for (unsigned UseIdx = NumDefs, UseNo = 0; UseIdx < MI.getNumOperands();
 5179 ++UseIdx, ++UseNo) {
 5180 if (is_contained(NonVecOpIndices, UseIdx)) {
 // Non-vector operand: replicate it once per resulting sub-instruction.
 5181 broadcastSrcOp(InputOpsPieces[UseNo], OutputOpsPieces[0].size(),
 5182 MI.getOperand(UseIdx));
 5183 } else {
 5184 SmallVector<Register, 8> SplitPieces;
 5185 extractVectorParts(MI.getReg(UseIdx), NumElts, SplitPieces, MIRBuilder,
 5186 MRI);
 5187 llvm::append_range(InputOpsPieces[UseNo], SplitPieces);
 5188 }
 5189 }
 5190
 // One extra (smaller) piece when NumElts does not evenly divide the count.
 5191 unsigned NumLeftovers = OrigNumElts % NumElts ? 1 : 0;
 5192
 5193 // Take i-th piece of each input operand split and build sub-vector/scalar
 5194 // instruction. Set i-th DstOp(s) from OutputOpsPieces as destination(s).
 5195 for (unsigned i = 0; i < OrigNumElts / NumElts + NumLeftovers; ++i) {
 5197 for (unsigned DstNo = 0; DstNo < NumDefs; ++DstNo)
 5198 Defs.push_back(OutputOpsPieces[DstNo][i]);
 5199
 5201 for (unsigned InputNo = 0; InputNo < NumInputs; ++InputNo)
 5202 Uses.push_back(InputOpsPieces[InputNo][i]);
 5203
 // Rebuild the original opcode on the narrow pieces, preserving MI flags.
 5204 auto I = MIRBuilder.buildInstr(MI.getOpcode(), Defs, Uses, MI.getFlags());
 5205 for (unsigned DstNo = 0; DstNo < NumDefs; ++DstNo)
 5206 OutputRegs[DstNo].push_back(I.getReg(DstNo));
 5207 }
 5208
 5209 // Merge small outputs into MI's output for each def operand.
 5210 if (NumLeftovers) {
 5211 for (unsigned i = 0; i < NumDefs; ++i)
 5212 mergeMixedSubvectors(MI.getReg(i), OutputRegs[i]);
 5213 } else {
 5214 for (unsigned i = 0; i < NumDefs; ++i)
 5215 MIRBuilder.buildMergeLikeInstr(MI.getReg(i), OutputRegs[i]);
 5216 }
 5217
 5218 MI.eraseFromParent();
 5219 return Legalized;
 5220}
5221
// NOTE(review): the signature's opening lines (original 5222-5223 — upstream
// this is LegalizerHelper::fewerElementsVectorPhi(GenericMachineInstr &MI,
// unsigned NumElts)) were dropped by doxygen extraction; confirm against
// upstream. Visible behavior: splits a wide vector G_PHI into several PHIs
// of NumElts-wide pieces (plus a possible smaller leftover piece), splitting
// each incoming value in its predecessor block and re-merging the narrow PHI
// results into MI's original def.
 5224 unsigned NumElts) {
 5225 unsigned OrigNumElts = MRI.getType(MI.getReg(0)).getNumElements();
 5226
 5227 unsigned NumInputs = MI.getNumOperands() - MI.getNumDefs();
 5228 unsigned NumDefs = MI.getNumDefs();
 5229
 5230 SmallVector<DstOp, 8> OutputOpsPieces;
 5231 SmallVector<Register, 8> OutputRegs;
 5232 makeDstOps(OutputOpsPieces, MRI.getType(MI.getReg(0)), NumElts);
 5233
 5234 // Instructions that perform register split will be inserted in basic block
 5235 // where register is defined (basic block is in the next operand).
 // PHI uses come in (value, predecessor-MBB) pairs, hence NumInputs / 2.
 5236 SmallVector<SmallVector<Register, 8>, 3> InputOpsPieces(NumInputs / 2);
 5237 for (unsigned UseIdx = NumDefs, UseNo = 0; UseIdx < MI.getNumOperands();
 5238 UseIdx += 2, ++UseNo) {
 5239 MachineBasicBlock &OpMBB = *MI.getOperand(UseIdx + 1).getMBB();
 // Split each incoming value at the end of its predecessor block, before
 // any terminators.
 5240 MIRBuilder.setInsertPt(OpMBB, OpMBB.getFirstTerminatorForward());
 5241 extractVectorParts(MI.getReg(UseIdx), NumElts, InputOpsPieces[UseNo],
 5242 MIRBuilder, MRI);
 5243 }
 5244
 5245 // Build PHIs with fewer elements.
 5246 unsigned NumLeftovers = OrigNumElts % NumElts ? 1 : 0;
 5247 MIRBuilder.setInsertPt(*MI.getParent(), MI);
 5248 for (unsigned i = 0; i < OrigNumElts / NumElts + NumLeftovers; ++i) {
 5249 auto Phi = MIRBuilder.buildInstr(TargetOpcode::G_PHI);
 5250 Phi.addDef(
 5251 MRI.createGenericVirtualRegister(OutputOpsPieces[i].getLLTTy(MRI)));
 5252 OutputRegs.push_back(Phi.getReg(0));
 5253
 // Add the i-th narrow piece of every incoming value, keeping the original
 // predecessor-MBB operand alongside it.
 5254 for (unsigned j = 0; j < NumInputs / 2; ++j) {
 5255 Phi.addUse(InputOpsPieces[j][i]);
 5256 Phi.add(MI.getOperand(1 + j * 2 + 1));
 5257 }
 5258 }
 5259
 5260 // Set the insert point after the existing PHIs
 5261 MachineBasicBlock &MBB = *MI.getParent();
 5262 MIRBuilder.setInsertPt(MBB, MBB.getFirstNonPHI());
 5263
 5264 // Merge small outputs into MI's def.
 5265 if (NumLeftovers) {
 5266 mergeMixedSubvectors(MI.getReg(0), OutputRegs);
 5267 } else {
 5268 MIRBuilder.buildMergeLikeInstr(MI.getReg(0), OutputRegs);
 5269 }
 5270
 5271 MI.eraseFromParent();
 5272 return Legalized;
 5273}
5274
// NOTE(review): the signature's opening lines (original 5274-5276 — upstream
// this is LegalizerHelper::fewerElementsVectorUnmergeValues(MachineInstr &MI,
// ...)) were dropped by doxygen extraction; confirm against upstream.
// Visible behavior: legalizes G_UNMERGE_VALUES on type index 1 by first
// unmerging the wide source into NarrowTy pieces, then unmerging each piece
// into the original DstTy destinations.
 5277 unsigned TypeIdx,
 5278 LLT NarrowTy) {
 // Last operand is the single source; all preceding operands are defs.
 5279 const int NumDst = MI.getNumOperands() - 1;
 5280 const Register SrcReg = MI.getOperand(NumDst).getReg();
 5281 LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
 5282 LLT SrcTy = MRI.getType(SrcReg);
 5283
 5284 if (TypeIdx != 1 || NarrowTy == DstTy)
 5285 return UnableToLegalize;
 5286
 5287 // Requires compatible types. Otherwise SrcReg should have been defined by
 5288 // merge-like instruction that would get artifact combined. Most likely
 5289 // instruction that defines SrcReg has to perform more/fewer elements
 5290 // legalization compatible with NarrowTy.
 5291 assert(SrcTy.isVector() && NarrowTy.isVector() && "Expected vector types");
 5292 assert((SrcTy.getScalarType() == NarrowTy.getScalarType()) && "bad type");
 5293
 // NarrowTy must evenly divide SrcTy, and DstTy must evenly divide NarrowTy.
 5294 if ((SrcTy.getSizeInBits() % NarrowTy.getSizeInBits() != 0) ||
 5295 (NarrowTy.getSizeInBits() % DstTy.getSizeInBits() != 0))
 5296 return UnableToLegalize;
 5297
 5298 // This is most likely DstTy (smaller then register size) packed in SrcTy
 5299 // (larger then register size) and since unmerge was not combined it will be
 5300 // lowered to bit sequence extracts from register. Unpack SrcTy to NarrowTy
 5301 // (register size) pieces first. Then unpack each of NarrowTy pieces to DstTy.
 5302
 5303 // %1:_(DstTy), %2, %3, %4 = G_UNMERGE_VALUES %0:_(SrcTy)
 5304 //
 5305 // %5:_(NarrowTy), %6 = G_UNMERGE_VALUES %0:_(SrcTy) - reg sequence
 5306 // %1:_(DstTy), %2 = G_UNMERGE_VALUES %5:_(NarrowTy) - sequence of bits in reg
 5307 // %3:_(DstTy), %4 = G_UNMERGE_VALUES %6:_(NarrowTy)
 5308 auto Unmerge = MIRBuilder.buildUnmerge(NarrowTy, SrcReg);
 5309 const int NumUnmerge = Unmerge->getNumOperands() - 1;
 5310 const int PartsPerUnmerge = NumDst / NumUnmerge;
 5311
 // Second-level unmerges reuse MI's original destination registers directly.
 5312 for (int I = 0; I != NumUnmerge; ++I) {
 5313 auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_UNMERGE_VALUES);
 5314
 5315 for (int J = 0; J != PartsPerUnmerge; ++J)
 5316 MIB.addDef(MI.getOperand(I * PartsPerUnmerge + J).getReg());
 5317 MIB.addUse(Unmerge.getReg(I));
 5318 }
 5319
 5320 MI.eraseFromParent();
 5321 return Legalized;
 5322}
5323
// NOTE(review): the signature's opening lines (original 5324-5325 — upstream
// this is LegalizerHelper::fewerElementsVectorMerge(MachineInstr &MI,
// unsigned TypeIdx, ...)) were dropped by doxygen extraction, as were the
// local SmallVector declarations on original lines 5355 (Elts) and 5398
// (Sources) referenced below — confirm against upstream before editing.
// Visible behavior: legalizes merge-like instructions by regrouping sources
// into NarrowTy pieces and merging those pieces into the destination.
 5326 LLT NarrowTy) {
 5327 auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();
 5328 // Requires compatible types. Otherwise user of DstReg did not perform unmerge
 5329 // that should have been artifact combined. Most likely instruction that uses
 5330 // DstReg has to do more/fewer elements legalization compatible with NarrowTy.
 5331 assert(DstTy.isVector() && NarrowTy.isVector() && "Expected vector types");
 5332 assert((DstTy.getScalarType() == NarrowTy.getScalarType()) && "bad type");
 5333 if (NarrowTy == SrcTy)
 5334 return UnableToLegalize;
 5335
 5336 // This attempts to lower part of LCMTy merge/unmerge sequence. Intended use
 5337 // is for old mir tests. Since the changes to more/fewer elements it should no
 5338 // longer be possible to generate MIR like this when starting from llvm-ir
 5339 // because LCMTy approach was replaced with merge/unmerge to vector elements.
 5340 if (TypeIdx == 1) {
 5341 assert(SrcTy.isVector() && "Expected vector types");
 5342 assert((SrcTy.getScalarType() == NarrowTy.getScalarType()) && "bad type");
 5343 if ((DstTy.getSizeInBits() % NarrowTy.getSizeInBits() != 0) ||
 5344 (NarrowTy.getNumElements() >= SrcTy.getNumElements()))
 5345 return UnableToLegalize;
 5346 // %2:_(DstTy) = G_CONCAT_VECTORS %0:_(SrcTy), %1:_(SrcTy)
 5347 //
 5348 // %3:_(EltTy), %4, %5 = G_UNMERGE_VALUES %0:_(SrcTy)
 5349 // %6:_(EltTy), %7, %8 = G_UNMERGE_VALUES %1:_(SrcTy)
 5350 // %9:_(NarrowTy) = G_BUILD_VECTOR %3:_(EltTy), %4
 5351 // %10:_(NarrowTy) = G_BUILD_VECTOR %5:_(EltTy), %6
 5352 // %11:_(NarrowTy) = G_BUILD_VECTOR %7:_(EltTy), %8
 5353 // %2:_(DstTy) = G_CONCAT_VECTORS %9:_(NarrowTy), %10, %11
 5354
 // Flatten every source vector into individual scalar elements (Elts).
 5356 LLT EltTy = MRI.getType(MI.getOperand(1).getReg()).getScalarType();
 5357 for (unsigned i = 1; i < MI.getNumOperands(); ++i) {
 5358 auto Unmerge = MIRBuilder.buildUnmerge(EltTy, MI.getOperand(i).getReg());
 5359 for (unsigned j = 0; j < Unmerge->getNumDefs(); ++j)
 5360 Elts.push_back(Unmerge.getReg(j));
 5361 }
 5362
 // Regroup the flattened elements into NarrowTy-sized vectors.
 5363 SmallVector<Register, 8> NarrowTyElts;
 5364 unsigned NumNarrowTyElts = NarrowTy.getNumElements();
 5365 unsigned NumNarrowTyPieces = DstTy.getNumElements() / NumNarrowTyElts;
 5366 for (unsigned i = 0, Offset = 0; i < NumNarrowTyPieces;
 5367 ++i, Offset += NumNarrowTyElts) {
 5368 ArrayRef<Register> Pieces(&Elts[Offset], NumNarrowTyElts);
 5369 NarrowTyElts.push_back(
 5370 MIRBuilder.buildMergeLikeInstr(NarrowTy, Pieces).getReg(0));
 5371 }
 5372
 5373 MIRBuilder.buildMergeLikeInstr(DstReg, NarrowTyElts);
 5374 MI.eraseFromParent();
 5375 return Legalized;
 5376 }
 5377
 5378 assert(TypeIdx == 0 && "Bad type index");
 5379 if ((NarrowTy.getSizeInBits() % SrcTy.getSizeInBits() != 0) ||
 5380 (DstTy.getSizeInBits() % NarrowTy.getSizeInBits() != 0))
 5381 return UnableToLegalize;
 5382
 5383 // This is most likely SrcTy (smaller then register size) packed in DstTy
 5384 // (larger then register size) and since merge was not combined it will be
 5385 // lowered to bit sequence packing into register. Merge SrcTy to NarrowTy
 5386 // (register size) pieces first. Then merge each of NarrowTy pieces to DstTy.
 5387
 5388 // %0:_(DstTy) = G_MERGE_VALUES %1:_(SrcTy), %2, %3, %4
 5389 //
 5390 // %5:_(NarrowTy) = G_MERGE_VALUES %1:_(SrcTy), %2 - sequence of bits in reg
 5391 // %6:_(NarrowTy) = G_MERGE_VALUES %3:_(SrcTy), %4
 5392 // %0:_(DstTy) = G_MERGE_VALUES %5:_(NarrowTy), %6 - reg sequence
 5393 SmallVector<Register, 8> NarrowTyElts;
 5394 unsigned NumParts = DstTy.getNumElements() / NarrowTy.getNumElements();
 5395 unsigned NumSrcElts = SrcTy.isVector() ? SrcTy.getNumElements() : 1;
 5396 unsigned NumElts = NarrowTy.getNumElements() / NumSrcElts;
 5397 for (unsigned i = 0; i < NumParts; ++i) {
 // Collect the NumElts consecutive sources that form one NarrowTy piece.
 5399 for (unsigned j = 0; j < NumElts; ++j)
 5400 Sources.push_back(MI.getOperand(1 + i * NumElts + j).getReg());
 5401 NarrowTyElts.push_back(
 5402 MIRBuilder.buildMergeLikeInstr(NarrowTy, Sources).getReg(0));
 5403 }
 5404
 5405 MIRBuilder.buildMergeLikeInstr(DstReg, NarrowTyElts);
 5406 MI.eraseFromParent();
 5407 return Legalized;
 5408}
5409
// NOTE(review): the signature's opening lines (original 5410-5411 — upstream
// this is LegalizerHelper::fewerElementsVectorExtractInsertVectorElt(
// MachineInstr &MI, ...)) were dropped by doxygen extraction, and so was the
// final statement on original line 5487 (presumably `return
// UnableToLegalize;` for the variable-index fall-through) — confirm against
// upstream before editing. Visible behavior: splits
// G_EXTRACT/G_INSERT_VECTOR_ELT with a constant index into NarrowVecTy-sized
// pieces and operates on the piece containing the index.
 5412 unsigned TypeIdx,
 5413 LLT NarrowVecTy) {
 5414 auto [DstReg, SrcVec] = MI.getFirst2Regs();
 5415 Register InsertVal;
 5416 bool IsInsert = MI.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT;
 5417
 5418 assert((IsInsert ? TypeIdx == 0 : TypeIdx == 1) && "not a vector type index");
 5419 if (IsInsert)
 5420 InsertVal = MI.getOperand(2).getReg();
 5421
 // The index register is always the last operand for both opcodes.
 5422 Register Idx = MI.getOperand(MI.getNumOperands() - 1).getReg();
 5423 LLT VecTy = MRI.getType(SrcVec);
 5424
 5425 // If the index is a constant, we can really break this down as you would
 5426 // expect, and index into the target size pieces.
 5427 auto MaybeCst = getIConstantVRegValWithLookThrough(Idx, MRI);
 5428 if (MaybeCst) {
 5429 uint64_t IdxVal = MaybeCst->Value.getZExtValue();
 5430 // Avoid out of bounds indexing the pieces.
 5431 if (IdxVal >= VecTy.getNumElements()) {
 // Out-of-range constant index: the result is undefined.
 5432 MIRBuilder.buildUndef(DstReg);
 5433 MI.eraseFromParent();
 5434 return Legalized;
 5435 }
 5436
 // Scalar pieces: split the vector into individual elements and pick /
 // replace the one at IdxVal directly.
 5437 if (!NarrowVecTy.isVector()) {
 5438 SmallVector<Register, 8> SplitPieces;
 5439 extractParts(MI.getOperand(1).getReg(), NarrowVecTy,
 5440 VecTy.getNumElements(), SplitPieces, MIRBuilder, MRI);
 5441 if (IsInsert) {
 5442 SplitPieces[IdxVal] = InsertVal;
 5443 MIRBuilder.buildMergeLikeInstr(MI.getOperand(0).getReg(), SplitPieces);
 5444 } else {
 5445 MIRBuilder.buildCopy(MI.getOperand(0).getReg(), SplitPieces[IdxVal]);
 5446 }
 5447 } else {
 5448 SmallVector<Register, 8> VecParts;
 5449 LLT GCDTy = extractGCDType(VecParts, VecTy, NarrowVecTy, SrcVec);
 5450
 5451 // Build a sequence of NarrowTy pieces in VecParts for this operand.
 5452 LLT LCMTy = buildLCMMergePieces(VecTy, NarrowVecTy, GCDTy, VecParts,
 5453 TargetOpcode::G_ANYEXT);
 5454
 5455 unsigned NewNumElts = NarrowVecTy.getNumElements();
 5456
 // Translate the original index into (sub-vector number, index within it).
 5457 LLT IdxTy = MRI.getType(Idx);
 5458 int64_t PartIdx = IdxVal / NewNumElts;
 5459 auto NewIdx =
 5460 MIRBuilder.buildConstant(IdxTy, IdxVal - NewNumElts * PartIdx);
 5461
 5462 if (IsInsert) {
 5463 LLT PartTy = MRI.getType(VecParts[PartIdx]);
 5464
 5465 // Use the adjusted index to insert into one of the subvectors.
 5466 auto InsertPart = MIRBuilder.buildInsertVectorElement(
 5467 PartTy, VecParts[PartIdx], InsertVal, NewIdx);
 5468 VecParts[PartIdx] = InsertPart.getReg(0);
 5469
 5470 // Recombine the inserted subvector with the others to reform the result
 5471 // vector.
 5472 buildWidenedRemergeToDst(DstReg, LCMTy, VecParts);
 5473 } else {
 5474 MIRBuilder.buildExtractVectorElement(DstReg, VecParts[PartIdx], NewIdx);
 5475 }
 5476 }
 5477
 5478 MI.eraseFromParent();
 5479 return Legalized;
 5480 }
 5481
 5482 // With a variable index, we can't perform the operation in a smaller type, so
 5483 // we're forced to expand this.
 5484 //
 5485 // TODO: We could emit a chain of compare/select to figure out which piece to
 5486 // index.
 5488}
5489
// Break a non-atomic, non-extending G_LOAD/G_STORE of ValTy into a sequence
// of NarrowTy-sized memory operations, plus one leftover-typed operation when
// NarrowTy does not evenly divide ValTy. For loads, the pieces are re-merged
// into the original destination register afterwards.
// NOTE(review): the extraction of this file dropped the signature line; the
// caller in this file passes (cast<GLoadStore>(MI), TypeIdx, NarrowTy), so
// the parameters are presumably (GLoadStore &LdStMI, unsigned TypeIdx,
// LLT NarrowTy) -- confirm against the checked-in source.
5492 LLT NarrowTy) {
5493 // FIXME: Don't know how to handle secondary types yet.
5494 if (TypeIdx != 0)
5495 return UnableToLegalize;
5496
5497 if (!NarrowTy.isByteSized()) {
5498 LLVM_DEBUG(dbgs() << "Can't narrow load/store to non-byte-sized type\n");
5499 return UnableToLegalize;
5500 }
5501
5502 // This implementation doesn't work for atomics. Give up instead of doing
5503 // something invalid.
5504 if (LdStMI.isAtomic())
5505 return UnableToLegalize;
5506
5507 bool IsLoad = isa<GLoad>(LdStMI);
5508 Register ValReg = LdStMI.getReg(0);
5509 Register AddrReg = LdStMI.getPointerReg();
5510 LLT ValTy = MRI.getType(ValReg);
5511
5512 // FIXME: Do we need a distinct NarrowMemory legalize action?
// Reject extending loads / truncating stores: the register size must match
// the memory size exactly for this piecewise split to be correct.
5513 if (ValTy.getSizeInBits() != 8 * LdStMI.getMemSize().getValue()) {
5514 LLVM_DEBUG(dbgs() << "Can't narrow extload/truncstore\n");
5515 return UnableToLegalize;
5516 }
5517
// For a load we only need the part counts up front (registers are created
// inside the split loop); for a store we must split the source value now.
5518 int NumParts = -1;
5519 int NumLeftover = -1;
5520 LLT LeftoverTy;
5521 SmallVector<Register, 8> NarrowRegs, NarrowLeftoverRegs;
5522 if (IsLoad) {
5523 std::tie(NumParts, NumLeftover) = getNarrowTypeBreakDown(ValTy, NarrowTy, LeftoverTy);
5524 } else {
5525 if (extractParts(ValReg, ValTy, NarrowTy, LeftoverTy, NarrowRegs,
5526 NarrowLeftoverRegs, MIRBuilder, MRI)) {
5527 NumParts = NarrowRegs.size();
5528 NumLeftover = NarrowLeftoverRegs.size();
5529 }
5530 }
5531
// getNarrowTypeBreakDown/extractParts leave NumParts at -1 when the
// breakdown is not satisfiable.
5532 if (NumParts == -1)
5533 return UnableToLegalize;
5534
5535 LLT PtrTy = MRI.getType(AddrReg);
5536 const LLT OffsetTy = LLT::integer(PtrTy.getSizeInBits());
5537
5538 unsigned TotalSize = ValTy.getSizeInBits();
5539
5540 // Split the load/store into PartTy sized pieces starting at Offset. If this
5541 // is a load, return the new registers in ValRegs. For a store, each elements
5542 // of ValRegs should be PartTy. Returns the next offset that needs to be
5543 // handled.
5544 bool isBigEndian = MIRBuilder.getDataLayout().isBigEndian();
5545 auto MMO = LdStMI.getMMO();
// [=] captures the surrounding locals by value; Offset is a parameter, so
// each invocation continues from the bit offset it is handed.
5546 auto splitTypePieces = [=](LLT PartTy, SmallVectorImpl<Register> &ValRegs,
5547 unsigned NumParts, unsigned Offset) -> unsigned {
5548 MachineFunction &MF = MIRBuilder.getMF();
5549 unsigned PartSize = PartTy.getSizeInBits();
5550 for (unsigned Idx = 0, E = NumParts; Idx != E && Offset < TotalSize;
5551 ++Idx) {
5552 unsigned ByteOffset = Offset / 8;
5553 Register NewAddrReg;
5554
5555 MIRBuilder.materializeObjectPtrOffset(NewAddrReg, AddrReg, OffsetTy,
5556 ByteOffset);
5557
// Derive a narrowed memory operand at the piece's byte offset so alias
// info and alignment stay accurate.
5558 MachineMemOperand *NewMMO =
5559 MF.getMachineMemOperand(&MMO, ByteOffset, PartTy);
5560
5561 if (IsLoad) {
5562 Register Dst = MRI.createGenericVirtualRegister(PartTy);
5563 ValRegs.push_back(Dst);
5564 MIRBuilder.buildLoad(Dst, NewAddrReg, *NewMMO);
5565 } else {
5566 MIRBuilder.buildStore(ValRegs[Idx], NewAddrReg, *NewMMO);
5567 }
5568 Offset = isBigEndian ? Offset - PartSize : Offset + PartSize;
5569 }
5570
5571 return Offset;
5572 };
5573
// On big-endian targets the lowest-addressed piece holds the most
// significant bits, so start at the top of the value and walk downward.
5574 unsigned Offset = isBigEndian ? TotalSize - NarrowTy.getSizeInBits() : 0;
5575 unsigned HandledOffset =
5576 splitTypePieces(NarrowTy, NarrowRegs, NumParts, Offset);
5577
5578 // Handle the rest of the register if this isn't an even type breakdown.
5579 if (LeftoverTy.isValid())
5580 splitTypePieces(LeftoverTy, NarrowLeftoverRegs, NumLeftover, HandledOffset);
5581
// Loads additionally need the loaded pieces stitched back into ValReg.
5582 if (IsLoad) {
5583 insertParts(ValReg, ValTy, NarrowTy, NarrowRegs,
5584 LeftoverTy, NarrowLeftoverRegs);
5585 }
5586
5587 LdStMI.eraseFromParent();
5588 return Legalized;
5589}
5590
// Top-level dispatch for the fewer-elements (vector-splitting) legalization
// action: route each generic opcode to the breakdown strategy that knows how
// to split it into NarrowTy-sized pieces.
// NOTE(review): the extraction dropped the signature line and the line
// declaring GMI, which is used below without a visible declaration
// (presumably a GenericMachineInstr view of MI) -- confirm against the
// checked-in source.
5593 LLT NarrowTy) {
5594 using namespace TargetOpcode;
// When asked to scalarize (NarrowTy not a vector), treat it as 1 element.
5596 unsigned NumElts = NarrowTy.isVector() ? NarrowTy.getNumElements() : 1;
5597
5598 switch (MI.getOpcode()) {
// Plain elementwise operations: every operand/result splits the same way,
// so they all funnel into fewerElementsVectorMultiEltType with no
// non-vector operand indices to preserve.
5599 case G_IMPLICIT_DEF:
5600 case G_TRUNC:
5601 case G_AND:
5602 case G_OR:
5603 case G_XOR:
5604 case G_ADD:
5605 case G_SUB:
5606 case G_MUL:
5607 case G_PTR_ADD:
5608 case G_SMULH:
5609 case G_UMULH:
5610 case G_FADD:
5611 case G_FMUL:
5612 case G_FSUB:
5613 case G_FNEG:
5614 case G_FABS:
5615 case G_FCANONICALIZE:
5616 case G_FDIV:
5617 case G_FREM:
5618 case G_FMA:
5619 case G_FMAD:
5620 case G_FPOW:
5621 case G_FEXP:
5622 case G_FEXP2:
5623 case G_FEXP10:
5624 case G_FLOG:
5625 case G_FLOG2:
5626 case G_FLOG10:
5627 case G_FLDEXP:
5628 case G_FNEARBYINT:
5629 case G_FCEIL:
5630 case G_FFLOOR:
5631 case G_FRINT:
5632 case G_INTRINSIC_LRINT:
5633 case G_INTRINSIC_LLRINT:
5634 case G_INTRINSIC_ROUND:
5635 case G_INTRINSIC_ROUNDEVEN:
5636 case G_LROUND:
5637 case G_LLROUND:
5638 case G_INTRINSIC_TRUNC:
5639 case G_FMODF:
5640 case G_FCOS:
5641 case G_FSIN:
5642 case G_FTAN:
5643 case G_FACOS:
5644 case G_FASIN:
5645 case G_FATAN:
5646 case G_FATAN2:
5647 case G_FCOSH:
5648 case G_FSINH:
5649 case G_FTANH:
5650 case G_FSQRT:
5651 case G_BSWAP:
5652 case G_BITREVERSE:
5653 case G_SDIV:
5654 case G_UDIV:
5655 case G_SREM:
5656 case G_UREM:
5657 case G_SDIVREM:
5658 case G_UDIVREM:
5659 case G_SMIN:
5660 case G_SMAX:
5661 case G_UMIN:
5662 case G_UMAX:
5663 case G_ABS:
5664 case G_FMINNUM:
5665 case G_FMAXNUM:
5666 case G_FMINNUM_IEEE:
5667 case G_FMAXNUM_IEEE:
5668 case G_FMINIMUM:
5669 case G_FMAXIMUM:
5670 case G_FMINIMUMNUM:
5671 case G_FMAXIMUMNUM:
5672 case G_FSHL:
5673 case G_FSHR:
5674 case G_ROTL:
5675 case G_ROTR:
5676 case G_FREEZE:
5677 case G_SADDSAT:
5678 case G_SSUBSAT:
5679 case G_UADDSAT:
5680 case G_USUBSAT:
5681 case G_UMULO:
5682 case G_SMULO:
5683 case G_SHL:
5684 case G_LSHR:
5685 case G_ASHR:
5686 case G_SSHLSAT:
5687 case G_USHLSAT:
5688 case G_CTLZ:
5689 case G_CTLZ_ZERO_POISON:
5690 case G_CTTZ:
5691 case G_CTTZ_ZERO_POISON:
5692 case G_CTPOP:
5693 case G_CTLS:
5694 case G_FCOPYSIGN:
5695 case G_ZEXT:
5696 case G_SEXT:
5697 case G_ANYEXT:
5698 case G_FPEXT:
5699 case G_FPTRUNC:
5700 case G_SITOFP:
5701 case G_UITOFP:
5702 case G_FPTOSI:
5703 case G_FPTOUI:
5704 case G_FPTOSI_SAT:
5705 case G_FPTOUI_SAT:
5706 case G_INTTOPTR:
5707 case G_PTRTOINT:
5708 case G_ADDRSPACE_CAST:
5709 case G_UADDO:
5710 case G_USUBO:
5711 case G_UADDE:
5712 case G_USUBE:
5713 case G_SADDO:
5714 case G_SSUBO:
5715 case G_SADDE:
5716 case G_SSUBE:
5717 case G_STRICT_FADD:
5718 case G_STRICT_FSUB:
5719 case G_STRICT_FMUL:
5720 case G_STRICT_FMA:
5721 case G_STRICT_FLDEXP:
5722 case G_FFREXP:
5723 case G_TRUNC_SSAT_S:
5724 case G_TRUNC_SSAT_U:
5725 case G_TRUNC_USAT_U:
5726 return fewerElementsVectorMultiEltType(GMI, NumElts);
// The extra index-list argument below names operands that are NOT split
// (predicates, immediates, scalar operands) and must be copied as-is.
5727 case G_ICMP:
5728 case G_FCMP:
5729 return fewerElementsVectorMultiEltType(GMI, NumElts, {1 /*cpm predicate*/});
5730 case G_IS_FPCLASS:
5731 return fewerElementsVectorMultiEltType(GMI, NumElts, {2, 3 /*mask,fpsem*/});
5732 case G_SELECT:
5733 if (MRI.getType(MI.getOperand(1).getReg()).isVector())
5734 return fewerElementsVectorMultiEltType(GMI, NumElts);
5735 return fewerElementsVectorMultiEltType(GMI, NumElts, {1 /*scalar cond*/});
5736 case G_PHI:
5737 return fewerElementsVectorPhi(GMI, NumElts);
5738 case G_UNMERGE_VALUES:
5739 return fewerElementsVectorUnmergeValues(MI, TypeIdx, NarrowTy);
5740 case G_BUILD_VECTOR:
5741 assert(TypeIdx == 0 && "not a vector type index");
5742 return fewerElementsVectorMerge(MI, TypeIdx, NarrowTy);
5743 case G_CONCAT_VECTORS:
5744 if (TypeIdx != 1) // TODO: This probably does work as expected already.
5745 return UnableToLegalize;
5746 return fewerElementsVectorMerge(MI, TypeIdx, NarrowTy);
5747 case G_EXTRACT_VECTOR_ELT:
5748 case G_INSERT_VECTOR_ELT:
5749 return fewerElementsVectorExtractInsertVectorElt(MI, TypeIdx, NarrowTy);
5750 case G_LOAD:
5751 case G_STORE:
5752 return reduceLoadStoreWidth(cast<GLoadStore>(MI), TypeIdx, NarrowTy);
5753 case G_SEXT_INREG:
5754 return fewerElementsVectorMultiEltType(GMI, NumElts, {2 /*imm*/});
// NOTE(review): the case labels for the non-sequential G_VECREDUCE_*
// opcodes (original line 5755) were lost in extraction; this return is
// their shared handler.
5756 return fewerElementsVectorReductions(MI, TypeIdx, NarrowTy);
5757 case TargetOpcode::G_VECREDUCE_SEQ_FADD:
5758 case TargetOpcode::G_VECREDUCE_SEQ_FMUL:
5759 return fewerElementsVectorSeqReductions(MI, TypeIdx, NarrowTy);
5760 case G_SHUFFLE_VECTOR:
5761 return fewerElementsVectorShuffle(MI, TypeIdx, NarrowTy);
5762 case G_FPOWI:
5763 return fewerElementsVectorMultiEltType(GMI, NumElts, {2 /*pow*/});
5764 case G_BITCAST:
5765 return fewerElementsBitcast(MI, TypeIdx, NarrowTy);
5766 case G_INTRINSIC_FPTRUNC_ROUND:
5767 return fewerElementsVectorMultiEltType(GMI, NumElts, {2});
5768 default:
5769 return UnableToLegalize;
5770 }
5771}
5772
// Split a G_BITCAST whose result type is too wide into NarrowTy-sized
// bitcasts of matching source pieces, then merge the partial results back
// into DstReg.
// NOTE(review): the extraction dropped the signature line and original line
// 5794 (the initializer of SrcNarrowTy in the else-branch) -- confirm both
// against the checked-in source.
5775 LLT NarrowTy) {
5776 assert(MI.getOpcode() == TargetOpcode::G_BITCAST &&
5777 "Not a bitcast operation");
5778
5779 if (TypeIdx != 0)
5780 return UnableToLegalize;
5781
5782 auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();
5783
// How many source scalar elements fit into one NarrowTy piece.
5784 unsigned NewElemCount =
5785 NarrowTy.getSizeInBits() / SrcTy.getScalarSizeInBits();
5786 SmallVector<Register> SrcVRegs, BitcastVRegs;
5787 if (NewElemCount == 1) {
// One source element per piece: a plain unmerge produces the pieces.
5788 LLT SrcNarrowTy = SrcTy.getElementType();
5789
5790 auto Unmerge = MIRBuilder.buildUnmerge(SrcNarrowTy, SrcReg);
5791 getUnmergeResults(SrcVRegs, *Unmerge);
5792 } else {
5793 LLT SrcNarrowTy =
5795
5796 // Split the Src and Dst Reg into smaller registers
5797 if (extractGCDType(SrcVRegs, DstTy, SrcNarrowTy, SrcReg) != SrcNarrowTy)
5798 return UnableToLegalize;
5799 }
5800
5801 // Build new smaller bitcast instructions
5802 // Not supporting Leftover types for now but will have to
5803 for (Register Reg : SrcVRegs)
5804 BitcastVRegs.push_back(MIRBuilder.buildBitcast(NarrowTy, Reg).getReg(0));
5805
5806 MIRBuilder.buildMergeLikeInstr(DstReg, BitcastVRegs);
5807 MI.eraseFromParent();
5808 return Legalized;
5809}
5810
// Split a canonical G_SHUFFLE_VECTOR (Dst, Src1 and Src2 all the same type,
// power-of-two element count) into two half-width results, Lo and Hi. Each
// half is expressed as a new shuffle of at most two of the four input halves
// when the mask allows it, and otherwise assembled element-by-element with
// extracts feeding a G_BUILD_VECTOR.
// NOTE(review): the extraction dropped the declarations of Ops (the mask
// being built for each half, original line 5846) and SVOps (the
// build-vector operands, original line 5899) -- confirm their types against
// the checked-in source.
5812 MachineInstr &MI, unsigned int TypeIdx, LLT NarrowTy) {
5813 assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
5814 if (TypeIdx != 0)
5815 return UnableToLegalize;
5816
5817 auto [DstReg, DstTy, Src1Reg, Src1Ty, Src2Reg, Src2Ty] =
5818 MI.getFirst3RegLLTs();
5819 ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
5820 // The shuffle should be canonicalized by now.
5821 if (DstTy != Src1Ty)
5822 return UnableToLegalize;
5823 if (DstTy != Src2Ty)
5824 return UnableToLegalize;
5825
5826 if (!isPowerOf2_32(DstTy.getNumElements()))
5827 return UnableToLegalize;
5828
5829 // We only support splitting a shuffle into 2, so adjust NarrowTy accordingly.
5830 // Further legalization attempts will be needed to do split further.
5831 NarrowTy =
5832 DstTy.changeElementCount(DstTy.getElementCount().divideCoefficientBy(2));
5833 unsigned NewElts = NarrowTy.isVector() ? NarrowTy.getNumElements() : 1;
5834
// Halve both sources; the four halves are the candidate shuffle inputs.
5835 SmallVector<Register> SplitSrc1Regs, SplitSrc2Regs;
5836 extractParts(Src1Reg, NarrowTy, 2, SplitSrc1Regs, MIRBuilder, MRI);
5837 extractParts(Src2Reg, NarrowTy, 2, SplitSrc2Regs, MIRBuilder, MRI);
5838 Register Inputs[4] = {SplitSrc1Regs[0], SplitSrc1Regs[1], SplitSrc2Regs[0],
5839 SplitSrc2Regs[1]};
5840
5841 Register Hi, Lo;
5842
5843 // If Lo or Hi uses elements from at most two of the four input vectors, then
5844 // express it as a vector shuffle of those two inputs. Otherwise extract the
5845 // input elements by hand and construct the Lo/Hi output using a BUILD_VECTOR.
5847 for (unsigned High = 0; High < 2; ++High) {
5848 Register &Output = High ? Hi : Lo;
5849
5850 // Build a shuffle mask for the output, discovering on the fly which
5851 // input vectors to use as shuffle operands (recorded in InputUsed).
5852 // If building a suitable shuffle vector proves too hard, then bail
5853 // out with useBuildVector set.
5854 unsigned InputUsed[2] = {-1U, -1U}; // Not yet discovered.
5855 unsigned FirstMaskIdx = High * NewElts;
5856 bool UseBuildVector = false;
5857 for (unsigned MaskOffset = 0; MaskOffset < NewElts; ++MaskOffset) {
5858 // The mask element. This indexes into the input.
5859 int Idx = Mask[FirstMaskIdx + MaskOffset];
5860
5861 // The input vector this mask element indexes into.
5862 unsigned Input = (unsigned)Idx / NewElts;
5863
5864 if (Input >= std::size(Inputs)) {
5865 // The mask element does not index into any input vector.
5866 Ops.push_back(-1);
5867 continue;
5868 }
5869
5870 // Turn the index into an offset from the start of the input vector.
5871 Idx -= Input * NewElts;
5872
5873 // Find or create a shuffle vector operand to hold this input.
5874 unsigned OpNo;
5875 for (OpNo = 0; OpNo < std::size(InputUsed); ++OpNo) {
5876 if (InputUsed[OpNo] == Input) {
5877 // This input vector is already an operand.
5878 break;
5879 } else if (InputUsed[OpNo] == -1U) {
5880 // Create a new operand for this input vector.
5881 InputUsed[OpNo] = Input;
5882 break;
5883 }
5884 }
5885
5886 if (OpNo >= std::size(InputUsed)) {
5887 // More than two input vectors used! Give up on trying to create a
5888 // shuffle vector. Insert all elements into a BUILD_VECTOR instead.
5889 UseBuildVector = true;
5890 break;
5891 }
5892
5893 // Add the mask index for the new shuffle vector.
5894 Ops.push_back(Idx + OpNo * NewElts);
5895 }
5896
5897 if (UseBuildVector) {
5898 LLT EltTy = NarrowTy.getElementType();
5900
5901 // Extract the input elements by hand.
5902 for (unsigned MaskOffset = 0; MaskOffset < NewElts; ++MaskOffset) {
5903 // The mask element. This indexes into the input.
5904 int Idx = Mask[FirstMaskIdx + MaskOffset];
5905
5906 // The input vector this mask element indexes into.
5907 unsigned Input = (unsigned)Idx / NewElts;
5908
5909 if (Input >= std::size(Inputs)) {
5910 // The mask element is "undef" or indexes off the end of the input.
5911 SVOps.push_back(MIRBuilder.buildUndef(EltTy).getReg(0));
5912 continue;
5913 }
5914
5915 // Turn the index into an offset from the start of the input vector.
5916 Idx -= Input * NewElts;
5917
5918 // Extract the vector element by hand.
5919 SVOps.push_back(MIRBuilder
5920 .buildExtractVectorElement(
5921 EltTy, Inputs[Input],
5922 MIRBuilder.buildConstant(LLT::scalar(32), Idx))
5923 .getReg(0));
5924 }
5925
5926 // Construct the Lo/Hi output using a G_BUILD_VECTOR.
5927 Output = MIRBuilder.buildBuildVector(NarrowTy, SVOps).getReg(0);
5928 } else if (InputUsed[0] == -1U) {
5929 // No input vectors were used! The result is undefined.
5930 Output = MIRBuilder.buildUndef(NarrowTy).getReg(0);
5931 } else if (NewElts == 1) {
// Single-element halves degenerate to a copy of the one used input.
5932 Output = MIRBuilder.buildCopy(NarrowTy, Inputs[InputUsed[0]]).getReg(0);
5933 } else {
5934 Register Op0 = Inputs[InputUsed[0]];
5935 // If only one input was used, use an undefined vector for the other.
5936 Register Op1 = InputUsed[1] == -1U
5937 ? MIRBuilder.buildUndef(NarrowTy).getReg(0)
5938 : Inputs[InputUsed[1]];
5939 // At least one input vector was used. Create a new shuffle vector.
5940 Output = MIRBuilder.buildShuffleVector(NarrowTy, Op0, Op1, Ops).getReg(0);
5941 }
5942
// Reset the scratch mask before building the other half.
5943 Ops.clear();
5944 }
5945
5946 MIRBuilder.buildMergeLikeInstr(DstReg, {Lo, Hi});
5947 MI.eraseFromParent();
5948 return Legalized;
5949}
5950
// Split a non-sequential G_VECREDUCE_* by re-association: reduce NarrowTy
// sub-vectors independently (or fully scalarize when NarrowTy is scalar) and
// then combine the partial results with the reduction's scalar opcode.
// NOTE(review): the extraction dropped the line starting the MIRBuilder call
// chain inside the tree-reduction loop (original line 5985) -- confirm
// against the checked-in source.
5952 MachineInstr &MI, unsigned int TypeIdx, LLT NarrowTy) {
5953 auto &RdxMI = cast<GVecReduce>(MI);
5954
5955 if (TypeIdx != 1)
5956 return UnableToLegalize;
5957
5958 // The semantics of the normal non-sequential reductions allow us to freely
5959 // re-associate the operation.
5960 auto [DstReg, DstTy, SrcReg, SrcTy] = RdxMI.getFirst2RegLLTs();
5961
5962 if (NarrowTy.isVector() &&
5963 (SrcTy.getNumElements() % NarrowTy.getNumElements() != 0))
5964 return UnableToLegalize;
5965
5966 unsigned ScalarOpc = RdxMI.getScalarOpcForReduction();
5967 SmallVector<Register> SplitSrcs;
5968 // If NarrowTy is a scalar then we're being asked to scalarize.
5969 const unsigned NumParts =
5970 NarrowTy.isVector() ? SrcTy.getNumElements() / NarrowTy.getNumElements()
5971 : SrcTy.getNumElements();
5972
5973 extractParts(SrcReg, NarrowTy, NumParts, SplitSrcs, MIRBuilder, MRI);
5974 if (NarrowTy.isScalar()) {
// Full scalarization path: reduce the individual elements directly.
5975 if (DstTy != NarrowTy)
5976 return UnableToLegalize; // FIXME: handle implicit extensions.
5977
5978 if (isPowerOf2_32(NumParts)) {
5979 // Generate a tree of scalar operations to reduce the critical path.
5980 SmallVector<Register> PartialResults;
5981 unsigned NumPartsLeft = NumParts;
5982 while (NumPartsLeft > 1) {
// Pair up adjacent values; each pass halves the number of live values.
5983 for (unsigned Idx = 0; Idx < NumPartsLeft - 1; Idx += 2) {
5984 PartialResults.emplace_back(
5986 .buildInstr(ScalarOpc, {NarrowTy},
5987 {SplitSrcs[Idx], SplitSrcs[Idx + 1]})
5988 .getReg(0));
5989 }
5990 SplitSrcs = PartialResults;
5991 PartialResults.clear();
5992 NumPartsLeft = SplitSrcs.size();
5993 }
5994 assert(SplitSrcs.size() == 1);
5995 MIRBuilder.buildCopy(DstReg, SplitSrcs[0]);
5996 MI.eraseFromParent();
5997 return Legalized;
5998 }
5999 // If we can't generate a tree, then just do sequential operations.
6000 Register Acc = SplitSrcs[0];
6001 for (unsigned Idx = 1; Idx < NumParts; ++Idx)
6002 Acc = MIRBuilder.buildInstr(ScalarOpc, {NarrowTy}, {Acc, SplitSrcs[Idx]})
6003 .getReg(0);
6004 MIRBuilder.buildCopy(DstReg, Acc);
6005 MI.eraseFromParent();
6006 return Legalized;
6007 }
// Vector-narrowing path: reduce each NarrowTy piece with the original
// reduction opcode, then combine the per-piece scalars.
6008 SmallVector<Register> PartialReductions;
6009 for (unsigned Part = 0; Part < NumParts; ++Part) {
6010 PartialReductions.push_back(
6011 MIRBuilder.buildInstr(RdxMI.getOpcode(), {DstTy}, {SplitSrcs[Part]})
6012 .getReg(0));
6013 }
6014
6015 // If the types involved are powers of 2, we can generate intermediate vector
6016 // ops, before generating a final reduction operation.
6017 if (isPowerOf2_32(SrcTy.getNumElements()) &&
6018 isPowerOf2_32(NarrowTy.getNumElements())) {
6019 return tryNarrowPow2Reduction(MI, SrcReg, SrcTy, NarrowTy, ScalarOpc);
6020 }
6021
6022 Register Acc = PartialReductions[0];
6023 for (unsigned Part = 1; Part < NumParts; ++Part) {
// The last combine writes DstReg directly instead of a fresh register.
6024 if (Part == NumParts - 1) {
6025 MIRBuilder.buildInstr(ScalarOpc, {DstReg},
6026 {Acc, PartialReductions[Part]});
6027 } else {
6028 Acc = MIRBuilder
6029 .buildInstr(ScalarOpc, {DstTy}, {Acc, PartialReductions[Part]})
6030 .getReg(0);
6031 }
6032 }
6033 MI.eraseFromParent();
6034 return Legalized;
6035}
6036
6039 unsigned int TypeIdx,
6040 LLT NarrowTy) {
6041 auto [DstReg, DstTy, ScalarReg, ScalarTy, SrcReg, SrcTy] =
6042 MI.getFirst3RegLLTs();
6043 if (!NarrowTy.isScalar() || TypeIdx != 2 || DstTy != ScalarTy ||
6044 DstTy != NarrowTy)
6045 return UnableToLegalize;
6046
6047 assert((MI.getOpcode() == TargetOpcode::G_VECREDUCE_SEQ_FADD ||
6048 MI.getOpcode() == TargetOpcode::G_VECREDUCE_SEQ_FMUL) &&
6049 "Unexpected vecreduce opcode");
6050 unsigned ScalarOpc = MI.getOpcode() == TargetOpcode::G_VECREDUCE_SEQ_FADD
6051 ? TargetOpcode::G_FADD
6052 : TargetOpcode::G_FMUL;
6053
6054 SmallVector<Register> SplitSrcs;
6055 unsigned NumParts = SrcTy.getNumElements();
6056 extractParts(SrcReg, NarrowTy, NumParts, SplitSrcs, MIRBuilder, MRI);
6057 Register Acc = ScalarReg;
6058 for (unsigned i = 0; i < NumParts; i++)
6059 Acc = MIRBuilder.buildInstr(ScalarOpc, {NarrowTy}, {Acc, SplitSrcs[i]})
6060 .getReg(0);
6061
6062 MIRBuilder.buildCopy(DstReg, Acc);
6063 MI.eraseFromParent();
6064 return Legalized;
6065}
6066
// Tree-reduce a power-of-two-element source using NarrowTy-sized VECTOR ops
// until a single NarrowTy value remains, then rewrite MI's source operand to
// that value so the final reduction runs on the narrow type. MI itself is
// kept (only its operand changes), so the observer is notified.
6068LegalizerHelper::tryNarrowPow2Reduction(MachineInstr &MI, Register SrcReg,
6069 LLT SrcTy, LLT NarrowTy,
6070 unsigned ScalarOpc) {
6071 SmallVector<Register> SplitSrcs;
6072 // Split the sources into NarrowTy size pieces.
6073 extractParts(SrcReg, NarrowTy,
6074 SrcTy.getNumElements() / NarrowTy.getNumElements(), SplitSrcs,
6075 MIRBuilder, MRI);
6076 // We're going to do a tree reduction using vector operations until we have
6077 // one NarrowTy size value left.
6078 while (SplitSrcs.size() > 1) {
6079 SmallVector<Register> PartialRdxs;
6080 for (unsigned Idx = 0; Idx < SplitSrcs.size()-1; Idx += 2) {
6081 Register LHS = SplitSrcs[Idx];
6082 Register RHS = SplitSrcs[Idx + 1];
6083 // Create the intermediate vector op.
6084 Register Res =
6085 MIRBuilder.buildInstr(ScalarOpc, {NarrowTy}, {LHS, RHS}).getReg(0);
6086 PartialRdxs.push_back(Res);
6087 }
6088 SplitSrcs = std::move(PartialRdxs);
6089 }
6090 // Finally generate the requested NarrowTy based reduction.
6091 Observer.changingInstr(MI);
6092 MI.getOperand(1).setReg(SplitSrcs[0]);
6093 Observer.changedInstr(MI);
6094 return Legalized;
6095}
6096
// Narrow a G_SHL/G_LSHR/G_ASHR whose shift amount Amt is a known constant by
// splitting the value into low/high half-width registers (InL/InH) and
// emitting the minimal half-width sequence for each amount range: zero,
// greater than the full width, greater than the half width, exactly the half
// width, or within the low half (which needs an OR of two partial shifts).
// NOTE(review): the extraction dropped the signature line; the caller passes
// (MI, VRegAndVal->Value, HalfTy, ShiftAmtTy) -- confirm against the
// checked-in source.
6099 const LLT HalfTy, const LLT AmtTy) {
6100
6101 Register InL = MRI.createGenericVirtualRegister(HalfTy);
6102 Register InH = MRI.createGenericVirtualRegister(HalfTy);
6103 MIRBuilder.buildUnmerge({InL, InH}, MI.getOperand(1));
6104
// Shift by zero: just re-merge the unchanged halves.
6105 if (Amt.isZero()) {
6106 MIRBuilder.buildMergeLikeInstr(MI.getOperand(0), {InL, InH});
6107 MI.eraseFromParent();
6108 return Legalized;
6109 }
6110
6111 LLT NVT = HalfTy;
6112 unsigned NVTBits = HalfTy.getSizeInBits();
6113 unsigned VTBits = 2 * NVTBits;
6114
6115 SrcOp Lo(Register(0)), Hi(Register(0));
6116 if (MI.getOpcode() == TargetOpcode::G_SHL) {
6117 if (Amt.ugt(VTBits)) {
6118 Lo = Hi = MIRBuilder.buildConstant(NVT, 0);
6119 } else if (Amt.ugt(NVTBits)) {
6120 Lo = MIRBuilder.buildConstant(NVT, 0);
6121 Hi = MIRBuilder.buildShl(NVT, InL,
6122 MIRBuilder.buildConstant(AmtTy, Amt - NVTBits));
6123 } else if (Amt == NVTBits) {
6124 Lo = MIRBuilder.buildConstant(NVT, 0);
6125 Hi = InL;
6126 } else {
6127 Lo = MIRBuilder.buildShl(NVT, InL, MIRBuilder.buildConstant(AmtTy, Amt));
6128 auto OrLHS =
6129 MIRBuilder.buildShl(NVT, InH, MIRBuilder.buildConstant(AmtTy, Amt));
6130 auto OrRHS = MIRBuilder.buildLShr(
6131 NVT, InL, MIRBuilder.buildConstant(AmtTy, -Amt + NVTBits));
6132 Hi = MIRBuilder.buildOr(NVT, OrLHS, OrRHS);
6133 }
6134 } else if (MI.getOpcode() == TargetOpcode::G_LSHR) {
6135 if (Amt.ugt(VTBits)) {
6136 Lo = Hi = MIRBuilder.buildConstant(NVT, 0);
6137 } else if (Amt.ugt(NVTBits)) {
6138 Lo = MIRBuilder.buildLShr(NVT, InH,
6139 MIRBuilder.buildConstant(AmtTy, Amt - NVTBits));
6140 Hi = MIRBuilder.buildConstant(NVT, 0);
6141 } else if (Amt == NVTBits) {
6142 Lo = InH;
6143 Hi = MIRBuilder.buildConstant(NVT, 0);
6144 } else {
6145 auto ShiftAmtConst = MIRBuilder.buildConstant(AmtTy, Amt);
6146
6147 auto OrLHS = MIRBuilder.buildLShr(NVT, InL, ShiftAmtConst);
6148 auto OrRHS = MIRBuilder.buildShl(
6149 NVT, InH, MIRBuilder.buildConstant(AmtTy, -Amt + NVTBits));
6150
6151 Lo = MIRBuilder.buildOr(NVT, OrLHS, OrRHS);
6152 Hi = MIRBuilder.buildLShr(NVT, InH, ShiftAmtConst);
6153 }
// Remaining case: G_ASHR. Out-of-range and high halves are filled with
// the sign (an arithmetic shift by NVTBits - 1) instead of zero.
6154 } else {
6155 if (Amt.ugt(VTBits)) {
6156 Hi = Lo = MIRBuilder.buildAShr(
6157 NVT, InH, MIRBuilder.buildConstant(AmtTy, NVTBits - 1));
6158 } else if (Amt.ugt(NVTBits)) {
6159 Lo = MIRBuilder.buildAShr(NVT, InH,
6160 MIRBuilder.buildConstant(AmtTy, Amt - NVTBits));
6161 Hi = MIRBuilder.buildAShr(NVT, InH,
6162 MIRBuilder.buildConstant(AmtTy, NVTBits - 1));
6163 } else if (Amt == NVTBits) {
6164 Lo = InH;
6165 Hi = MIRBuilder.buildAShr(NVT, InH,
6166 MIRBuilder.buildConstant(AmtTy, NVTBits - 1));
6167 } else {
6168 auto ShiftAmtConst = MIRBuilder.buildConstant(AmtTy, Amt);
6169
6170 auto OrLHS = MIRBuilder.buildLShr(NVT, InL, ShiftAmtConst);
6171 auto OrRHS = MIRBuilder.buildShl(
6172 NVT, InH, MIRBuilder.buildConstant(AmtTy, -Amt + NVTBits));
6173
6174 Lo = MIRBuilder.buildOr(NVT, OrLHS, OrRHS);
6175 Hi = MIRBuilder.buildAShr(NVT, InH, ShiftAmtConst);
6176 }
6177 }
6178
6179 MIRBuilder.buildMergeLikeInstr(MI.getOperand(0), {Lo, Hi});
6180 MI.eraseFromParent();
6181
6182 return Legalized;
6183}
6184
// Narrow a scalar shift (G_SHL/G_LSHR/G_ASHR). TypeIdx 1 only narrows the
// amount operand. For TypeIdx 0: prefer the direct multi-way decomposition
// for very wide shifts (>= 8 parts), take the constant-amount fast path when
// the amount is known, and otherwise expand into half-width operations
// selected between the "short" (amount < half width) and "long" (amount >=
// half width) cases.
// NOTE(review): the extraction dropped the signature line and the
// declaration of HiL used in the LSHR/ASHR arm below (original line 6287) --
// confirm both against the checked-in source.
6187 LLT RequestedTy) {
6188 if (TypeIdx == 1) {
6189 Observer.changingInstr(MI);
6190 narrowScalarSrc(MI, RequestedTy, 2);
6191 Observer.changedInstr(MI);
6192 return Legalized;
6193 }
6194
6195 Register DstReg = MI.getOperand(0).getReg();
6196 LLT DstTy = MRI.getType(DstReg);
6197 if (DstTy.isVector())
6198 return UnableToLegalize;
6199
6200 Register Amt = MI.getOperand(2).getReg();
6201 LLT ShiftAmtTy = MRI.getType(Amt);
6202 const unsigned DstEltSize = DstTy.getScalarSizeInBits();
// The binary fallback halves the width, so an odd bit width cannot be split.
6203 if (DstEltSize % 2 != 0)
6204 return UnableToLegalize;
6205
6206 // Check if we should use multi-way splitting instead of recursive binary
6207 // splitting.
6208 //
6209 // Multi-way splitting directly decomposes wide shifts (e.g., 128-bit ->
6210 // 4×32-bit) in a single legalization step, avoiding the recursive overhead
6211 // and dependency chains created by usual binary splitting approach
6212 // (128->64->32).
6213 //
6214 // The >= 8 parts threshold ensures we only use this optimization when binary
6215 // splitting would require multiple recursive passes, avoiding overhead for
6216 // simple 2-way splits where binary approach is sufficient.
6217 if (RequestedTy.isValid() && RequestedTy.isScalar() &&
6218 DstEltSize % RequestedTy.getSizeInBits() == 0) {
6219 const unsigned NumParts = DstEltSize / RequestedTy.getSizeInBits();
6220 // Use multiway if we have 8 or more parts (i.e., would need 3+ recursive
6221 // steps).
6222 if (NumParts >= 8)
6223 return narrowScalarShiftMultiway(MI, RequestedTy);
6224 }
6225
6226 // Fall back to binary splitting:
6227 // Ignore the input type. We can only go to exactly half the size of the
6228 // input. If that isn't small enough, the resulting pieces will be further
6229 // legalized.
6230 const unsigned NewBitSize = DstEltSize / 2;
6231 const LLT HalfTy = DstTy.getScalarType().changeElementSize(NewBitSize);
6232 const LLT CondTy = LLT::integer(1);
6233
// Constant amounts get a much cheaper select-free expansion.
6234 if (auto VRegAndVal = getIConstantVRegValWithLookThrough(Amt, MRI)) {
6235 return narrowScalarShiftByConstant(MI, VRegAndVal->Value, HalfTy,
6236 ShiftAmtTy);
6237 }
6238
6239 // TODO: Expand with known bits.
6240
6241 // Handle the fully general expansion by an unknown amount.
6242 auto NewBits = MIRBuilder.buildConstant(ShiftAmtTy, NewBitSize);
6243
6244 Register InL = MRI.createGenericVirtualRegister(HalfTy);
6245 Register InH = MRI.createGenericVirtualRegister(HalfTy);
6246 MIRBuilder.buildUnmerge({InL, InH}, MI.getOperand(1));
6247
// AmtExcess = Amt - half width (used when the shift crosses halves);
// AmtLack = half width - Amt (the carry shift within the short case).
6248 auto AmtExcess = MIRBuilder.buildSub(ShiftAmtTy, Amt, NewBits);
6249 auto AmtLack = MIRBuilder.buildSub(ShiftAmtTy, NewBits, Amt);
6250
6251 auto Zero = MIRBuilder.buildConstant(ShiftAmtTy, 0);
6252 auto IsShort = MIRBuilder.buildICmp(ICmpInst::ICMP_ULT, CondTy, Amt, NewBits);
6253 auto IsZero = MIRBuilder.buildICmp(ICmpInst::ICMP_EQ, CondTy, Amt, Zero);
6254
6255 Register ResultRegs[2];
6256 switch (MI.getOpcode()) {
6257 case TargetOpcode::G_SHL: {
6258 // Short: ShAmt < NewBitSize
6259 auto LoS = MIRBuilder.buildShl(HalfTy, InL, Amt);
6260
6261 auto LoOr = MIRBuilder.buildLShr(HalfTy, InL, AmtLack);
6262 auto HiOr = MIRBuilder.buildShl(HalfTy, InH, Amt);
6263 auto HiS = MIRBuilder.buildOr(HalfTy, LoOr, HiOr);
6264
6265 // Long: ShAmt >= NewBitSize
6266 auto LoL = MIRBuilder.buildConstant(HalfTy, 0); // Lo part is zero.
6267 auto HiL = MIRBuilder.buildShl(HalfTy, InL, AmtExcess); // Hi from Lo part.
6268
// The extra IsZero select protects the short case's InH carry (AmtLack
// would be a full-width shift when Amt == 0).
6269 auto Lo = MIRBuilder.buildSelect(HalfTy, IsShort, LoS, LoL);
6270 auto Hi = MIRBuilder.buildSelect(
6271 HalfTy, IsZero, InH, MIRBuilder.buildSelect(HalfTy, IsShort, HiS, HiL));
6272
6273 ResultRegs[0] = Lo.getReg(0);
6274 ResultRegs[1] = Hi.getReg(0);
6275 break;
6276 }
6277 case TargetOpcode::G_LSHR:
6278 case TargetOpcode::G_ASHR: {
6279 // Short: ShAmt < NewBitSize
6280 auto HiS = MIRBuilder.buildInstr(MI.getOpcode(), {HalfTy}, {InH, Amt});
6281
6282 auto LoOr = MIRBuilder.buildLShr(HalfTy, InL, Amt);
6283 auto HiOr = MIRBuilder.buildShl(HalfTy, InH, AmtLack);
6284 auto LoS = MIRBuilder.buildOr(HalfTy, LoOr, HiOr);
6285
6286 // Long: ShAmt >= NewBitSize
6288 if (MI.getOpcode() == TargetOpcode::G_LSHR) {
6289 HiL = MIRBuilder.buildConstant(HalfTy, 0); // Hi part is zero.
6290 } else {
6291 auto ShiftAmt = MIRBuilder.buildConstant(ShiftAmtTy, NewBitSize - 1);
6292 HiL = MIRBuilder.buildAShr(HalfTy, InH, ShiftAmt); // Sign of Hi part.
6293 }
6294 auto LoL = MIRBuilder.buildInstr(MI.getOpcode(), {HalfTy},
6295 {InH, AmtExcess}); // Lo from Hi part.
6296
6297 auto Lo = MIRBuilder.buildSelect(
6298 HalfTy, IsZero, InL, MIRBuilder.buildSelect(HalfTy, IsShort, LoS, LoL));
6299
6300 auto Hi = MIRBuilder.buildSelect(HalfTy, IsShort, HiS, HiL);
6301
6302 ResultRegs[0] = Lo.getReg(0);
6303 ResultRegs[1] = Hi.getReg(0);
6304 break;
6305 }
6306 default:
6307 llvm_unreachable("not a shift");
6308 }
6309
6310 MIRBuilder.buildMergeLikeInstr(DstReg, ResultRegs);
6311 MI.eraseFromParent();
6312 return Legalized;
6313}
6314
// Build one TargetTy-sized output part of a multi-part shift whose amount is
// a compile-time constant. Params.WordShift/BitShift must hold constant
// registers (asserted below). Each output part is either zero/sign fill, a
// directly relocated source part (word-aligned shift), or the OR of two
// adjacent source parts shifted by BitShift and its inverse.
// NOTE(review): the extraction dropped the signature's first line (return
// type, function name and first parameter) -- confirm the exact name against
// the checked-in source; from the body it returns a Register.
6316 unsigned PartIdx,
6317 unsigned NumParts,
6318 ArrayRef<Register> SrcParts,
6319 const ShiftParams &Params,
6320 LLT TargetTy, LLT ShiftAmtTy) {
// Recover the constant word/bit components of the shift amount.
6321 auto WordShiftConst = getIConstantVRegVal(Params.WordShift, MRI);
6322 auto BitShiftConst = getIConstantVRegVal(Params.BitShift, MRI);
6323 assert(WordShiftConst && BitShiftConst && "Expected constants");
6324
6325 const unsigned ShiftWords = WordShiftConst->getZExtValue();
6326 const unsigned ShiftBits = BitShiftConst->getZExtValue();
// ShiftBits == 0 means the shift is word-aligned: parts move wholesale and
// no bits cross part boundaries.
6327 const bool NeedsInterWordShift = ShiftBits != 0;
6328
6329 switch (Opcode) {
6330 case TargetOpcode::G_SHL: {
6331 // Data moves from lower indices to higher indices
6332 // If this part would come from a source beyond our range, it's zero
6333 if (PartIdx < ShiftWords)
6334 return Params.Zero;
6335
6336 unsigned SrcIdx = PartIdx - ShiftWords;
6337 if (!NeedsInterWordShift)
6338 return SrcParts[SrcIdx];
6339
6340 // Combine shifted main part with carry from previous part
6341 auto Hi = MIRBuilder.buildShl(TargetTy, SrcParts[SrcIdx], Params.BitShift);
6342 if (SrcIdx > 0) {
6343 auto Lo = MIRBuilder.buildLShr(TargetTy, SrcParts[SrcIdx - 1],
6344 Params.InvBitShift);
6345 return MIRBuilder.buildOr(TargetTy, Hi, Lo).getReg(0);
6346 }
6347 return Hi.getReg(0);
6348 }
6349
6350 case TargetOpcode::G_LSHR: {
// Data moves from higher indices to lower; out-of-range parts are zero.
6351 unsigned SrcIdx = PartIdx + ShiftWords;
6352 if (SrcIdx >= NumParts)
6353 return Params.Zero;
6354 if (!NeedsInterWordShift)
6355 return SrcParts[SrcIdx];
6356
6357 // Combine shifted main part with carry from next part
6358 auto Lo = MIRBuilder.buildLShr(TargetTy, SrcParts[SrcIdx], Params.BitShift);
6359 if (SrcIdx + 1 < NumParts) {
6360 auto Hi = MIRBuilder.buildShl(TargetTy, SrcParts[SrcIdx + 1],
6361 Params.InvBitShift);
6362 return MIRBuilder.buildOr(TargetTy, Lo, Hi).getReg(0);
6363 }
6364 return Lo.getReg(0);
6365 }
6366
6367 case TargetOpcode::G_ASHR: {
6368 // Like LSHR but preserves sign bit
6369 unsigned SrcIdx = PartIdx + ShiftWords;
6370 if (SrcIdx >= NumParts)
6371 return Params.SignBit;
6372 if (!NeedsInterWordShift)
6373 return SrcParts[SrcIdx];
6374
6375 // Only the original MSB part uses arithmetic shift to preserve sign. All
6376 // other parts use logical shift since they're just moving data bits.
6377 auto Lo =
6378 (SrcIdx == NumParts - 1)
6379 ? MIRBuilder.buildAShr(TargetTy, SrcParts[SrcIdx], Params.BitShift)
6380 : MIRBuilder.buildLShr(TargetTy, SrcParts[SrcIdx], Params.BitShift);
// Past the top part, the carry source is the sign-fill value.
6381 Register HiSrc =
6382 (SrcIdx + 1 < NumParts) ? SrcParts[SrcIdx + 1] : Params.SignBit;
6383 auto Hi = MIRBuilder.buildShl(TargetTy, HiSrc, Params.InvBitShift);
6384 return MIRBuilder.buildOr(TargetTy, Lo, Hi).getReg(0);
6385 }
6386
6387 default:
6388 llvm_unreachable("not a shift");
6389 }
6390}
6391
// Build one output part of a multi-part shift with a VARIABLE (sub-word)
// amount: shift MainOperand by ShiftAmt and OR in carry bits from the
// adjacent part (CarryOperand), guarding the carry with a select so that a
// zero bit-shift does not produce a poison full-width carry shift.
// Returns the combined Register; if CarryOperand is invalid, just the
// shifted main operand.
// NOTE(review): the extraction dropped the signature's first line (return
// type, function name and the Opcode parameter) -- confirm against the
// checked-in source.
6393 Register MainOperand,
6394 Register ShiftAmt,
6395 LLT TargetTy,
6396 Register CarryOperand) {
6397 // This helper generates a single output part for variable shifts by combining
6398 // the main operand (shifted by BitShift) with carry bits from an adjacent
6399 // part.
6400
6401 // For G_ASHR, individual parts don't have their own sign bit, only the
6402 // complete value does. So we use LSHR for the main operand shift in ASHR
6403 // context.
6404 unsigned MainOpcode = (Opcode == TargetOpcode::G_ASHR)
6405 ? static_cast<unsigned>(TargetOpcode::G_LSHR)
6406 : Opcode;
6407
6408 // Perform the primary shift on the main operand
6409 Register MainShifted =
6410 MIRBuilder.buildInstr(MainOpcode, {TargetTy}, {MainOperand, ShiftAmt})
6411 .getReg(0);
6412
6413 // No carry operand available
6414 if (!CarryOperand.isValid())
6415 return MainShifted;
6416
6417 // If BitShift is 0 (word-aligned shift), no inter-word bit movement occurs,
6418 // so carry bits aren't needed.
6419 LLT ShiftAmtTy = MRI.getType(ShiftAmt);
6420 auto ZeroConst = MIRBuilder.buildConstant(ShiftAmtTy, 0);
6421 LLT BoolTy = LLT::scalar(1);
6422 auto IsZeroBitShift =
6423 MIRBuilder.buildICmp(ICmpInst::ICMP_EQ, BoolTy, ShiftAmt, ZeroConst);
6424
6425 // Extract bits from the adjacent part that will "carry over" into this part.
6426 // The carry direction is opposite to the main shift direction, so we can
6427 // align the two shifted values before combining them with OR.
6428
6429 // Determine the carry shift opcode (opposite direction)
6430 unsigned CarryOpcode = (Opcode == TargetOpcode::G_SHL) ? TargetOpcode::G_LSHR
6431 : TargetOpcode::G_SHL;
6432
6433 // Calculate inverse shift amount: BitWidth - ShiftAmt
6434 auto TargetBitsConst =
6435 MIRBuilder.buildConstant(ShiftAmtTy, TargetTy.getScalarSizeInBits());
6436 auto InvShiftAmt = MIRBuilder.buildSub(ShiftAmtTy, TargetBitsConst, ShiftAmt);
6437
6438 // Shift the carry operand
// NOTE(review): the line starting this builder-call chain (original 6440,
// presumably "MIRBuilder") was lost in extraction.
6439 Register CarryBits =
6441 .buildInstr(CarryOpcode, {TargetTy}, {CarryOperand, InvShiftAmt})
6442 .getReg(0);
6443
6444 // If BitShift is 0, don't include carry bits (InvShiftAmt would equal
6445 // TargetBits which would be poison for the individual carry shift operation).
6446 auto ZeroReg = MIRBuilder.buildConstant(TargetTy, 0);
6447 Register SafeCarryBits =
6448 MIRBuilder.buildSelect(TargetTy, IsZeroBitShift, ZeroReg, CarryBits)
6449 .getReg(0);
6450
6451 // Combine the main shifted part with the carry bits
6452 return MIRBuilder.buildOr(TargetTy, MainShifted, SafeCarryBits).getReg(0);
6453}
6454
6457 const APInt &Amt,
6458 LLT TargetTy,
6459 LLT ShiftAmtTy) {
6460 // Any wide shift can be decomposed into WordShift + BitShift components.
6461 // When shift amount is known constant, directly compute the decomposition
6462 // values and generate constant registers.
6463 Register DstReg = MI.getOperand(0).getReg();
6464 Register SrcReg = MI.getOperand(1).getReg();
6465 LLT DstTy = MRI.getType(DstReg);
6466
6467 const unsigned DstBits = DstTy.getScalarSizeInBits();
6468 const unsigned TargetBits = TargetTy.getScalarSizeInBits();
6469 const unsigned NumParts = DstBits / TargetBits;
6470
6471 assert(DstBits % TargetBits == 0 && "Target type must evenly divide source");
6472
6473 // When the shift amount is known at compile time, we just calculate which
6474 // source parts contribute to each output part.
6475
6476 SmallVector<Register, 8> SrcParts;
6477 extractParts(SrcReg, TargetTy, NumParts, SrcParts, MIRBuilder, MRI);
6478
6479 if (Amt.isZero()) {
6480 // No shift needed, just copy
6481 MIRBuilder.buildMergeLikeInstr(DstReg, SrcParts);
6482 MI.eraseFromParent();
6483 return Legalized;
6484 }
6485
6486 ShiftParams Params;
6487 const unsigned ShiftWords = Amt.getZExtValue() / TargetBits;
6488 const unsigned ShiftBits = Amt.getZExtValue() % TargetBits;
6489
6490 // Generate constants and values needed by all shift types
6491 Params.WordShift = MIRBuilder.buildConstant(ShiftAmtTy, ShiftWords).getReg(0);
6492 Params.BitShift = MIRBuilder.buildConstant(ShiftAmtTy, ShiftBits).getReg(0);
6493 Params.InvBitShift =
6494 MIRBuilder.buildConstant(ShiftAmtTy, TargetBits - ShiftBits).getReg(0);
6495 Params.Zero = MIRBuilder.buildConstant(TargetTy, 0).getReg(0);
6496
6497 // For ASHR, we need the sign-extended value to fill shifted-out positions
6498 if (MI.getOpcode() == TargetOpcode::G_ASHR)
6499 Params.SignBit =
6501 .buildAShr(TargetTy, SrcParts[SrcParts.size() - 1],
6502 MIRBuilder.buildConstant(ShiftAmtTy, TargetBits - 1))
6503 .getReg(0);
6504
6505 SmallVector<Register, 8> DstParts(NumParts);
6506 for (unsigned I = 0; I < NumParts; ++I)
6507 DstParts[I] = buildConstantShiftPart(MI.getOpcode(), I, NumParts, SrcParts,
6508 Params, TargetTy, ShiftAmtTy);
6509
6510 MIRBuilder.buildMergeLikeInstr(DstReg, DstParts);
6511 MI.eraseFromParent();
6512 return Legalized;
6513}
6514
6517 Register DstReg = MI.getOperand(0).getReg();
6518 Register SrcReg = MI.getOperand(1).getReg();
6519 Register AmtReg = MI.getOperand(2).getReg();
6520 LLT DstTy = MRI.getType(DstReg);
6521 LLT ShiftAmtTy = MRI.getType(AmtReg);
6522
6523 const unsigned DstBits = DstTy.getScalarSizeInBits();
6524 const unsigned TargetBits = TargetTy.getScalarSizeInBits();
6525 const unsigned NumParts = DstBits / TargetBits;
6526
6527 assert(DstBits % TargetBits == 0 && "Target type must evenly divide source");
6528 assert(isPowerOf2_32(TargetBits) && "Target bit width must be power of 2");
6529
6530 // If the shift amount is known at compile time, we can use direct indexing
6531 // instead of generating select chains in the general case.
6532 if (auto VRegAndVal = getIConstantVRegValWithLookThrough(AmtReg, MRI))
6533 return narrowScalarShiftByConstantMultiway(MI, VRegAndVal->Value, TargetTy,
6534 ShiftAmtTy);
6535
6536 // For runtime-variable shift amounts, we must generate a more complex
6537 // sequence that handles all possible shift values using select chains.
6538
6539 // Split the input into target-sized pieces
6540 SmallVector<Register, 8> SrcParts;
6541 extractParts(SrcReg, TargetTy, NumParts, SrcParts, MIRBuilder, MRI);
6542
6543 // Shifting by zero should be a no-op.
6544 auto ZeroAmtConst = MIRBuilder.buildConstant(ShiftAmtTy, 0);
6545 LLT BoolTy = LLT::scalar(1);
6546 auto IsZeroShift =
6547 MIRBuilder.buildICmp(ICmpInst::ICMP_EQ, BoolTy, AmtReg, ZeroAmtConst);
6548
6549 // Any wide shift can be decomposed into two components:
6550 // 1. WordShift: number of complete target-sized words to shift
6551 // 2. BitShift: number of bits to shift within each word
6552 //
6553 // Example: 128-bit >> 50 with 32-bit target:
6554 // WordShift = 50 / 32 = 1 (shift right by 1 complete word)
6555 // BitShift = 50 % 32 = 18 (shift each word right by 18 bits)
6556 unsigned TargetBitsLog2 = Log2_32(TargetBits);
6557 auto TargetBitsLog2Const =
6558 MIRBuilder.buildConstant(ShiftAmtTy, TargetBitsLog2);
6559 auto TargetBitsMask = MIRBuilder.buildConstant(ShiftAmtTy, TargetBits - 1);
6560
6561 Register WordShift =
6562 MIRBuilder.buildLShr(ShiftAmtTy, AmtReg, TargetBitsLog2Const).getReg(0);
6563 Register BitShift =
6564 MIRBuilder.buildAnd(ShiftAmtTy, AmtReg, TargetBitsMask).getReg(0);
6565
6566 // Fill values:
6567 // - SHL/LSHR: fill with zeros
6568 // - ASHR: fill with sign-extended MSB
6569 Register ZeroReg = MIRBuilder.buildConstant(TargetTy, 0).getReg(0);
6570
6571 Register FillValue;
6572 if (MI.getOpcode() == TargetOpcode::G_ASHR) {
6573 auto TargetBitsMinusOneConst =
6574 MIRBuilder.buildConstant(ShiftAmtTy, TargetBits - 1);
6575 FillValue = MIRBuilder
6576 .buildAShr(TargetTy, SrcParts[NumParts - 1],
6577 TargetBitsMinusOneConst)
6578 .getReg(0);
6579 } else {
6580 FillValue = ZeroReg;
6581 }
6582
6583 SmallVector<Register, 8> DstParts(NumParts);
6584
6585 // For each output part, generate a select chain that chooses the correct
6586 // result based on the runtime WordShift value. This handles all possible
6587 // word shift amounts by pre-calculating what each would produce.
6588 for (unsigned I = 0; I < NumParts; ++I) {
6589 // Initialize with appropriate default value for this shift type
6590 Register InBoundsResult = FillValue;
6591
6592 // clang-format off
6593 // Build a branchless select chain by pre-computing results for all possible
6594 // WordShift values (0 to NumParts-1). Each iteration nests a new select:
6595 //
6596 // K=0: select(WordShift==0, result0, FillValue)
6597 // K=1: select(WordShift==1, result1, select(WordShift==0, result0, FillValue))
6598 // K=2: select(WordShift==2, result2, select(WordShift==1, result1, select(...)))
6599 // clang-format on
6600 for (unsigned K = 0; K < NumParts; ++K) {
6601 auto WordShiftKConst = MIRBuilder.buildConstant(ShiftAmtTy, K);
6602 auto IsWordShiftK = MIRBuilder.buildICmp(ICmpInst::ICMP_EQ, BoolTy,
6603 WordShift, WordShiftKConst);
6604
6605 // Calculate source indices for this word shift
6606 //
6607 // For 4-part 128-bit value with K=1 word shift:
6608 // SHL: [3][2][1][0] << K => [2][1][0][Z]
6609 // -> (MainIdx = I-K, CarryIdx = I-K-1)
6610 // LSHR: [3][2][1][0] >> K => [Z][3][2][1]
6611 // -> (MainIdx = I+K, CarryIdx = I+K+1)
6612 int MainSrcIdx;
6613 int CarrySrcIdx; // Index for the word that provides the carried-in bits.
6614
6615 switch (MI.getOpcode()) {
6616 case TargetOpcode::G_SHL:
6617 MainSrcIdx = (int)I - (int)K;
6618 CarrySrcIdx = MainSrcIdx - 1;
6619 break;
6620 case TargetOpcode::G_LSHR:
6621 case TargetOpcode::G_ASHR:
6622 MainSrcIdx = (int)I + (int)K;
6623 CarrySrcIdx = MainSrcIdx + 1;
6624 break;
6625 default:
6626 llvm_unreachable("Not a shift");
6627 }
6628
6629 // Check bounds and build the result for this word shift
6630 Register ResultForK;
6631 if (MainSrcIdx >= 0 && MainSrcIdx < (int)NumParts) {
6632 Register MainOp = SrcParts[MainSrcIdx];
6633 Register CarryOp;
6634
6635 // Determine carry operand with bounds checking
6636 if (CarrySrcIdx >= 0 && CarrySrcIdx < (int)NumParts)
6637 CarryOp = SrcParts[CarrySrcIdx];
6638 else if (MI.getOpcode() == TargetOpcode::G_ASHR &&
6639 CarrySrcIdx >= (int)NumParts)
6640 CarryOp = FillValue; // Use sign extension
6641
6642 ResultForK = buildVariableShiftPart(MI.getOpcode(), MainOp, BitShift,
6643 TargetTy, CarryOp);
6644 } else {
6645 // Out of bounds - use fill value for this k
6646 ResultForK = FillValue;
6647 }
6648
6649 // Select this result if WordShift equals k
6650 InBoundsResult =
6652 .buildSelect(TargetTy, IsWordShiftK, ResultForK, InBoundsResult)
6653 .getReg(0);
6654 }
6655
6656 // Handle zero-shift special case: if shift is 0, use original input
6657 DstParts[I] =
6659 .buildSelect(TargetTy, IsZeroShift, SrcParts[I], InBoundsResult)
6660 .getReg(0);
6661 }
6662
6663 MIRBuilder.buildMergeLikeInstr(DstReg, DstParts);
6664 MI.eraseFromParent();
6665 return Legalized;
6666}
6667
6670 LLT MoreTy) {
6671 assert(TypeIdx == 0 && "Expecting only Idx 0");
6672
6673 Observer.changingInstr(MI);
6674 for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
6675 MachineBasicBlock &OpMBB = *MI.getOperand(I + 1).getMBB();
6676 MIRBuilder.setInsertPt(OpMBB, OpMBB.getFirstTerminator());
6677 moreElementsVectorSrc(MI, MoreTy, I);
6678 }
6679
6680 MachineBasicBlock &MBB = *MI.getParent();
6681 MIRBuilder.setInsertPt(MBB, --MBB.getFirstNonPHI());
6682 moreElementsVectorDst(MI, MoreTy, 0);
6683 Observer.changedInstr(MI);
6684 return Legalized;
6685}
6686
6687MachineInstrBuilder LegalizerHelper::getNeutralElementForVecReduce(
6688 unsigned Opcode, MachineIRBuilder &MIRBuilder, LLT Ty) {
6689 assert(Ty.isScalar() && "Expected scalar type to make neutral element for");
6690
6691 switch (Opcode) {
6692 default:
6694 "getNeutralElementForVecReduce called with invalid opcode!");
6695 case TargetOpcode::G_VECREDUCE_ADD:
6696 case TargetOpcode::G_VECREDUCE_OR:
6697 case TargetOpcode::G_VECREDUCE_XOR:
6698 case TargetOpcode::G_VECREDUCE_UMAX:
6699 return MIRBuilder.buildConstant(Ty, 0);
6700 case TargetOpcode::G_VECREDUCE_MUL:
6701 return MIRBuilder.buildConstant(Ty, 1);
6702 case TargetOpcode::G_VECREDUCE_AND:
6703 case TargetOpcode::G_VECREDUCE_UMIN:
6705 Ty, APInt::getAllOnes(Ty.getScalarSizeInBits()));
6706 case TargetOpcode::G_VECREDUCE_SMAX:
6708 Ty, APInt::getSignedMinValue(Ty.getSizeInBits()));
6709 case TargetOpcode::G_VECREDUCE_SMIN:
6711 Ty, APInt::getSignedMaxValue(Ty.getSizeInBits()));
6712 case TargetOpcode::G_VECREDUCE_FADD:
6713 return MIRBuilder.buildFConstant(Ty, -0.0);
6714 case TargetOpcode::G_VECREDUCE_FMUL:
6715 return MIRBuilder.buildFConstant(Ty, 1.0);
6716 case TargetOpcode::G_VECREDUCE_FMINIMUM:
6717 case TargetOpcode::G_VECREDUCE_FMAXIMUM:
6718 assert(false && "getNeutralElementForVecReduce unimplemented for "
6719 "G_VECREDUCE_FMINIMUM and G_VECREDUCE_FMAXIMUM!");
6720 }
6721 llvm_unreachable("switch expected to return!");
6722}
6723
6726 LLT MoreTy) {
6727 unsigned Opc = MI.getOpcode();
6728 switch (Opc) {
6729 case TargetOpcode::G_IMPLICIT_DEF:
6730 case TargetOpcode::G_LOAD: {
6731 if (TypeIdx != 0)
6732 return UnableToLegalize;
6733 Observer.changingInstr(MI);
6734 moreElementsVectorDst(MI, MoreTy, 0);
6735 Observer.changedInstr(MI);
6736 return Legalized;
6737 }
6738 case TargetOpcode::G_STORE:
6739 if (TypeIdx != 0)
6740 return UnableToLegalize;
6741 Observer.changingInstr(MI);
6742 moreElementsVectorSrc(MI, MoreTy, 0);
6743 Observer.changedInstr(MI);
6744 return Legalized;
6745 case TargetOpcode::G_AND:
6746 case TargetOpcode::G_OR:
6747 case TargetOpcode::G_XOR:
6748 case TargetOpcode::G_ADD:
6749 case TargetOpcode::G_SUB:
6750 case TargetOpcode::G_MUL:
6751 case TargetOpcode::G_FADD:
6752 case TargetOpcode::G_FSUB:
6753 case TargetOpcode::G_FMUL:
6754 case TargetOpcode::G_FDIV:
6755 case TargetOpcode::G_FCOPYSIGN:
6756 case TargetOpcode::G_UADDSAT:
6757 case TargetOpcode::G_USUBSAT:
6758 case TargetOpcode::G_SADDSAT:
6759 case TargetOpcode::G_SSUBSAT:
6760 case TargetOpcode::G_SMIN:
6761 case TargetOpcode::G_SMAX:
6762 case TargetOpcode::G_UMIN:
6763 case TargetOpcode::G_UMAX:
6764 case TargetOpcode::G_FMINNUM:
6765 case TargetOpcode::G_FMAXNUM:
6766 case TargetOpcode::G_FMINNUM_IEEE:
6767 case TargetOpcode::G_FMAXNUM_IEEE:
6768 case TargetOpcode::G_FMINIMUM:
6769 case TargetOpcode::G_FMAXIMUM:
6770 case TargetOpcode::G_FMINIMUMNUM:
6771 case TargetOpcode::G_FMAXIMUMNUM:
6772 case TargetOpcode::G_STRICT_FADD:
6773 case TargetOpcode::G_STRICT_FSUB:
6774 case TargetOpcode::G_STRICT_FMUL: {
6775 Observer.changingInstr(MI);
6776 moreElementsVectorSrc(MI, MoreTy, 1);
6777 moreElementsVectorSrc(MI, MoreTy, 2);
6778 moreElementsVectorDst(MI, MoreTy, 0);
6779 Observer.changedInstr(MI);
6780 return Legalized;
6781 }
6782 case TargetOpcode::G_SHL:
6783 case TargetOpcode::G_ASHR:
6784 case TargetOpcode::G_LSHR: {
6785 Observer.changingInstr(MI);
6786 moreElementsVectorSrc(MI, MoreTy, 1);
6787 // The shift operand may have a different scalar type from the source and
6788 // destination operands.
6789 LLT ShiftMoreTy = MoreTy.changeElementType(
6790 MRI.getType(MI.getOperand(2).getReg()).getElementType());
6791 moreElementsVectorSrc(MI, ShiftMoreTy, 2);
6792 moreElementsVectorDst(MI, MoreTy, 0);
6793 Observer.changedInstr(MI);
6794 return Legalized;
6795 }
6796 case TargetOpcode::G_FMA:
6797 case TargetOpcode::G_STRICT_FMA:
6798 case TargetOpcode::G_FSHR:
6799 case TargetOpcode::G_FSHL: {
6800 Observer.changingInstr(MI);
6801 moreElementsVectorSrc(MI, MoreTy, 1);
6802 moreElementsVectorSrc(MI, MoreTy, 2);
6803 moreElementsVectorSrc(MI, MoreTy, 3);
6804 moreElementsVectorDst(MI, MoreTy, 0);
6805 Observer.changedInstr(MI);
6806 return Legalized;
6807 }
6808 case TargetOpcode::G_EXTRACT_VECTOR_ELT:
6809 case TargetOpcode::G_EXTRACT:
6810 if (TypeIdx != 1)
6811 return UnableToLegalize;
6812 Observer.changingInstr(MI);
6813 moreElementsVectorSrc(MI, MoreTy, 1);
6814 Observer.changedInstr(MI);
6815 return Legalized;
6816 case TargetOpcode::G_INSERT:
6817 case TargetOpcode::G_INSERT_VECTOR_ELT:
6818 case TargetOpcode::G_FREEZE:
6819 case TargetOpcode::G_FNEG:
6820 case TargetOpcode::G_FABS:
6821 case TargetOpcode::G_FSQRT:
6822 case TargetOpcode::G_FCEIL:
6823 case TargetOpcode::G_FFLOOR:
6824 case TargetOpcode::G_FNEARBYINT:
6825 case TargetOpcode::G_FRINT:
6826 case TargetOpcode::G_INTRINSIC_ROUND:
6827 case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
6828 case TargetOpcode::G_INTRINSIC_TRUNC:
6829 case TargetOpcode::G_BITREVERSE:
6830 case TargetOpcode::G_BSWAP:
6831 case TargetOpcode::G_FCANONICALIZE:
6832 case TargetOpcode::G_SEXT_INREG:
6833 case TargetOpcode::G_ABS:
6834 case TargetOpcode::G_CTLZ:
6835 case TargetOpcode::G_CTPOP:
6836 if (TypeIdx != 0)
6837 return UnableToLegalize;
6838 Observer.changingInstr(MI);
6839 moreElementsVectorSrc(MI, MoreTy, 1);
6840 moreElementsVectorDst(MI, MoreTy, 0);
6841 Observer.changedInstr(MI);
6842 return Legalized;
6843 case TargetOpcode::G_SELECT: {
6844 auto [DstReg, DstTy, CondReg, CondTy] = MI.getFirst2RegLLTs();
6845 if (TypeIdx == 1) {
6846 if (!CondTy.isScalar() ||
6847 DstTy.getElementCount() != MoreTy.getElementCount())
6848 return UnableToLegalize;
6849
6850 // This is turning a scalar select of vectors into a vector
6851 // select. Broadcast the select condition.
6852 auto ShufSplat = MIRBuilder.buildShuffleSplat(MoreTy, CondReg);
6853 Observer.changingInstr(MI);
6854 MI.getOperand(1).setReg(ShufSplat.getReg(0));
6855 Observer.changedInstr(MI);
6856 return Legalized;
6857 }
6858
6859 if (CondTy.isVector())
6860 return UnableToLegalize;
6861
6862 Observer.changingInstr(MI);
6863 moreElementsVectorSrc(MI, MoreTy, 2);
6864 moreElementsVectorSrc(MI, MoreTy, 3);
6865 moreElementsVectorDst(MI, MoreTy, 0);
6866 Observer.changedInstr(MI);
6867 return Legalized;
6868 }
6869 case TargetOpcode::G_UNMERGE_VALUES:
6870 return UnableToLegalize;
6871 case TargetOpcode::G_PHI:
6872 return moreElementsVectorPhi(MI, TypeIdx, MoreTy);
6873 case TargetOpcode::G_SHUFFLE_VECTOR:
6874 return moreElementsVectorShuffle(MI, TypeIdx, MoreTy);
6875 case TargetOpcode::G_BUILD_VECTOR: {
6877 for (auto Op : MI.uses()) {
6878 Elts.push_back(Op.getReg());
6879 }
6880
6881 for (unsigned i = Elts.size(); i < MoreTy.getNumElements(); ++i) {
6882 Elts.push_back(MIRBuilder.buildUndef(MoreTy.getScalarType()));
6883 }
6884
6885 MIRBuilder.buildDeleteTrailingVectorElements(
6886 MI.getOperand(0).getReg(), MIRBuilder.buildInstr(Opc, {MoreTy}, Elts));
6887 MI.eraseFromParent();
6888 return Legalized;
6889 }
6890 case TargetOpcode::G_SEXT:
6891 case TargetOpcode::G_ZEXT:
6892 case TargetOpcode::G_ANYEXT:
6893 case TargetOpcode::G_TRUNC:
6894 case TargetOpcode::G_FPTRUNC:
6895 case TargetOpcode::G_FPEXT:
6896 case TargetOpcode::G_FPTOSI:
6897 case TargetOpcode::G_FPTOUI:
6898 case TargetOpcode::G_FPTOSI_SAT:
6899 case TargetOpcode::G_FPTOUI_SAT:
6900 case TargetOpcode::G_SITOFP:
6901 case TargetOpcode::G_UITOFP: {
6902 Observer.changingInstr(MI);
6903 LLT SrcExtTy;
6904 LLT DstExtTy;
6905 if (TypeIdx == 0) {
6906 DstExtTy = MoreTy;
6907 SrcExtTy = MoreTy.changeElementType(
6908 MRI.getType(MI.getOperand(1).getReg()).getElementType());
6909 } else {
6910 DstExtTy = MoreTy.changeElementType(
6911 MRI.getType(MI.getOperand(0).getReg()).getElementType());
6912 SrcExtTy = MoreTy;
6913 }
6914 moreElementsVectorSrc(MI, SrcExtTy, 1);
6915 moreElementsVectorDst(MI, DstExtTy, 0);
6916 Observer.changedInstr(MI);
6917 return Legalized;
6918 }
6919 case TargetOpcode::G_ICMP:
6920 case TargetOpcode::G_FCMP: {
6921 if (TypeIdx != 1)
6922 return UnableToLegalize;
6923
6924 Observer.changingInstr(MI);
6925 moreElementsVectorSrc(MI, MoreTy, 2);
6926 moreElementsVectorSrc(MI, MoreTy, 3);
6927 LLT CondTy = MoreTy.changeVectorElementType(
6928 MRI.getType(MI.getOperand(0).getReg()).getElementType());
6929 moreElementsVectorDst(MI, CondTy, 0);
6930 Observer.changedInstr(MI);
6931 return Legalized;
6932 }
6933 case TargetOpcode::G_BITCAST: {
6934 if (TypeIdx != 0)
6935 return UnableToLegalize;
6936
6937 LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
6938 LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
6939
6940 unsigned coefficient = SrcTy.getNumElements() * MoreTy.getNumElements();
6941 if (coefficient % DstTy.getNumElements() != 0)
6942 return UnableToLegalize;
6943
6944 coefficient = coefficient / DstTy.getNumElements();
6945
6946 LLT NewTy = SrcTy.changeElementCount(
6947 ElementCount::get(coefficient, MoreTy.isScalable()));
6948 Observer.changingInstr(MI);
6949 moreElementsVectorSrc(MI, NewTy, 1);
6950 moreElementsVectorDst(MI, MoreTy, 0);
6951 Observer.changedInstr(MI);
6952 return Legalized;
6953 }
6954 case TargetOpcode::G_VECREDUCE_FADD:
6955 case TargetOpcode::G_VECREDUCE_FMUL:
6956 case TargetOpcode::G_VECREDUCE_ADD:
6957 case TargetOpcode::G_VECREDUCE_MUL:
6958 case TargetOpcode::G_VECREDUCE_AND:
6959 case TargetOpcode::G_VECREDUCE_OR:
6960 case TargetOpcode::G_VECREDUCE_XOR:
6961 case TargetOpcode::G_VECREDUCE_SMAX:
6962 case TargetOpcode::G_VECREDUCE_SMIN:
6963 case TargetOpcode::G_VECREDUCE_UMAX:
6964 case TargetOpcode::G_VECREDUCE_UMIN: {
6965 LLT OrigTy = MRI.getType(MI.getOperand(1).getReg());
6966 MachineOperand &MO = MI.getOperand(1);
6967 auto NewVec = MIRBuilder.buildPadVectorWithUndefElements(MoreTy, MO);
6968 auto NeutralElement = getNeutralElementForVecReduce(
6969 MI.getOpcode(), MIRBuilder, MoreTy.getElementType());
6970
6971 LLT IdxTy(TLI.getVectorIdxLLT(MIRBuilder.getDataLayout()));
6972 for (size_t i = OrigTy.getNumElements(), e = MoreTy.getNumElements();
6973 i != e; i++) {
6974 auto Idx = MIRBuilder.buildConstant(IdxTy, i);
6975 NewVec = MIRBuilder.buildInsertVectorElement(MoreTy, NewVec,
6976 NeutralElement, Idx);
6977 }
6978
6979 Observer.changingInstr(MI);
6980 MO.setReg(NewVec.getReg(0));
6981 Observer.changedInstr(MI);
6982 return Legalized;
6983 }
6984
6985 default:
6986 return UnableToLegalize;
6987 }
6988}
6989
6992 auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();
6993 ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
6994 unsigned MaskNumElts = Mask.size();
6995 unsigned SrcNumElts = SrcTy.getNumElements();
6996 LLT DestEltTy = DstTy.getElementType();
6997
6998 if (MaskNumElts == SrcNumElts)
6999 return Legalized;
7000
7001 if (MaskNumElts < SrcNumElts) {
7002 // Extend mask to match new destination vector size with
7003 // undef values.
7004 SmallVector<int, 16> NewMask(SrcNumElts, -1);
7005 llvm::copy(Mask, NewMask.begin());
7006
7007 moreElementsVectorDst(MI, SrcTy, 0);
7008 MIRBuilder.setInstrAndDebugLoc(MI);
7009 MIRBuilder.buildShuffleVector(MI.getOperand(0).getReg(),
7010 MI.getOperand(1).getReg(),
7011 MI.getOperand(2).getReg(), NewMask);
7012 MI.eraseFromParent();
7013
7014 return Legalized;
7015 }
7016
7017 unsigned PaddedMaskNumElts = alignTo(MaskNumElts, SrcNumElts);
7018 unsigned NumConcat = PaddedMaskNumElts / SrcNumElts;
7019 LLT PaddedTy =
7020 DstTy.changeVectorElementCount(ElementCount::getFixed(PaddedMaskNumElts));
7021
7022 // Create new source vectors by concatenating the initial
7023 // source vectors with undefined vectors of the same size.
7024 auto Undef = MIRBuilder.buildUndef(SrcTy);
7025 SmallVector<Register, 8> MOps1(NumConcat, Undef.getReg(0));
7026 SmallVector<Register, 8> MOps2(NumConcat, Undef.getReg(0));
7027 MOps1[0] = MI.getOperand(1).getReg();
7028 MOps2[0] = MI.getOperand(2).getReg();
7029
7030 auto Src1 = MIRBuilder.buildConcatVectors(PaddedTy, MOps1);
7031 auto Src2 = MIRBuilder.buildConcatVectors(PaddedTy, MOps2);
7032
7033 // Readjust mask for new input vector length.
7034 SmallVector<int, 8> MappedOps(PaddedMaskNumElts, -1);
7035 for (unsigned I = 0; I != MaskNumElts; ++I) {
7036 int Idx = Mask[I];
7037 if (Idx >= static_cast<int>(SrcNumElts))
7038 Idx += PaddedMaskNumElts - SrcNumElts;
7039 MappedOps[I] = Idx;
7040 }
7041
7042 // If we got more elements than required, extract subvector.
7043 if (MaskNumElts != PaddedMaskNumElts) {
7044 auto Shuffle =
7045 MIRBuilder.buildShuffleVector(PaddedTy, Src1, Src2, MappedOps);
7046
7047 SmallVector<Register, 16> Elts(MaskNumElts);
7048 for (unsigned I = 0; I < MaskNumElts; ++I) {
7049 Elts[I] =
7050 MIRBuilder.buildExtractVectorElementConstant(DestEltTy, Shuffle, I)
7051 .getReg(0);
7052 }
7053 MIRBuilder.buildBuildVector(DstReg, Elts);
7054 } else {
7055 MIRBuilder.buildShuffleVector(DstReg, Src1, Src2, MappedOps);
7056 }
7057
7058 MI.eraseFromParent();
7060}
7061
7064 unsigned int TypeIdx, LLT MoreTy) {
7065 auto [DstTy, Src1Ty, Src2Ty] = MI.getFirst3LLTs();
7066 ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
7067 unsigned NumElts = DstTy.getNumElements();
7068 unsigned WidenNumElts = MoreTy.getNumElements();
7069
7070 if (DstTy.isVector() && Src1Ty.isVector() &&
7071 DstTy.getNumElements() != Src1Ty.getNumElements()) {
7073 }
7074
7075 if (TypeIdx != 0)
7076 return UnableToLegalize;
7077
7078 // Expect a canonicalized shuffle.
7079 if (DstTy != Src1Ty || DstTy != Src2Ty)
7080 return UnableToLegalize;
7081
7082 moreElementsVectorSrc(MI, MoreTy, 1);
7083 moreElementsVectorSrc(MI, MoreTy, 2);
7084
7085 // Adjust mask based on new input vector length.
7086 SmallVector<int, 16> NewMask(WidenNumElts, -1);
7087 for (unsigned I = 0; I != NumElts; ++I) {
7088 int Idx = Mask[I];
7089 if (Idx < static_cast<int>(NumElts))
7090 NewMask[I] = Idx;
7091 else
7092 NewMask[I] = Idx - NumElts + WidenNumElts;
7093 }
7094 moreElementsVectorDst(MI, MoreTy, 0);
7095 MIRBuilder.setInstrAndDebugLoc(MI);
7096 MIRBuilder.buildShuffleVector(MI.getOperand(0).getReg(),
7097 MI.getOperand(1).getReg(),
7098 MI.getOperand(2).getReg(), NewMask);
7099 MI.eraseFromParent();
7100 return Legalized;
7101}
7102
7103void LegalizerHelper::multiplyRegisters(SmallVectorImpl<Register> &DstRegs,
7104 ArrayRef<Register> Src1Regs,
7105 ArrayRef<Register> Src2Regs,
7106 LLT NarrowTy) {
7108 unsigned SrcParts = Src1Regs.size();
7109 unsigned DstParts = DstRegs.size();
7110
7111 unsigned DstIdx = 0; // Low bits of the result.
7112 Register FactorSum =
7113 B.buildMul(NarrowTy, Src1Regs[DstIdx], Src2Regs[DstIdx]).getReg(0);
7114 DstRegs[DstIdx] = FactorSum;
7115
7116 Register CarrySumPrevDstIdx;
7118
7119 for (DstIdx = 1; DstIdx < DstParts; DstIdx++) {
7120 // Collect low parts of muls for DstIdx.
7121 for (unsigned i = DstIdx + 1 < SrcParts ? 0 : DstIdx - SrcParts + 1;
7122 i <= std::min(DstIdx, SrcParts - 1); ++i) {
7124 B.buildMul(NarrowTy, Src1Regs[DstIdx - i], Src2Regs[i]);
7125 Factors.push_back(Mul.getReg(0));
7126 }
7127 // Collect high parts of muls from previous DstIdx.
7128 for (unsigned i = DstIdx < SrcParts ? 0 : DstIdx - SrcParts;
7129 i <= std::min(DstIdx - 1, SrcParts - 1); ++i) {
7130 MachineInstrBuilder Umulh =
7131 B.buildUMulH(NarrowTy, Src1Regs[DstIdx - 1 - i], Src2Regs[i]);
7132 Factors.push_back(Umulh.getReg(0));
7133 }
7134 // Add CarrySum from additions calculated for previous DstIdx.
7135 if (DstIdx != 1) {
7136 Factors.push_back(CarrySumPrevDstIdx);
7137 }
7138
7139 Register CarrySum;
7140 // Add all factors and accumulate all carries into CarrySum.
7141 if (DstIdx != DstParts - 1) {
7142 MachineInstrBuilder Uaddo =
7143 B.buildUAddo(NarrowTy, LLT::integer(1), Factors[0], Factors[1]);
7144 FactorSum = Uaddo.getReg(0);
7145 CarrySum = B.buildZExt(NarrowTy, Uaddo.getReg(1)).getReg(0);
7146 for (unsigned i = 2; i < Factors.size(); ++i) {
7147 MachineInstrBuilder Uaddo =
7148 B.buildUAddo(NarrowTy, LLT::integer(1), FactorSum, Factors[i]);
7149 FactorSum = Uaddo.getReg(0);
7150 MachineInstrBuilder Carry = B.buildZExt(NarrowTy, Uaddo.getReg(1));
7151 CarrySum = B.buildAdd(NarrowTy, CarrySum, Carry).getReg(0);
7152 }
7153 } else {
7154 // Since value for the next index is not calculated, neither is CarrySum.
7155 FactorSum = B.buildAdd(NarrowTy, Factors[0], Factors[1]).getReg(0);
7156 for (unsigned i = 2; i < Factors.size(); ++i)
7157 FactorSum = B.buildAdd(NarrowTy, FactorSum, Factors[i]).getReg(0);
7158 }
7159
7160 CarrySumPrevDstIdx = CarrySum;
7161 DstRegs[DstIdx] = FactorSum;
7162 Factors.clear();
7163 }
7164}
7165
7168 LLT NarrowTy) {
7169 if (TypeIdx != 0)
7170 return UnableToLegalize;
7171
7172 Register DstReg = MI.getOperand(0).getReg();
7173 LLT DstType = MRI.getType(DstReg);
7174 // FIXME: add support for vector types
7175 if (DstType.isVector())
7176 return UnableToLegalize;
7177
7178 unsigned Opcode = MI.getOpcode();
7179 unsigned OpO, OpE, OpF;
7180 switch (Opcode) {
7181 case TargetOpcode::G_SADDO:
7182 case TargetOpcode::G_SADDE:
7183 case TargetOpcode::G_UADDO:
7184 case TargetOpcode::G_UADDE:
7185 case TargetOpcode::G_ADD:
7186 OpO = TargetOpcode::G_UADDO;
7187 OpE = TargetOpcode::G_UADDE;
7188 OpF = TargetOpcode::G_UADDE;
7189 if (Opcode == TargetOpcode::G_SADDO || Opcode == TargetOpcode::G_SADDE)
7190 OpF = TargetOpcode::G_SADDE;
7191 break;
7192 case TargetOpcode::G_SSUBO:
7193 case TargetOpcode::G_SSUBE:
7194 case TargetOpcode::G_USUBO:
7195 case TargetOpcode::G_USUBE:
7196 case TargetOpcode::G_SUB:
7197 OpO = TargetOpcode::G_USUBO;
7198 OpE = TargetOpcode::G_USUBE;
7199 OpF = TargetOpcode::G_USUBE;
7200 if (Opcode == TargetOpcode::G_SSUBO || Opcode == TargetOpcode::G_SSUBE)
7201 OpF = TargetOpcode::G_SSUBE;
7202 break;
7203 default:
7204 llvm_unreachable("Unexpected add/sub opcode!");
7205 }
7206
7207 // 1 for a plain add/sub, 2 if this is an operation with a carry-out.
7208 unsigned NumDefs = MI.getNumExplicitDefs();
7209 Register Src1 = MI.getOperand(NumDefs).getReg();
7210 Register Src2 = MI.getOperand(NumDefs + 1).getReg();
7211 Register CarryDst, CarryIn;
7212 if (NumDefs == 2)
7213 CarryDst = MI.getOperand(1).getReg();
7214 if (MI.getNumOperands() == NumDefs + 3)
7215 CarryIn = MI.getOperand(NumDefs + 2).getReg();
7216
7217 LLT RegTy = MRI.getType(MI.getOperand(0).getReg());
7218 LLT LeftoverTy, DummyTy;
7219 SmallVector<Register, 2> Src1Regs, Src2Regs, Src1Left, Src2Left, DstRegs;
7220 extractParts(Src1, RegTy, NarrowTy, LeftoverTy, Src1Regs, Src1Left,
7221 MIRBuilder, MRI);
7222 extractParts(Src2, RegTy, NarrowTy, DummyTy, Src2Regs, Src2Left, MIRBuilder,
7223 MRI);
7224
7225 int NarrowParts = Src1Regs.size();
7226 Src1Regs.append(Src1Left);
7227 Src2Regs.append(Src2Left);
7228 DstRegs.reserve(Src1Regs.size());
7229
7230 for (int i = 0, e = Src1Regs.size(); i != e; ++i) {
7231 Register DstReg =
7232 MRI.createGenericVirtualRegister(MRI.getType(Src1Regs[i]));
7233 Register CarryOut;
7234 // Forward the final carry-out to the destination register
7235 if (i == e - 1 && CarryDst)
7236 CarryOut = CarryDst;
7237 else
7238 CarryOut = MRI.createGenericVirtualRegister(LLT::integer(1));
7239
7240 if (!CarryIn) {
7241 MIRBuilder.buildInstr(OpO, {DstReg, CarryOut},
7242 {Src1Regs[i], Src2Regs[i]});
7243 } else if (i == e - 1) {
7244 MIRBuilder.buildInstr(OpF, {DstReg, CarryOut},
7245 {Src1Regs[i], Src2Regs[i], CarryIn});
7246 } else {
7247 MIRBuilder.buildInstr(OpE, {DstReg, CarryOut},
7248 {Src1Regs[i], Src2Regs[i], CarryIn});
7249 }
7250
7251 DstRegs.push_back(DstReg);
7252 CarryIn = CarryOut;
7253 }
7254 insertParts(MI.getOperand(0).getReg(), RegTy, NarrowTy,
7255 ArrayRef(DstRegs).take_front(NarrowParts), LeftoverTy,
7256 ArrayRef(DstRegs).drop_front(NarrowParts));
7257
7258 MI.eraseFromParent();
7259 return Legalized;
7260}
7261
7264 auto [DstReg, Src1, Src2] = MI.getFirst3Regs();
7265
7266 LLT Ty = MRI.getType(DstReg);
7267 if (Ty.isVector())
7268 return UnableToLegalize;
7269
7270 unsigned Size = Ty.getSizeInBits();
7271 unsigned NarrowSize = NarrowTy.getSizeInBits();
7272 if (Size % NarrowSize != 0)
7273 return UnableToLegalize;
7274
7275 unsigned NumParts = Size / NarrowSize;
7276 bool IsMulHigh = MI.getOpcode() == TargetOpcode::G_UMULH;
7277 unsigned DstTmpParts = NumParts * (IsMulHigh ? 2 : 1);
7278
7279 SmallVector<Register, 2> Src1Parts, Src2Parts;
7280 SmallVector<Register, 2> DstTmpRegs(DstTmpParts);
7281 extractParts(Src1, NarrowTy, NumParts, Src1Parts, MIRBuilder, MRI);
7282 extractParts(Src2, NarrowTy, NumParts, Src2Parts, MIRBuilder, MRI);
7283 multiplyRegisters(DstTmpRegs, Src1Parts, Src2Parts, NarrowTy);
7284
7285 // Take only high half of registers if this is high mul.
7286 ArrayRef<Register> DstRegs(&DstTmpRegs[DstTmpParts - NumParts], NumParts);
7287 MIRBuilder.buildMergeLikeInstr(DstReg, DstRegs);
7288 MI.eraseFromParent();
7289 return Legalized;
7290}
7291
7294 LLT NarrowTy) {
7295 if (TypeIdx != 0)
7296 return UnableToLegalize;
7297
7298 bool IsSigned = MI.getOpcode() == TargetOpcode::G_FPTOSI;
7299
7300 Register Src = MI.getOperand(1).getReg();
7301 LLT SrcTy = MRI.getType(Src);
7302
7303 // If all finite floats fit into the narrowed integer type, we can just swap
7304 // out the result type. This is practically only useful for conversions from
7305 // half to at least 16-bits, so just handle the one case.
7306 if (SrcTy.getScalarType() != LLT::scalar(16) ||
7307 NarrowTy.getScalarSizeInBits() < (IsSigned ? 17u : 16u))
7308 return UnableToLegalize;
7309
7310 Observer.changingInstr(MI);
7311 narrowScalarDst(MI, NarrowTy, 0,
7312 IsSigned ? TargetOpcode::G_SEXT : TargetOpcode::G_ZEXT);
7313 Observer.changedInstr(MI);
7314 return Legalized;
7315}
7316
// Narrow-scalar legalization of G_EXTRACT when narrowing the wide source
// operand (TypeIdx 1): split the source into NarrowTy-sized parts, then
// rebuild the requested bit range from only the parts that overlap it.
// NOTE(review): the opening signature line(s) are omitted from this listing
// (original lines 7317-7318) — presumably the narrowScalarExtract
// definition; confirm against upstream.
 7319 LLT NarrowTy) {
// Only source narrowing is handled here.
 7320 if (TypeIdx != 1)
 7321 return UnableToLegalize;
 7322
 7323 uint64_t NarrowSize = NarrowTy.getSizeInBits();
 7324
 7325 int64_t SizeOp1 = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
 7326 // FIXME: add support for when SizeOp1 isn't an exact multiple of
 7327 // NarrowSize.
 7328 if (SizeOp1 % NarrowSize != 0)
 7329 return UnableToLegalize;
 7330 int NumParts = SizeOp1 / NarrowSize;
 7331
// Break the wide source register into NumParts registers of NarrowTy.
 7332 SmallVector<Register, 2> SrcRegs, DstRegs;
 7333 extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, SrcRegs,
 7334 MIRBuilder, MRI);
 7335
// Operand 0 is the extract result; operand 2 is the immediate bit offset.
 7336 Register OpReg = MI.getOperand(0).getReg();
 7337 uint64_t OpStart = MI.getOperand(2).getImm();
 7338 uint64_t OpSize = MRI.getType(OpReg).getSizeInBits();
 7339 for (int i = 0; i < NumParts; ++i) {
 7340 unsigned SrcStart = i * NarrowSize;
 7341
 7342 if (SrcStart + NarrowSize <= OpStart || SrcStart >= OpStart + OpSize) {
 7343 // No part of the extract uses this subregister, ignore it.
 7344 continue;
 7345 } else if (SrcStart == OpStart && NarrowTy == MRI.getType(OpReg)) {
 7346 // The entire subregister is extracted, forward the value.
 7347 DstRegs.push_back(SrcRegs[i]);
 7348 continue;
 7349 }
 7350
 7351 // OpSegStart is where this destination segment would start in OpReg if it
 7352 // extended infinitely in both directions.
 7353 int64_t ExtractOffset;
 7354 uint64_t SegSize;
 7355 if (OpStart < SrcStart) {
// The extract begins before this part: take the part's low bits, clamped
// to however much of the extract range remains.
 7356 ExtractOffset = 0;
 7357 SegSize = std::min(NarrowSize, OpStart + OpSize - SrcStart);
 7358 } else {
// The extract begins inside this part: skip to OpStart within the part.
 7359 ExtractOffset = OpStart - SrcStart;
 7360 SegSize = std::min(SrcStart + NarrowSize - OpStart, OpSize);
 7361 }
 7362
 7363 Register SegReg = SrcRegs[i];
 7364 if (ExtractOffset != 0 || SegSize != NarrowSize) {
 7365 // A genuine extract is needed.
 7366 SegReg = MRI.createGenericVirtualRegister(LLT::scalar(SegSize));
 7367 MIRBuilder.buildExtract(SegReg, SrcRegs[i], ExtractOffset);
 7368 }
 7369
 7370 DstRegs.push_back(SegReg);
 7371 }
 7372
// Reassemble the collected segments into the destination: build-vector for
// vector results, merge for multiple scalar pieces, plain copy for one.
 7373 Register DstReg = MI.getOperand(0).getReg();
 7374 if (MRI.getType(DstReg).isVector())
 7375 MIRBuilder.buildBuildVector(DstReg, DstRegs);
 7376 else if (DstRegs.size() > 1)
 7377 MIRBuilder.buildMergeLikeInstr(DstReg, DstRegs);
 7378 else
 7379 MIRBuilder.buildCopy(DstReg, DstRegs[0]);
 7380 MI.eraseFromParent();
 7381 return Legalized;
 7382}
7383
// Narrow-scalar legalization of G_INSERT (result narrowing, TypeIdx 0):
// split the big-register operand into NarrowTy parts (plus a possible
// leftover), splice the inserted value into the overlapping parts, and
// merge everything back into the destination.
// NOTE(review): the opening signature line(s) are omitted from this listing
// (original lines 7384-7385) — presumably narrowScalarInsert; confirm
// against upstream.
 7386 LLT NarrowTy) {
 7387 // FIXME: Don't know how to handle secondary types yet.
 7388 if (TypeIdx != 0)
 7389 return UnableToLegalize;
 7390
 7391 SmallVector<Register, 2> SrcRegs, LeftoverRegs, DstRegs;
 7392 LLT RegTy = MRI.getType(MI.getOperand(0).getReg());
 7393 LLT LeftoverTy;
// Split the register being inserted into (operand 1) into NarrowTy parts;
// any remainder lands in LeftoverRegs with type LeftoverTy.
 7394 extractParts(MI.getOperand(1).getReg(), RegTy, NarrowTy, LeftoverTy, SrcRegs,
 7395 LeftoverRegs, MIRBuilder, MRI);
 7396
// Process the leftover piece with the same loop by appending it.
 7397 SrcRegs.append(LeftoverRegs);
 7398
// Operand 2 is the value being inserted; operand 3 its immediate bit offset.
 7399 uint64_t NarrowSize = NarrowTy.getSizeInBits();
 7400 Register OpReg = MI.getOperand(2).getReg();
 7401 uint64_t OpStart = MI.getOperand(3).getImm();
 7402 uint64_t OpSize = MRI.getType(OpReg).getSizeInBits();
 7403 for (int I = 0, E = SrcRegs.size(); I != E; ++I) {
 7404 unsigned DstStart = I * NarrowSize;
 7405
 7406 if (DstStart == OpStart && NarrowTy == MRI.getType(OpReg)) {
 7407 // The entire subregister is defined by this insert, forward the new
 7408 // value.
 7409 DstRegs.push_back(OpReg);
 7410 continue;
 7411 }
 7412
 7413 Register SrcReg = SrcRegs[I];
 7414 if (MRI.getType(SrcRegs[I]) == LeftoverTy) {
 7415 // The leftover reg is smaller than NarrowTy, so we need to extend it.
 7416 SrcReg = MRI.createGenericVirtualRegister(NarrowTy);
 7417 MIRBuilder.buildAnyExt(SrcReg, SrcRegs[I]);
 7418 }
 7419
 7420 if (DstStart + NarrowSize <= OpStart || DstStart >= OpStart + OpSize) {
 7421 // No part of the insert affects this subregister, forward the original.
 7422 DstRegs.push_back(SrcReg);
 7423 continue;
 7424 }
 7425
 7426 // OpSegStart is where this destination segment would start in OpReg if it
 7427 // extended infinitely in both directions.
 7428 int64_t ExtractOffset, InsertOffset;
 7429 uint64_t SegSize;
 7430 if (OpStart < DstStart) {
// The inserted value starts before this part: drop its leading bits and
// insert at the part's bit 0.
 7431 InsertOffset = 0;
 7432 ExtractOffset = DstStart - OpStart;
 7433 SegSize = std::min(NarrowSize, OpStart + OpSize - DstStart);
 7434 } else {
// The inserted value starts inside this part.
 7435 InsertOffset = OpStart - DstStart;
 7436 ExtractOffset = 0;
 7437 SegSize =
 7438 std::min(NarrowSize - InsertOffset, OpStart + OpSize - DstStart);
 7439 }
 7440
 7441 Register SegReg = OpReg;
 7442 if (ExtractOffset != 0 || SegSize != OpSize) {
 7443 // A genuine extract is needed.
 7444 SegReg = MRI.createGenericVirtualRegister(LLT::scalar(SegSize));
 7445 MIRBuilder.buildExtract(SegReg, OpReg, ExtractOffset);
 7446 }
 7447
 7448 Register DstReg = MRI.createGenericVirtualRegister(NarrowTy);
 7449 MIRBuilder.buildInsert(DstReg, SrcReg, SegReg, InsertOffset);
 7450 DstRegs.push_back(DstReg);
 7451 }
 7452
// The any-extended leftover makes the merged value potentially wider than
// the destination; merge to a scalar of WideSize and truncate in that case.
 7453 uint64_t WideSize = DstRegs.size() * NarrowSize;
 7454 Register DstReg = MI.getOperand(0).getReg();
 7455 if (WideSize > RegTy.getSizeInBits()) {
 7456 Register MergeReg = MRI.createGenericVirtualRegister(LLT::scalar(WideSize));
 7457 MIRBuilder.buildMergeLikeInstr(MergeReg, DstRegs);
 7458 MIRBuilder.buildTrunc(DstReg, MergeReg);
 7459 } else
 7460 MIRBuilder.buildMergeLikeInstr(DstReg, DstRegs);
 7461
 7462 MI.eraseFromParent();
 7463 return Legalized;
 7464}
7465
// Narrow-scalar legalization of a simple two-source, one-result operation
// (same opcode applied piecewise): split both sources into NarrowTy parts
// plus leftovers, apply the operation part-by-part, and reassemble.
// NOTE(review): the opening signature line(s) are omitted from this listing
// (original lines 7466-7467) — presumably narrowScalarBasic; confirm
// against upstream.
 7468 LLT NarrowTy) {
 7469 Register DstReg = MI.getOperand(0).getReg();
 7470 LLT DstTy = MRI.getType(DstReg);
 7471
 7472 assert(MI.getNumOperands() == 3 && TypeIdx == 0);
 7473
 7474 SmallVector<Register, 4> DstRegs, DstLeftoverRegs;
 7475 SmallVector<Register, 4> Src0Regs, Src0LeftoverRegs;
 7476 SmallVector<Register, 4> Src1Regs, Src1LeftoverRegs;
 7477 LLT LeftoverTy;
 7478 if (!extractParts(MI.getOperand(1).getReg(), DstTy, NarrowTy, LeftoverTy,
 7479 Src0Regs, Src0LeftoverRegs, MIRBuilder, MRI))
 7480 return UnableToLegalize;
 7481
// Both sources have DstTy, so the second split must succeed with the same
// part structure as the first — hence unreachable rather than bail-out.
 7482 LLT Unused;
 7483 if (!extractParts(MI.getOperand(2).getReg(), DstTy, NarrowTy, Unused,
 7484 Src1Regs, Src1LeftoverRegs, MIRBuilder, MRI))
 7485 llvm_unreachable("inconsistent extractParts result");
 7486
// Apply the original opcode to each pair of NarrowTy parts...
 7487 for (unsigned I = 0, E = Src1Regs.size(); I != E; ++I) {
 7488 auto Inst = MIRBuilder.buildInstr(MI.getOpcode(), {NarrowTy},
 7489 {Src0Regs[I], Src1Regs[I]});
 7490 DstRegs.push_back(Inst.getReg(0));
 7491 }
 7492
// ...and to each pair of leftover parts, at LeftoverTy.
 7493 for (unsigned I = 0, E = Src1LeftoverRegs.size(); I != E; ++I) {
 7494 auto Inst = MIRBuilder.buildInstr(
 7495 MI.getOpcode(),
 7496 {LeftoverTy}, {Src0LeftoverRegs[I], Src1LeftoverRegs[I]});
 7497 DstLeftoverRegs.push_back(Inst.getReg(0));
 7498 }
 7499
 7500 insertParts(DstReg, DstTy, NarrowTy, DstRegs,
 7501 LeftoverTy, DstLeftoverRegs);
 7502
 7503 MI.eraseFromParent();
 7504 return Legalized;
 7505}
7506
// Narrow-scalar legalization of an extension (result narrowing, TypeIdx 0)
// via the GCD/LCM part-merging helpers: split the source along the GCD
// type, extend/merge pieces up to the LCM type, then remerge into Dst.
// NOTE(review): the opening signature line(s) are omitted from this listing
// (original lines 7507-7508), as is original line 7519 — presumably the
// declaration of Parts (a SmallVector<Register>); confirm against upstream.
 7509 LLT NarrowTy) {
 7510 if (TypeIdx != 0)
 7511 return UnableToLegalize;
 7512
 7513 auto [DstReg, SrcReg] = MI.getFirst2Regs();
 7514
 7515 LLT DstTy = MRI.getType(DstReg);
 7516 if (DstTy.isVector())
 7517 return UnableToLegalize;
 7518
 7520 LLT GCDTy = extractGCDType(Parts, DstTy, NarrowTy, SrcReg);
 7521 LLT LCMTy = buildLCMMergePieces(DstTy, NarrowTy, GCDTy, Parts, MI.getOpcode());
 7522 buildWidenedRemergeToDst(DstReg, LCMTy, Parts);
 7523
 7524 MI.eraseFromParent();
 7525 return Legalized;
 7526}
7527
// Narrow-scalar legalization of G_SELECT (result narrowing, TypeIdx 0):
// split both value operands into NarrowTy parts plus leftovers and emit one
// select per part, all reusing the original (scalar) condition.
// NOTE(review): the opening signature line(s) are omitted from this listing
// (original lines 7528-7529) — presumably narrowScalarSelect; confirm
// against upstream.
 7530 LLT NarrowTy) {
 7531 if (TypeIdx != 0)
 7532 return UnableToLegalize;
 7533
 7534 Register CondReg = MI.getOperand(1).getReg();
 7535 LLT CondTy = MRI.getType(CondReg);
 7536 if (CondTy.isVector()) // TODO: Handle vselect
 7537 return UnableToLegalize;
 7538
 7539 Register DstReg = MI.getOperand(0).getReg();
 7540 LLT DstTy = MRI.getType(DstReg);
 7541
 7542 SmallVector<Register, 4> DstRegs, DstLeftoverRegs;
 7543 SmallVector<Register, 4> Src1Regs, Src1LeftoverRegs;
 7544 SmallVector<Register, 4> Src2Regs, Src2LeftoverRegs;
 7545 LLT LeftoverTy;
 7546 if (!extractParts(MI.getOperand(2).getReg(), DstTy, NarrowTy, LeftoverTy,
 7547 Src1Regs, Src1LeftoverRegs, MIRBuilder, MRI))
 7548 return UnableToLegalize;
 7549
// The second value operand has the same type, so this split cannot fail
// differently from the first.
 7550 LLT Unused;
 7551 if (!extractParts(MI.getOperand(3).getReg(), DstTy, NarrowTy, Unused,
 7552 Src2Regs, Src2LeftoverRegs, MIRBuilder, MRI))
 7553 llvm_unreachable("inconsistent extractParts result");
 7554
// One select per NarrowTy part, sharing CondReg.
 7555 for (unsigned I = 0, E = Src1Regs.size(); I != E; ++I) {
 7556 auto Select = MIRBuilder.buildSelect(NarrowTy,
 7557 CondReg, Src1Regs[I], Src2Regs[I]);
 7558 DstRegs.push_back(Select.getReg(0));
 7559 }
 7560
// And one select per leftover part, at LeftoverTy.
 7561 for (unsigned I = 0, E = Src1LeftoverRegs.size(); I != E; ++I) {
 7562 auto Select = MIRBuilder.buildSelect(
 7563 LeftoverTy, CondReg, Src1LeftoverRegs[I], Src2LeftoverRegs[I]);
 7564 DstLeftoverRegs.push_back(Select.getReg(0));
 7565 }
 7566
 7567 insertParts(DstReg, DstTy, NarrowTy, DstRegs,
 7568 LeftoverTy, DstLeftoverRegs);
 7569
 7570 MI.eraseFromParent();
 7571 return Legalized;
 7572}
7573
// Narrow-scalar legalization of G_CTLZ / G_CTLZ_ZERO_POISON when the source
// (TypeIdx 1) is exactly twice NarrowTy: unmerge into Lo/Hi halves and
// select between ctlz(Hi) and NarrowSize + ctlz(Lo).
// NOTE(review): this listing omits the opening signature line(s) (original
// 7574-7575, presumably narrowScalarCTLZ) and original line 7586 —
// presumably "MachineIRBuilder &B = MIRBuilder;", which 'B' below relies
// on; confirm against upstream.
 7576 LLT NarrowTy) {
 7577 if (TypeIdx != 1)
 7578 return UnableToLegalize;
 7579
 7580 auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();
 7581 unsigned NarrowSize = NarrowTy.getSizeInBits();
 7582
 7583 if (SrcTy.isScalar() && SrcTy.getSizeInBits() == 2 * NarrowSize) {
// For the zero-poison flavor, a zero Lo half is poison anyway, so the
// poison-propagating opcode can be used on the Lo half too.
 7584 const bool IsUndef = MI.getOpcode() == TargetOpcode::G_CTLZ_ZERO_POISON;
 7585
 7587 auto UnmergeSrc = B.buildUnmerge(NarrowTy, SrcReg);
 7588 // ctlz(Hi:Lo) -> Hi == 0 ? (NarrowSize + ctlz(Lo)) : ctlz(Hi)
 7589 auto C_0 = B.buildConstant(NarrowTy, 0);
// NOTE(review): this compare uses LLT::integer(1) while the CTTZ twin
// below uses LLT::scalar(1) — verify these are equivalent in this tree.
 7590 auto HiIsZero = B.buildICmp(CmpInst::ICMP_EQ, LLT::integer(1),
 7591 UnmergeSrc.getReg(1), C_0);
 7592 auto LoCTLZ = IsUndef ? B.buildCTLZ_ZERO_POISON(DstTy, UnmergeSrc.getReg(0))
 7593 : B.buildCTLZ(DstTy, UnmergeSrc.getReg(0));
 7594 auto C_NarrowSize = B.buildConstant(DstTy, NarrowSize);
 7595 auto HiIsZeroCTLZ = B.buildAdd(DstTy, LoCTLZ, C_NarrowSize);
// Hi is known non-zero on this path, so the zero-poison form is safe.
 7596 auto HiCTLZ = B.buildCTLZ_ZERO_POISON(DstTy, UnmergeSrc.getReg(1));
 7597 B.buildSelect(DstReg, HiIsZero, HiIsZeroCTLZ, HiCTLZ);
 7598
 7599 MI.eraseFromParent();
 7600 return Legalized;
 7601 }
 7602
 7603 return UnableToLegalize;
 7604}
7605
// Narrow-scalar legalization of G_CTTZ / G_CTTZ_ZERO_POISON when the source
// (TypeIdx 1) is exactly twice NarrowTy: unmerge into Lo/Hi halves and
// select between cttz(Lo) and cttz(Hi) + NarrowSize.
// NOTE(review): this listing omits the opening signature line(s) (original
// 7606-7607, presumably narrowScalarCTTZ) and original line 7618 —
// presumably "MachineIRBuilder &B = MIRBuilder;", which 'B' below relies
// on; confirm against upstream.
 7608 LLT NarrowTy) {
 7609 if (TypeIdx != 1)
 7610 return UnableToLegalize;
 7611
 7612 auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();
 7613 unsigned NarrowSize = NarrowTy.getSizeInBits();
 7614
 7615 if (SrcTy.isScalar() && SrcTy.getSizeInBits() == 2 * NarrowSize) {
// For the zero-poison flavor, a zero Hi half is poison anyway, so the
// poison-propagating opcode can be used on the Hi half too.
 7616 const bool IsUndef = MI.getOpcode() == TargetOpcode::G_CTTZ_ZERO_POISON;
 7617
 7619 auto UnmergeSrc = B.buildUnmerge(NarrowTy, SrcReg);
 7620 // cttz(Hi:Lo) -> Lo == 0 ? (cttz(Hi) + NarrowSize) : cttz(Lo)
 7621 auto C_0 = B.buildConstant(NarrowTy, 0);
 7622 auto LoIsZero = B.buildICmp(CmpInst::ICMP_EQ, LLT::scalar(1),
 7623 UnmergeSrc.getReg(0), C_0);
 7624 auto HiCTTZ = IsUndef ? B.buildCTTZ_ZERO_POISON(DstTy, UnmergeSrc.getReg(1))
 7625 : B.buildCTTZ(DstTy, UnmergeSrc.getReg(1));
 7626 auto C_NarrowSize = B.buildConstant(DstTy, NarrowSize);
 7627 auto LoIsZeroCTTZ = B.buildAdd(DstTy, HiCTTZ, C_NarrowSize);
// Lo is known non-zero on this path, so the zero-poison form is safe.
 7628 auto LoCTTZ = B.buildCTTZ_ZERO_POISON(DstTy, UnmergeSrc.getReg(0));
 7629 B.buildSelect(DstReg, LoIsZero, LoIsZeroCTTZ, LoCTTZ);
 7630
 7631 MI.eraseFromParent();
 7632 return Legalized;
 7633 }
 7634
 7635 return UnableToLegalize;
 7636}
7637
// Narrow-scalar legalization of a count-leading-sign-bits operation when
// the source (TypeIdx 1) is exactly twice NarrowTy: compute from Lo when Hi
// is all sign bits, otherwise count within Hi alone.
// NOTE(review): this listing omits the opening signature line(s) (original
// 7638-7639) and original line 7650/7651 — presumably
// "MachineIRBuilder &B = MIRBuilder;" used below; confirm against upstream.
 7640 LLT NarrowTy) {
 7641 if (TypeIdx != 1)
 7642 return UnableToLegalize;
 7643
 7644 auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();
 7645 unsigned NarrowSize = NarrowTy.getSizeInBits();
 7646
 7647 if (!SrcTy.isScalar() || SrcTy.getSizeInBits() != 2 * NarrowSize)
 7648 return UnableToLegalize;
 7649
 7652 auto UnmergeSrc = B.buildUnmerge(NarrowTy, SrcReg);
 7653 Register Lo = UnmergeSrc.getReg(0);
 7654 Register Hi = UnmergeSrc.getReg(1);
 7655
// Sign is Hi arithmetically shifted by NarrowSize-1: all-ones if Hi is
// negative, all-zeros otherwise.
 7656 auto ShAmt = B.buildConstant(NarrowTy, NarrowSize - 1);
 7657 auto Sign = B.buildAShr(NarrowTy, Hi, ShAmt);
 7658
// Hi consists entirely of sign bits iff Hi == Sign.
 7659 auto HiIsSign = B.buildICmp(CmpInst::ICMP_EQ, LLT::scalar(1), Hi, Sign);
 7660
 7661 // Invert Lo if Hi is negative. Then count the leading zeros. If there are no
 7662 // leading zeros, then the MSB of Lo is different than the MSB of Hi.
 7663 // Otherwise the leading zeros represent additional sign bits of the original
 7664 // value.
// NOTE(review): Lo and Sign have NarrowTy but the xor is built at DstTy —
// this appears to assume DstTy == NarrowTy on this path; confirm upstream.
 7665 auto LoInv = B.buildXor(DstTy, Lo, Sign);
 7666 auto LoCTLZ = B.buildCTLZ(DstTy, LoInv);
 7667
 7668 // Add NarrowSize-1 to LoCTLZ. This is the full CTLS if Hi is all sign bits.
 7669 auto C_NarrowSizeM1 = B.buildConstant(DstTy, NarrowSize - 1);
 7670 auto HiIsSignCTLS = B.buildAdd(DstTy, LoCTLZ, C_NarrowSizeM1);
 7671
 7672 auto HiCTLS = B.buildCTLS(DstTy, Hi);
 7673
 7674 B.buildSelect(DstReg, HiIsSign, HiIsSignCTLS, HiCTLS);
 7675
 7676 MI.eraseFromParent();
 7677 return Legalized;
 7678}
7679
// Narrow-scalar legalization of G_CTPOP when the source (TypeIdx 1) is
// exactly twice NarrowTy: popcount each half and add the results — the bit
// count of a concatenation is the sum of the halves' counts.
// NOTE(review): the opening signature line(s) are omitted from this listing
// (original lines 7680-7681) — presumably narrowScalarCTPOP; confirm
// against upstream.
 7682 LLT NarrowTy) {
 7683 if (TypeIdx != 1)
 7684 return UnableToLegalize;
 7685
 7686 auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();
 7687 unsigned NarrowSize = NarrowTy.getSizeInBits();
 7688
 7689 if (SrcTy.isScalar() && SrcTy.getSizeInBits() == 2 * NarrowSize) {
 7690 auto UnmergeSrc = MIRBuilder.buildUnmerge(NarrowTy, MI.getOperand(1));
 7691
 7692 auto LoCTPOP = MIRBuilder.buildCTPOP(DstTy, UnmergeSrc.getReg(0));
 7693 auto HiCTPOP = MIRBuilder.buildCTPOP(DstTy, UnmergeSrc.getReg(1));
 7694 MIRBuilder.buildAdd(DstReg, HiCTPOP, LoCTPOP);
 7695
 7696 MI.eraseFromParent();
 7697 return Legalized;
 7698 }
 7699
 7700 return UnableToLegalize;
 7701}
7702
// Narrow-scalar legalization of G_FLDEXP's exponent operand (TypeIdx 1):
// clamp the exponent into NarrowTy's signed range, then truncate it in
// place. Clamping is safe because ldexp saturates to inf/0 for exponents
// far outside the floating-point format's range anyway.
// NOTE(review): this listing omits the opening signature line(s) (original
// 7703-7704) and original line 7709 — presumably
// "MachineIRBuilder &B = MIRBuilder;" used below; confirm against upstream.
 7705 LLT NarrowTy) {
 7706 if (TypeIdx != 1)
 7707 return UnableToLegalize;
 7708
 7710 Register ExpReg = MI.getOperand(2).getReg();
 7711 LLT ExpTy = MRI.getType(ExpReg);
 7712
 7713 unsigned ClampSize = NarrowTy.getScalarSizeInBits();
 7714
 7715 // Clamp the exponent to the range of the target type.
 7716 auto MinExp = B.buildConstant(ExpTy, minIntN(ClampSize));
 7717 auto ClampMin = B.buildSMax(ExpTy, ExpReg, MinExp);
 7718 auto MaxExp = B.buildConstant(ExpTy, maxIntN(ClampSize));
 7719 auto Clamp = B.buildSMin(ExpTy, ClampMin, MaxExp);
 7720
// The instruction itself survives; only its exponent operand is swapped
// for the clamped, truncated value.
 7721 auto Trunc = B.buildTrunc(NarrowTy, Clamp);
 7722 Observer.changingInstr(MI);
 7723 MI.getOperand(2).setReg(Trunc.getReg(0));
 7724 Observer.changedInstr(MI);
 7725 return Legalized;
 7726}
7727
// Lowering of the bit-counting family (G_CTLZ[_ZERO_POISON],
// G_CTTZ[_ZERO_POISON], G_CTPOP, G_CTLS) to simpler operations, preferring
// whichever related opcode the target reports as Legal/Libcall/Custom.
// NOTE(review): this listing omits the opening signature line(s) (original
// before 7730 — presumably lowerBitCount(MachineInstr &MI)) and original
// line 7835 — presumably "MachineIRBuilder &B = MIRBuilder;" used in the
// G_CTPOP case; confirm against upstream.
 7730 unsigned Opc = MI.getOpcode();
 7731 const auto &TII = MIRBuilder.getTII();
 7732 auto isSupported = [this](const LegalityQuery &Q) {
 7733 auto QAction = LI.getAction(Q).Action;
 7734 return QAction == Legal || QAction == Libcall || QAction == Custom;
 7735 };
 7736 switch (Opc) {
 7737 default:
 7738 return UnableToLegalize;
 7739 case TargetOpcode::G_CTLZ_ZERO_POISON: {
 7740 // This trivially expands to CTLZ.
 7741 Observer.changingInstr(MI);
 7742 MI.setDesc(TII.get(TargetOpcode::G_CTLZ));
 7743 Observer.changedInstr(MI);
 7744 return Legalized;
 7745 }
 7746 case TargetOpcode::G_CTLZ: {
 7747 auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();
 7748 unsigned Len = SrcTy.getScalarSizeInBits();
 7749
 7750 if (isSupported({TargetOpcode::G_CTLZ_ZERO_POISON, {DstTy, SrcTy}})) {
 7751 // If CTLZ_ZERO_POISON is supported, emit that and a select for zero.
 7752 auto CtlzZU = MIRBuilder.buildCTLZ_ZERO_POISON(DstTy, SrcReg);
 7753 auto ZeroSrc = MIRBuilder.buildConstant(SrcTy, 0);
 7754 auto ICmp = MIRBuilder.buildICmp(
 7755 CmpInst::ICMP_EQ, SrcTy.changeElementSize(1), SrcReg, ZeroSrc);
 7756 auto LenConst = MIRBuilder.buildConstant(DstTy, Len);
 7757 MIRBuilder.buildSelect(DstReg, ICmp, LenConst, CtlzZU);
 7758 MI.eraseFromParent();
 7759 return Legalized;
 7760 }
 7761 // for now, we do this:
 7762 // NewLen = NextPowerOf2(Len);
 7763 // x = x | (x >> 1);
 7764 // x = x | (x >> 2);
 7765 // ...
 7766 // x = x | (x >>16);
 7767 // x = x | (x >>32); // for 64-bit input
 7768 // Upto NewLen/2
 7769 // return Len - popcount(x);
 7770 //
 7771 // Ref: "Hacker's Delight" by Henry Warren
// Smearing all set bits downward turns "leading zeros of x" into
// "Len - popcount(smeared x)".
 7772 Register Op = SrcReg;
 7773 unsigned NewLen = PowerOf2Ceil(Len);
 7774 for (unsigned i = 0; (1U << i) <= (NewLen / 2); ++i) {
 7775 auto MIBShiftAmt = MIRBuilder.buildConstant(SrcTy, 1ULL << i);
 7776 auto MIBOp = MIRBuilder.buildOr(
 7777 SrcTy, Op, MIRBuilder.buildLShr(SrcTy, Op, MIBShiftAmt));
 7778 Op = MIBOp.getReg(0);
 7779 }
 7780 auto MIBPop = MIRBuilder.buildCTPOP(DstTy, Op);
 7781 MIRBuilder.buildSub(MI.getOperand(0), MIRBuilder.buildConstant(DstTy, Len),
 7782 MIBPop);
 7783 MI.eraseFromParent();
 7784 return Legalized;
 7785 }
 7786 case TargetOpcode::G_CTTZ_ZERO_POISON: {
 7787 // This trivially expands to CTTZ.
 7788 Observer.changingInstr(MI);
 7789 MI.setDesc(TII.get(TargetOpcode::G_CTTZ));
 7790 Observer.changedInstr(MI);
 7791 return Legalized;
 7792 }
 7793 case TargetOpcode::G_CTTZ: {
 7794 auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();
 7795
 7796 unsigned Len = SrcTy.getScalarSizeInBits();
 7797 if (isSupported({TargetOpcode::G_CTTZ_ZERO_POISON, {DstTy, SrcTy}})) {
 7798 // If CTTZ_ZERO_POISON is legal or custom, emit that and a select with
 7799 // zero.
 7800 auto CttzZU = MIRBuilder.buildCTTZ_ZERO_POISON(DstTy, SrcReg);
 7801 auto Zero = MIRBuilder.buildConstant(SrcTy, 0);
// NOTE(review): the compare type derives from DstTy here but from SrcTy in
// the G_CTLZ case above — verify the intended element count source.
 7802 auto ICmp = MIRBuilder.buildICmp(
 7803 CmpInst::ICMP_EQ, DstTy.changeElementSize(1), SrcReg, Zero);
 7804 auto LenConst = MIRBuilder.buildConstant(DstTy, Len);
 7805 MIRBuilder.buildSelect(DstReg, ICmp, LenConst, CttzZU);
 7806 MI.eraseFromParent();
 7807 return Legalized;
 7808 }
 7809 // for now, we use: { return popcount(~x & (x - 1)); }
 7810 // unless the target has ctlz but not ctpop, in which case we use:
 7811 // { return 32 - nlz(~x & (x-1)); }
 7812 // Ref: "Hacker's Delight" by Henry Warren
// ~x & (x - 1) isolates the trailing zeros of x as a mask of set bits.
 7813 auto MIBCstNeg1 = MIRBuilder.buildConstant(SrcTy, -1);
 7814 auto MIBNot = MIRBuilder.buildXor(SrcTy, SrcReg, MIBCstNeg1);
 7815 auto MIBTmp = MIRBuilder.buildAnd(
 7816 SrcTy, MIBNot, MIRBuilder.buildAdd(SrcTy, SrcReg, MIBCstNeg1));
 7817 if (!isSupported({TargetOpcode::G_CTPOP, {SrcTy, SrcTy}}) &&
 7818 isSupported({TargetOpcode::G_CTLZ, {SrcTy, SrcTy}})) {
 7819 auto MIBCstLen = MIRBuilder.buildConstant(SrcTy, Len);
 7820 MIRBuilder.buildSub(MI.getOperand(0), MIBCstLen,
 7821 MIRBuilder.buildCTLZ(SrcTy, MIBTmp));
 7822 MI.eraseFromParent();
 7823 return Legalized;
 7824 }
// Mutate the instruction in place into G_CTPOP of the trailing-zero mask.
 7825 Observer.changingInstr(MI);
 7826 MI.setDesc(TII.get(TargetOpcode::G_CTPOP));
 7827 MI.getOperand(1).setReg(MIBTmp.getReg(0));
 7828 Observer.changedInstr(MI);
 7829 return Legalized;
 7830 }
 7831 case TargetOpcode::G_CTPOP: {
 7832 Register SrcReg = MI.getOperand(1).getReg();
 7833 LLT Ty = MRI.getType(SrcReg);
 7834 unsigned Size = Ty.getScalarSizeInBits();
 7836
 7837 // Bail out on irregular type lengths.
 7838 if (Size > 128 || Size % 8 != 0)
 7839 return UnableToLegalize;
 7840
 7841 // Count set bits in blocks of 2 bits. Default approach would be
 7842 // B2Count = { val & 0x55555555 } + { (val >> 1) & 0x55555555 }
 7843 // We use following formula instead:
 7844 // B2Count = val - { (val >> 1) & 0x55555555 }
 7845 // since it gives same result in blocks of 2 with one instruction less.
 7846 auto C_1 = B.buildConstant(Ty, 1);
 7847 auto B2Set1LoTo1Hi = B.buildLShr(Ty, SrcReg, C_1);
 7848 APInt B2Mask1HiTo0 = APInt::getSplat(Size, APInt(8, 0x55));
 7849 auto C_B2Mask1HiTo0 = B.buildConstant(Ty, B2Mask1HiTo0);
 7850 auto B2Count1Hi = B.buildAnd(Ty, B2Set1LoTo1Hi, C_B2Mask1HiTo0);
 7851 auto B2Count = B.buildSub(Ty, SrcReg, B2Count1Hi);
 7852
 7853 // In order to get count in blocks of 4 add values from adjacent block of 2.
 7854 // B4Count = { B2Count & 0x33333333 } + { (B2Count >> 2) & 0x33333333 }
 7855 auto C_2 = B.buildConstant(Ty, 2);
 7856 auto B4Set2LoTo2Hi = B.buildLShr(Ty, B2Count, C_2);
 7857 APInt B4Mask2HiTo0 = APInt::getSplat(Size, APInt(8, 0x33));
 7858 auto C_B4Mask2HiTo0 = B.buildConstant(Ty, B4Mask2HiTo0);
 7859 auto B4HiB2Count = B.buildAnd(Ty, B4Set2LoTo2Hi, C_B4Mask2HiTo0);
 7860 auto B4LoB2Count = B.buildAnd(Ty, B2Count, C_B4Mask2HiTo0);
 7861 auto B4Count = B.buildAdd(Ty, B4HiB2Count, B4LoB2Count);
 7862
 7863 // For count in blocks of 8 bits we don't have to mask high 4 bits before
 7864 // addition since count value sits in range {0,...,8} and 4 bits are enough
 7865 // to hold such binary values. After addition high 4 bits still hold count
 7866 // of set bits in high 4 bit block, set them to zero and get 8 bit result.
 7867 // B8Count = { B4Count + (B4Count >> 4) } & 0x0F0F0F0F
 7868 auto C_4 = B.buildConstant(Ty, 4);
 7869 auto B8HiB4Count = B.buildLShr(Ty, B4Count, C_4);
 7870 auto B8CountDirty4Hi = B.buildAdd(Ty, B8HiB4Count, B4Count);
 7871 APInt B8Mask4HiTo0 = APInt::getSplat(Size, APInt(8, 0x0F));
 7872 auto C_B8Mask4HiTo0 = B.buildConstant(Ty, B8Mask4HiTo0);
 7873 auto B8Count = B.buildAnd(Ty, B8CountDirty4Hi, C_B8Mask4HiTo0);
 7874
 7875 assert(Size <= 128 && "Scalar size is too large for CTPOP lower algorithm");
 7876
 7877 // Avoid the multiply when shift-add is cheaper.
 7878 if (Size == 16 && !Ty.isVector()) {
 7879 // v = (v + (v >> 8)) & 0xFF;
 7880 auto C_8 = B.buildConstant(Ty, 8);
 7881 auto HighSum = B.buildLShr(Ty, B8Count, C_8);
 7882 auto Res = B.buildAdd(Ty, B8Count, HighSum);
 7883 B.buildAnd(MI.getOperand(0).getReg(), Res, B.buildConstant(Ty, 0xFF));
 7884 MI.eraseFromParent();
 7885 return Legalized;
 7886 }
 7887
 7888 // 8 bits can hold CTPOP result of 128 bit int or smaller. Mul with this
 7889 // bitmask will set 8 msb in ResTmp to sum of all B8Counts in 8 bit blocks.
 7890 auto MulMask = B.buildConstant(Ty, APInt::getSplat(Size, APInt(8, 0x01)));
 7891
 7892 // Shift count result from 8 high bits to low bits.
 7893 auto C_SizeM8 = B.buildConstant(Ty, Size - 8);
 7894
 7895 auto IsMulSupported = [this](const LLT Ty) {
 7896 auto Action = LI.getAction({TargetOpcode::G_MUL, {Ty}}).Action;
 7897 return Action == Legal || Action == WidenScalar || Action == Custom;
 7898 };
 7899 if (IsMulSupported(Ty)) {
 7900 auto ResTmp = B.buildMul(Ty, B8Count, MulMask);
 7901 B.buildLShr(MI.getOperand(0).getReg(), ResTmp, C_SizeM8);
 7902 } else {
// No usable multiply: emulate the 0x01…01 multiply with a shift-add ladder
// that accumulates the byte sums into the top byte.
 7903 auto ResTmp = B8Count;
 7904 for (unsigned Shift = 8; Shift < Size; Shift *= 2) {
 7905 auto ShiftC = B.buildConstant(Ty, Shift);
 7906 auto Shl = B.buildShl(Ty, ResTmp, ShiftC);
 7907 ResTmp = B.buildAdd(Ty, ResTmp, Shl);
 7908 }
 7909 B.buildLShr(MI.getOperand(0).getReg(), ResTmp, C_SizeM8);
 7910 }
 7911 MI.eraseFromParent();
 7912 return Legalized;
 7913 }
 7914 case TargetOpcode::G_CTLS: {
 7915 auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();
 7916
 7917 // ctls(x) -> ctlz(x ^ (x >> (N - 1))) - 1
// XOR-ing with the sign-smear clears all redundant sign bits, so ctlz of
// the result counts sign bits plus one (hence the final subtract).
 7918 auto SignIdxC =
 7919 MIRBuilder.buildConstant(SrcTy, SrcTy.getScalarSizeInBits() - 1);
 7920 auto OneC = MIRBuilder.buildConstant(DstTy, 1);
 7921
 7922 auto Shr = MIRBuilder.buildAShr(SrcTy, SrcReg, SignIdxC);
 7923
 7924 auto Xor = MIRBuilder.buildXor(SrcTy, SrcReg, Shr);
 7925 auto Ctlz = MIRBuilder.buildCTLZ(DstTy, Xor);
 7926
 7927 MIRBuilder.buildSub(DstReg, Ctlz, OneC);
 7928 MI.eraseFromParent();
 7929 return Legalized;
 7930 }
 7931 }
 7932}
7933
 7934// Check that (every element of) Reg is undef or not an exact multiple of BW.
// Used by the funnel-shift/rotate lowerings to decide whether the shift
// amount can be proven non-zero mod BW (allowing the cheaper expansion).
// NOTE(review): this listing omits original line 7935 (presumably the
// "static bool isNonZeroModBitWidthOrUndef(const MachineRegisterInfo &MRI,"
// signature line) and line 7941 (presumably the dyn_cast producing CI);
// confirm against upstream.
 7936 Register Reg, unsigned BW) {
 7937 return matchUnaryPredicate(
 7938 MRI, Reg,
 7939 [=](const Constant *C) {
 7940 // Null constant here means an undef.
 7942 return !CI || CI->getValue().urem(BW) != 0;
 7943 },
 7944 /*AllowUndefs*/ true);
 7945}
7946
// Lower G_FSHL/G_FSHR by rewriting it as the opposite-direction funnel
// shift, for targets where only one direction is legal. Requires a
// power-of-2 bit width so that negating/inverting the shift amount works.
// NOTE(review): the opening signature line(s) before 7949 are omitted from
// this listing — presumably lowerFunnelShiftWithInverse; confirm upstream.
 7949 auto [Dst, X, Y, Z] = MI.getFirst4Regs();
 7950 LLT Ty = MRI.getType(Dst);
 7951 LLT ShTy = MRI.getType(Z);
 7952
 7953 unsigned BW = Ty.getScalarSizeInBits();
 7954
 7955 if (!isPowerOf2_32(BW))
 7956 return UnableToLegalize;
 7957
 7958 const bool IsFSHL = MI.getOpcode() == TargetOpcode::G_FSHL;
 7959 unsigned RevOpcode = IsFSHL ? TargetOpcode::G_FSHR : TargetOpcode::G_FSHL;
 7960
 7961 if (isNonZeroModBitWidthOrUndef(MRI, Z, BW)) {
 7962 // fshl X, Y, Z -> fshr X, Y, -Z
 7963 // fshr X, Y, Z -> fshl X, Y, -Z
 7964 auto Zero = MIRBuilder.buildConstant(ShTy, 0);
// NOTE(review): Zero has ShTy but the sub is built at Ty — fine when
// Ty == ShTy; verify intent for the Ty != ShTy case.
 7965 Z = MIRBuilder.buildSub(Ty, Zero, Z).getReg(0);
 7966 } else {
 7967 // fshl X, Y, Z -> fshr (srl X, 1), (fshr X, Y, 1), ~Z
 7968 // fshr X, Y, Z -> fshl (fshl X, Y, 1), (shl Y, 1), ~Z
// Z may be a multiple of BW (shift of zero); pre-shifting by one and using
// ~Z = (BW - 1) - Z mod BW keeps every emitted shift amount in range.
 7969 auto One = MIRBuilder.buildConstant(ShTy, 1);
 7970 if (IsFSHL) {
 7971 Y = MIRBuilder.buildInstr(RevOpcode, {Ty}, {X, Y, One}).getReg(0);
 7972 X = MIRBuilder.buildLShr(Ty, X, One).getReg(0);
 7973 } else {
 7974 X = MIRBuilder.buildInstr(RevOpcode, {Ty}, {X, Y, One}).getReg(0);
 7975 Y = MIRBuilder.buildShl(Ty, Y, One).getReg(0);
 7976 }
 7977
 7978 Z = MIRBuilder.buildNot(ShTy, Z).getReg(0);
 7979 }
 7980
 7981 MIRBuilder.buildInstr(RevOpcode, {Dst}, {X, Y, Z});
 7982 MI.eraseFromParent();
 7983 return Legalized;
 7984}
7985
// Lower G_FSHL/G_FSHR into plain shl/lshr/or, carefully avoiding
// undefined shifts by BW when the amount may be a multiple of the width.
// NOTE(review): the opening signature line(s) before 7988 are omitted from
// this listing — presumably lowerFunnelShiftAsShifts; confirm upstream.
 7988 auto [Dst, X, Y, Z] = MI.getFirst4Regs();
 7989 LLT Ty = MRI.getType(Dst);
 7990 LLT ShTy = MRI.getType(Z);
 7991
 7992 const unsigned BW = Ty.getScalarSizeInBits();
 7993 const bool IsFSHL = MI.getOpcode() == TargetOpcode::G_FSHL;
 7994
 7995 Register ShX, ShY;
 7996 Register ShAmt, InvShAmt;
 7997
 7998 // FIXME: Emit optimized urem by constant instead of letting it expand later.
 7999 if (isNonZeroModBitWidthOrUndef(MRI, Z, BW)) {
 8000 // fshl: X << C | Y >> (BW - C)
 8001 // fshr: X << (BW - C) | Y >> C
 8002 // where C = Z % BW is not zero
// C != 0 guarantees BW - C < BW, so both shifts are in range.
 8003 auto BitWidthC = MIRBuilder.buildConstant(ShTy, BW);
 8004 ShAmt = MIRBuilder.buildURem(ShTy, Z, BitWidthC).getReg(0);
 8005 InvShAmt = MIRBuilder.buildSub(ShTy, BitWidthC, ShAmt).getReg(0);
 8006 ShX = MIRBuilder.buildShl(Ty, X, IsFSHL ? ShAmt : InvShAmt).getReg(0);
 8007 ShY = MIRBuilder.buildLShr(Ty, Y, IsFSHL ? InvShAmt : ShAmt).getReg(0);
 8008 } else {
 8009 // fshl: X << (Z % BW) | Y >> 1 >> (BW - 1 - (Z % BW))
 8010 // fshr: X << 1 << (BW - 1 - (Z % BW)) | Y >> (Z % BW)
// The extra fixed shift-by-one keeps the variable amounts <= BW - 1 even
// when Z % BW == 0.
 8011 auto Mask = MIRBuilder.buildConstant(ShTy, BW - 1);
 8012 if (isPowerOf2_32(BW)) {
 8013 // Z % BW -> Z & (BW - 1)
 8014 ShAmt = MIRBuilder.buildAnd(ShTy, Z, Mask).getReg(0);
 8015 // (BW - 1) - (Z % BW) -> ~Z & (BW - 1)
 8016 auto NotZ = MIRBuilder.buildNot(ShTy, Z);
 8017 InvShAmt = MIRBuilder.buildAnd(ShTy, NotZ, Mask).getReg(0);
 8018 } else {
 8019 auto BitWidthC = MIRBuilder.buildConstant(ShTy, BW);
 8020 ShAmt = MIRBuilder.buildURem(ShTy, Z, BitWidthC).getReg(0);
 8021 InvShAmt = MIRBuilder.buildSub(ShTy, Mask, ShAmt).getReg(0);
 8022 }
 8023
 8024 auto One = MIRBuilder.buildConstant(ShTy, 1);
 8025 if (IsFSHL) {
 8026 ShX = MIRBuilder.buildShl(Ty, X, ShAmt).getReg(0);
 8027 auto ShY1 = MIRBuilder.buildLShr(Ty, Y, One);
 8028 ShY = MIRBuilder.buildLShr(Ty, ShY1, InvShAmt).getReg(0);
 8029 } else {
 8030 auto ShX1 = MIRBuilder.buildShl(Ty, X, One);
 8031 ShX = MIRBuilder.buildShl(Ty, ShX1, InvShAmt).getReg(0);
 8032 ShY = MIRBuilder.buildLShr(Ty, Y, ShAmt).getReg(0);
 8033 }
 8034 }
 8035
// The two halves never overlap, so the OR can carry the disjoint flag.
 8036 MIRBuilder.buildOr(Dst, ShX, ShY, MachineInstr::Disjoint);
 8037 MI.eraseFromParent();
 8038 return Legalized;
 8039}
8040
// Dispatcher for funnel-shift lowering: prefer rewriting via the
// opposite-direction funnel shift when that opcode is not itself marked
// Lower; otherwise fall back to the plain-shift expansion.
// NOTE(review): the opening signature line(s) before 8043 are omitted from
// this listing — presumably lowerFunnelShift(MachineInstr &MI); confirm
// against upstream.
 8043 // These operations approximately do the following (while avoiding undefined
 8044 // shifts by BW):
 8045 // G_FSHL: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
 8046 // G_FSHR: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
 8047 Register Dst = MI.getOperand(0).getReg();
 8048 LLT Ty = MRI.getType(Dst);
 8049 LLT ShTy = MRI.getType(MI.getOperand(3).getReg());
 8050
 8051 bool IsFSHL = MI.getOpcode() == TargetOpcode::G_FSHL;
 8052 unsigned RevOpcode = IsFSHL ? TargetOpcode::G_FSHR : TargetOpcode::G_FSHL;
 8053
 8054 // TODO: Use smarter heuristic that accounts for vector legalization.
// If the reversed opcode would itself be lowered, inverting would just
// bounce back here — go straight to shifts.
 8055 if (LI.getAction({RevOpcode, {Ty, ShTy}}).Action == Lower)
 8056 return lowerFunnelShiftAsShifts(MI);
 8057
 8058 // This only works for powers of 2, fallback to shifts if it fails.
 8059 LegalizerHelper::LegalizeResult Result = lowerFunnelShiftWithInverse(MI);
 8060 if (Result == UnableToLegalize)
 8061 return lowerFunnelShiftAsShifts(MI);
 8062 return Result;
 8063}
8064
// Lower a wide vector extension (zext/sext/anyext) whose per-element step
// is more than a doubling: extend to an intermediate double-width type,
// unmerge it into two halves, extend each half, and merge the results.
// NOTE(review): the opening signature line(s) before 8066 are omitted from
// this listing (presumably lowerEXT), as are the element-count expressions
// on original lines 8087/8092 (the changeElementCount arguments); confirm
// against upstream.
 8066 auto [Dst, Src] = MI.getFirst2Regs();
 8067 LLT DstTy = MRI.getType(Dst);
 8068 LLT SrcTy = MRI.getType(Src);
 8069
 8070 uint32_t DstTySize = DstTy.getSizeInBits();
 8071 uint32_t DstTyScalarSize = DstTy.getScalarSizeInBits();
 8072 uint32_t SrcTyScalarSize = SrcTy.getScalarSizeInBits();
 8073
// Power-of-2 sizes are required so the repeated halving/doubling below
// lands on representable types.
 8074 if (!isPowerOf2_32(DstTySize) || !isPowerOf2_32(DstTyScalarSize) ||
 8075 !isPowerOf2_32(SrcTyScalarSize))
 8076 return UnableToLegalize;
 8077
 8078 // The step between extend is too large, split it by creating an intermediate
 8079 // extend instruction
 8080 if (SrcTyScalarSize * 2 < DstTyScalarSize) {
 8081 LLT MidTy = SrcTy.changeElementSize(SrcTyScalarSize * 2);
 8082 // If the destination type is illegal, split it into multiple statements
 8083 // zext x -> zext(merge(zext(unmerge), zext(unmerge)))
 8084 auto NewExt = MIRBuilder.buildInstr(MI.getOpcode(), {MidTy}, {Src});
 8085 // Unmerge the vector
 8086 LLT EltTy = MidTy.changeElementCount(
 8088 auto UnmergeSrc = MIRBuilder.buildUnmerge(EltTy, NewExt);
 8089
 8090 // ZExt the vectors
 8091 LLT ZExtResTy = DstTy.changeElementCount(
 8093 auto ZExtRes1 = MIRBuilder.buildInstr(MI.getOpcode(), {ZExtResTy},
 8094 {UnmergeSrc.getReg(0)});
 8095 auto ZExtRes2 = MIRBuilder.buildInstr(MI.getOpcode(), {ZExtResTy},
 8096 {UnmergeSrc.getReg(1)});
 8097
 8098 // Merge the ending vectors
 8099 MIRBuilder.buildMergeLikeInstr(Dst, {ZExtRes1, ZExtRes2});
 8100
 8101 MI.eraseFromParent();
 8102 return Legalized;
 8103 }
 8104 return UnableToLegalize;
 8105}
8106
// Lower a large vector G_TRUNC by splitting the source in half, truncating
// each half to an intermediate element width (at most a halving per step),
// concatenating, and truncating again if needed.
// NOTE(review): the opening signature line(s) before 8108 are omitted from
// this listing — presumably lowerTRUNC(MachineInstr &MI) — as is original
// line 8126 (part of the condition around 8125-8128); confirm upstream.
 8108 // MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
 8109 MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
 8110 // Similar to how operand splitting is done in SelectiondDAG, we can handle
 8111 // %res(v8s8) = G_TRUNC %in(v8s32) by generating:
 8112 // %inlo(<4x s32>), %inhi(<4 x s32>) = G_UNMERGE %in(<8 x s32>)
 8113 // %lo16(<4 x s16>) = G_TRUNC %inlo
 8114 // %hi16(<4 x s16>) = G_TRUNC %inhi
 8115 // %in16(<8 x s16>) = G_CONCAT_VECTORS %lo16, %hi16
 8116 // %res(<8 x s8>) = G_TRUNC %in16
 8117
 8118 assert(MI.getOpcode() == TargetOpcode::G_TRUNC);
 8119
 8120 Register DstReg = MI.getOperand(0).getReg();
 8121 Register SrcReg = MI.getOperand(1).getReg();
 8122 LLT DstTy = MRI.getType(DstReg);
 8123 LLT SrcTy = MRI.getType(SrcReg);
 8124
 8125 if (DstTy.isVector() && isPowerOf2_32(DstTy.getNumElements()) &&
 8127 isPowerOf2_32(SrcTy.getNumElements()) &&
 8128 isPowerOf2_32(SrcTy.getScalarSizeInBits())) {
 8129 // Split input type.
 8130 LLT SplitSrcTy = SrcTy.changeElementCount(
 8131 SrcTy.getElementCount().divideCoefficientBy(2));
 8132
 8133 // First, split the source into two smaller vectors.
 8134 SmallVector<Register, 2> SplitSrcs;
 8135 extractParts(SrcReg, SplitSrcTy, 2, SplitSrcs, MIRBuilder, MRI);
 8136
 8137 // Truncate the splits into intermediate narrower elements.
// If more than one halving remains, stop at double the final width so the
// last step is itself a single trunc; otherwise go straight to DstTy width.
 8138 LLT InterTy;
 8139 if (DstTy.getScalarSizeInBits() * 2 < SrcTy.getScalarSizeInBits())
 8140 InterTy = SplitSrcTy.changeElementSize(DstTy.getScalarSizeInBits() * 2);
 8141 else
 8142 InterTy = SplitSrcTy.changeElementSize(DstTy.getScalarSizeInBits());
 8143 for (Register &Src : SplitSrcs)
 8144 Src = MIRBuilder.buildTrunc(InterTy, Src).getReg(0);
 8145
 8146 // Combine the new truncates into one vector
 8147 auto Merge = MIRBuilder.buildMergeLikeInstr(
 8148 DstTy.changeElementSize(InterTy.getScalarSizeInBits()), SplitSrcs);
 8149
 8150 // Truncate the new vector to the final result type
 8151 if (DstTy.getScalarSizeInBits() * 2 < SrcTy.getScalarSizeInBits())
 8152 MIRBuilder.buildTrunc(MI.getOperand(0).getReg(), Merge.getReg(0));
 8153 else
 8154 MIRBuilder.buildCopy(MI.getOperand(0).getReg(), Merge.getReg(0));
 8155
 8156 MI.eraseFromParent();
 8157
 8158 return Legalized;
 8159 }
 8160 return UnableToLegalize;
 8161}
8162
// Lower G_ROTL/G_ROTR by emitting the opposite-direction rotate with a
// negated amount: rotl(x, c) == rotr(x, -c) (mod bit width), and vice
// versa. Caller guarantees the reverse rotate is legal/custom and the
// element size is a power of 2 so the negation wraps correctly.
// NOTE(review): the opening signature line(s) before 8165 are omitted from
// this listing — presumably lowerRotateWithReverseRotate; confirm upstream.
 8165 auto [Dst, DstTy, Src, SrcTy, Amt, AmtTy] = MI.getFirst3RegLLTs();
 8166 auto Zero = MIRBuilder.buildConstant(AmtTy, 0);
 8167 bool IsLeft = MI.getOpcode() == TargetOpcode::G_ROTL;
 8168 unsigned RevRot = IsLeft ? TargetOpcode::G_ROTR : TargetOpcode::G_ROTL;
 8169 auto Neg = MIRBuilder.buildSub(AmtTy, Zero, Amt);
 8170 MIRBuilder.buildInstr(RevRot, {Dst}, {Src, Neg});
 8171 MI.eraseFromParent();
 8172 return Legalized;
 8173}
8174
// Lower G_ROTL/G_ROTR: prefer the reverse rotate, then a funnel shift
// (rot is fsh with both value operands equal), and finally expand to a
// pair of plain shifts OR-ed together, masking or urem-ing the amount to
// keep every emitted shift in range.
// NOTE(review): the opening signature line(s) before 8176 are omitted from
// this listing — presumably lowerRotate(MachineInstr &MI); confirm
// against upstream.
 8176 auto [Dst, DstTy, Src, SrcTy, Amt, AmtTy] = MI.getFirst3RegLLTs();
 8177
 8178 unsigned EltSizeInBits = DstTy.getScalarSizeInBits();
 8179 bool IsLeft = MI.getOpcode() == TargetOpcode::G_ROTL;
 8180
 8181 MIRBuilder.setInstrAndDebugLoc(MI);
 8182
 8183 // If a rotate in the other direction is supported, use it.
 8184 unsigned RevRot = IsLeft ? TargetOpcode::G_ROTR : TargetOpcode::G_ROTL;
 8185 if (LI.isLegalOrCustom({RevRot, {DstTy, SrcTy}}) &&
 8186 isPowerOf2_32(EltSizeInBits))
 8187 return lowerRotateWithReverseRotate(MI);
 8188
 8189 // If a funnel shift is supported, use it.
// rot(x, amt) is fsh(x, x, amt): the same register feeds both value inputs.
 8190 unsigned FShOpc = IsLeft ? TargetOpcode::G_FSHL : TargetOpcode::G_FSHR;
 8191 unsigned RevFsh = !IsLeft ? TargetOpcode::G_FSHL : TargetOpcode::G_FSHR;
 8192 bool IsFShLegal = false;
 8193 if ((IsFShLegal = LI.isLegalOrCustom({FShOpc, {DstTy, AmtTy}})) ||
 8194 LI.isLegalOrCustom({RevFsh, {DstTy, AmtTy}})) {
 8195 auto buildFunnelShift = [&](unsigned Opc, Register R1, Register R2,
 8196 Register R3) {
 8197 MIRBuilder.buildInstr(Opc, {R1}, {R2, R2, R3});
 8198 MI.eraseFromParent();
 8199 return Legalized;
 8200 };
 8201 // If a funnel shift in the other direction is supported, use it.
 8202 if (IsFShLegal) {
 8203 return buildFunnelShift(FShOpc, Dst, Src, Amt);
 8204 } else if (isPowerOf2_32(EltSizeInBits)) {
 8205 Amt = MIRBuilder.buildNeg(DstTy, Amt).getReg(0);
 8206 return buildFunnelShift(RevFsh, Dst, Src, Amt);
 8207 }
 8208 }
 8209
 8210 auto Zero = MIRBuilder.buildConstant(AmtTy, 0);
 8211 unsigned ShOpc = IsLeft ? TargetOpcode::G_SHL : TargetOpcode::G_LSHR;
 8212 unsigned RevShiftOpc = IsLeft ? TargetOpcode::G_LSHR : TargetOpcode::G_SHL;
 8213 auto BitWidthMinusOneC = MIRBuilder.buildConstant(AmtTy, EltSizeInBits - 1);
 8214 Register ShVal;
 8215 Register RevShiftVal;
 8216 if (isPowerOf2_32(EltSizeInBits)) {
 8217 // (rotl x, c) -> x << (c & (w - 1)) | x >> (-c & (w - 1))
 8218 // (rotr x, c) -> x >> (c & (w - 1)) | x << (-c & (w - 1))
 8219 auto NegAmt = MIRBuilder.buildSub(AmtTy, Zero, Amt);
 8220 auto ShAmt = MIRBuilder.buildAnd(AmtTy, Amt, BitWidthMinusOneC);
 8221 ShVal = MIRBuilder.buildInstr(ShOpc, {DstTy}, {Src, ShAmt}).getReg(0);
 8222 auto RevAmt = MIRBuilder.buildAnd(AmtTy, NegAmt, BitWidthMinusOneC);
 8223 RevShiftVal =
 8224 MIRBuilder.buildInstr(RevShiftOpc, {DstTy}, {Src, RevAmt}).getReg(0);
 8225 } else {
 8226 // (rotl x, c) -> x << (c % w) | x >> 1 >> (w - 1 - (c % w))
 8227 // (rotr x, c) -> x >> (c % w) | x << 1 << (w - 1 - (c % w))
// The fixed shift-by-one keeps the variable reverse amount <= w - 1 even
// when c % w == 0 (a full-width single shift would be undefined).
 8228 auto BitWidthC = MIRBuilder.buildConstant(AmtTy, EltSizeInBits);
 8229 auto ShAmt = MIRBuilder.buildURem(AmtTy, Amt, BitWidthC);
 8230 ShVal = MIRBuilder.buildInstr(ShOpc, {DstTy}, {Src, ShAmt}).getReg(0);
 8231 auto RevAmt = MIRBuilder.buildSub(AmtTy, BitWidthMinusOneC, ShAmt);
 8232 auto One = MIRBuilder.buildConstant(AmtTy, 1);
 8233 auto Inner = MIRBuilder.buildInstr(RevShiftOpc, {DstTy}, {Src, One});
 8234 RevShiftVal =
 8235 MIRBuilder.buildInstr(RevShiftOpc, {DstTy}, {Inner, RevAmt}).getReg(0);
 8236 }
// The two shifted values occupy disjoint bit ranges.
 8237 MIRBuilder.buildOr(Dst, ShVal, RevShiftVal, MachineInstr::Disjoint);
 8238 MI.eraseFromParent();
 8239 return Legalized;
 8240}
8241
// Expand s32 = G_UITOFP s64 using bit operations to an IEEE float
// representation.
  auto [Dst, Src] = MI.getFirst2Regs();
  const LLT S64 = LLT::scalar(64);
  const LLT S32 = LLT::scalar(32);
  const LLT S1 = LLT::scalar(1);

  assert(MRI.getType(Src) == S64 && MRI.getType(Dst) == S32);

  // Reference scalar algorithm:
  // unsigned cul2f(ulong u) {
  //   uint lz = clz(u);
  //   uint e = (u != 0) ? 127U + 63U - lz : 0;
  //   u = (u << lz) & 0x7fffffffffffffffUL;
  //   ulong t = u & 0xffffffffffUL;
  //   uint v = (e << 23) | (uint)(u >> 40);
  //   uint r = t > 0x8000000000UL ? 1U : (t == 0x8000000000UL ? v & 1U : 0U);
  //   return as_float(v + r);
  // }

  auto Zero32 = MIRBuilder.buildConstant(S32, 0);
  auto Zero64 = MIRBuilder.buildConstant(S64, 0);

  // lz = clz(u). The zero-input-poison variant is fine: the Src == 0 case is
  // selected away below (E becomes 0) and the mask makes the mantissa 0.
  auto LZ = MIRBuilder.buildCTLZ_ZERO_POISON(S32, Src);

  // e = 127 + 63 - lz: the biased f32 exponent of the leading one bit.
  auto K = MIRBuilder.buildConstant(S32, 127U + 63U);
  auto Sub = MIRBuilder.buildSub(S32, K, LZ);

  auto NotZero = MIRBuilder.buildICmp(CmpInst::ICMP_NE, S1, Src, Zero64);
  auto E = MIRBuilder.buildSelect(S32, NotZero, Sub, Zero32);

  // Normalize: shift the leading one up to bit 63, then clear it (it is the
  // implicit one of the IEEE encoding).
  auto Mask0 = MIRBuilder.buildConstant(S64, (-1ULL) >> 1);
  auto ShlLZ = MIRBuilder.buildShl(S64, Src, LZ);

  auto U = MIRBuilder.buildAnd(S64, ShlLZ, Mask0);

  // t = the low 40 bits that get rounded off below.
  auto Mask1 = MIRBuilder.buildConstant(S64, 0xffffffffffULL);
  auto T = MIRBuilder.buildAnd(S64, U, Mask1);

  // v = (e << 23) | top 23 mantissa bits.
  auto UShl = MIRBuilder.buildLShr(S64, U, MIRBuilder.buildConstant(S64, 40));
  auto ShlE = MIRBuilder.buildShl(S32, E, MIRBuilder.buildConstant(S32, 23));
  auto V = MIRBuilder.buildOr(S32, ShlE, MIRBuilder.buildTrunc(S32, UShl));

  // Round to nearest, ties to even: increment if t > half (0x8000000000), and
  // on an exact tie increment only when the mantissa LSB is set.
  auto C = MIRBuilder.buildConstant(S64, 0x8000000000ULL);
  auto RCmp = MIRBuilder.buildICmp(CmpInst::ICMP_UGT, S1, T, C);
  auto TCmp = MIRBuilder.buildICmp(CmpInst::ICMP_EQ, S1, T, C);
  auto One = MIRBuilder.buildConstant(S32, 1);

  auto VTrunc1 = MIRBuilder.buildAnd(S32, V, One);
  auto Select0 = MIRBuilder.buildSelect(S32, TCmp, VTrunc1, Zero32);
  auto R = MIRBuilder.buildSelect(S32, RCmp, One, Select0);
  MIRBuilder.buildAdd(Dst, V, R);

  MI.eraseFromParent();
  return Legalized;
}
8299
// Expand s32 = G_UITOFP s64 to an IEEE float representation using bit
// operations and G_SITOFP
  auto [Dst, Src] = MI.getFirst2Regs();
  const LLT S64 = LLT::scalar(64);
  const LLT S32 = LLT::scalar(32);
  const LLT S1 = LLT::scalar(1);

  assert(MRI.getType(Src) == S64 && MRI.getType(Dst) == S32);

  // For i64 < INT_MAX we simply reuse SITOFP.
  // Otherwise, divide i64 by 2, round result by ORing with the lowest bit
  // saved before division, convert to float by SITOFP, multiply the result
  // by 2.
  auto One = MIRBuilder.buildConstant(S64, 1);
  auto Zero = MIRBuilder.buildConstant(S64, 0);
  // Result if Src < INT_MAX
  auto SmallResult = MIRBuilder.buildSITOFP(S32, Src);
  // Result if Src >= INT_MAX
  // ORing the shifted-out bit back in keeps the sticky information needed for
  // a correctly rounded result after halving.
  auto Halved = MIRBuilder.buildLShr(S64, Src, One);
  auto LowerBit = MIRBuilder.buildAnd(S64, Src, One);
  auto RoundedHalved = MIRBuilder.buildOr(S64, Halved, LowerBit);
  auto HalvedFP = MIRBuilder.buildSITOFP(S32, RoundedHalved);
  // Doubling via x + x undoes the halving.
  auto LargeResult = MIRBuilder.buildFAdd(S32, HalvedFP, HalvedFP);
  // Check if the original value is larger than INT_MAX by comparing with
  // zero to pick one of the two conversions.
  // Signed-less-than-zero is true exactly when the unsigned value has its top
  // bit set, i.e. Src >= 2^63, where SITOFP alone would be wrong.
  auto IsLarge =
      MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_SLT, S1, Src, Zero);
  MIRBuilder.buildSelect(Dst, IsLarge, LargeResult, SmallResult);

  MI.eraseFromParent();
  return Legalized;
}
8334
// Expand s64 = G_UITOFP s64 using bit and float arithmetic operations to an
// IEEE double representation.
  auto [Dst, Src] = MI.getFirst2Regs();
  const LLT S64 = LLT::scalar(64);
  const LLT S32 = LLT::scalar(32);

  assert(MRI.getType(Src) == S64 && MRI.getType(Dst) == S64);

  // We create double value from 32 bit parts with 32 exponent difference.
  // Note that + and - are float operations that adjust the implicit leading
  // one, the bases 2^52 and 2^84 are for illustrative purposes.
  //
  // X = 2^52 * 1.0...LowBits
  // Y = 2^84 * 1.0...HighBits
  // Scratch = 2^84 * 1.0...HighBits - 2^84 * 1.0 - 2^52 * 1.0
  //         = - 2^52 * 1.0...HighBits
  // Result = - 2^52 * 1.0...HighBits + 2^52 * 1.0...LowBits
  //
  // 0x4330... is the f64 bit pattern of 2^52, 0x4530... that of 2^84, and the
  // FP constant below is the value 2^84 + 2^52.
  auto TwoP52 = MIRBuilder.buildConstant(S64, UINT64_C(0x4330000000000000));
  auto TwoP84 = MIRBuilder.buildConstant(S64, UINT64_C(0x4530000000000000));
  auto TwoP52P84 = llvm::bit_cast<double>(UINT64_C(0x4530000000100000));
  auto TwoP52P84FP = MIRBuilder.buildFConstant(S64, TwoP52P84);
  auto HalfWidth = MIRBuilder.buildConstant(S64, 32);

  // OR each 32-bit half into the mantissa of 2^52 / 2^84 respectively.
  auto LowBits = MIRBuilder.buildTrunc(S32, Src);
  LowBits = MIRBuilder.buildZExt(S64, LowBits);
  auto LowBitsFP = MIRBuilder.buildOr(S64, TwoP52, LowBits);
  auto HighBits = MIRBuilder.buildLShr(S64, Src, HalfWidth);
  auto HighBitsFP = MIRBuilder.buildOr(S64, TwoP84, HighBits);
  auto Scratch = MIRBuilder.buildFSub(S64, HighBitsFP, TwoP52P84FP);
  MIRBuilder.buildFAdd(Dst, Scratch, LowBitsFP);

  MI.eraseFromParent();
  return Legalized;
}
8371
/// i64->fp16 itofp can be lowered to i64->f64,f64->f32,f32->f16. We cannot
/// convert fpround f64->f16 without double-rounding, so we manually perform the
/// lowering here where we know it is valid.
                   LLT SrcTy, MachineIRBuilder &MIRBuilder) {
  // First convert to the IEEE float type with the same bit width as the
  // integer source (f64 for an i64 source).
  auto DstFpTy =
      SrcTy.changeElementType(LLT::floatIEEE(SrcTy.getScalarSizeInBits()));
  auto M1 = MI.getOpcode() == TargetOpcode::G_UITOFP
                ? MIRBuilder.buildUITOFP(DstFpTy, Src)
                : MIRBuilder.buildSITOFP(DstFpTy, Src);
  LLT F32Ty = DstFpTy.changeElementSize(32);
  // Two-step truncation f64 -> f32 -> f16; valid here per the comment above.
  auto M2 = MIRBuilder.buildFPTrunc(F32Ty, M1);
  MIRBuilder.buildFPTrunc(Dst, M2);
  MI.eraseFromParent();
}
8389
  auto [Dst, DstTy, Src, SrcTy] = MI.getFirst2RegLLTs();

  if (SrcTy == LLT::scalar(1)) {
    // Unsigned i1: true converts to 1.0, false to 0.0.
    auto True = MIRBuilder.buildFConstant(DstTy, 1.0);
    auto False = MIRBuilder.buildFConstant(DstTy, 0.0);
    MIRBuilder.buildSelect(Dst, Src, True, False);
    MI.eraseFromParent();
    return Legalized;
  }

  // i64 -> f16 needs the dedicated double-rounding-safe path.
  if (DstTy.getScalarSizeInBits() == 16 && SrcTy.getScalarSizeInBits() == 64)
    return loweri64tof16ITOFP(MI, Dst, DstTy, Src, SrcTy, MIRBuilder);

  // Only i64 source expansions are implemented below.
  if (SrcTy != LLT::scalar(64))
    return UnableToLegalize;

  if (DstTy == LLT::scalar(32))
    // TODO: SelectionDAG has several alternative expansions to port which may
    // be more reasonable depending on the available instructions. We also need
    // a more advanced mechanism to choose an optimal version depending on
    // target features such as sitofp or CTLZ availability.

  if (DstTy == LLT::scalar(64))

  return UnableToLegalize;
}
8419
  auto [Dst, DstTy, Src, SrcTy] = MI.getFirst2RegLLTs();

  const LLT I64 = LLT::integer(64);
  const LLT I32 = LLT::integer(32);
  const LLT I1 = LLT::integer(1);

  if (SrcTy == I1) {
    // Signed i1: true is -1, which converts to -1.0.
    auto True = MIRBuilder.buildFConstant(DstTy, -1.0);
    auto False = MIRBuilder.buildFConstant(DstTy, 0.0);
    MIRBuilder.buildSelect(Dst, Src, True, False);
    MI.eraseFromParent();
    return Legalized;
  }

  // i64 -> f16 needs the dedicated double-rounding-safe path.
  if (DstTy.getScalarSizeInBits() == 16 && SrcTy.getScalarSizeInBits() == 64)
    return loweri64tof16ITOFP(MI, Dst, DstTy, Src, SrcTy, MIRBuilder);

  if (SrcTy != I64)
    return UnableToLegalize;

  if (DstTy.getScalarSizeInBits() == 32) {
    // Reference scalar algorithm:
    // signed cl2f(long l) {
    //   long s = l >> 63;
    //   float r = cul2f((l + s) ^ s);
    //   return s ? -r : r;
    // }
    Register L = Src;
    // s = sign spread of l: all-ones when negative, zero otherwise.
    auto SignBit = MIRBuilder.buildConstant(I64, 63);
    auto S = MIRBuilder.buildAShr(I64, L, SignBit);

    // (l + s) ^ s computes |l| branchlessly, then convert as unsigned.
    auto LPlusS = MIRBuilder.buildAdd(I64, L, S);
    auto Xor = MIRBuilder.buildXor(I64, LPlusS, S);
    auto R = MIRBuilder.buildUITOFP(I32, Xor);

    // Negate the result if the input was negative.
    auto RNeg = MIRBuilder.buildFNeg(I32, R);
    auto SignNotZero = MIRBuilder.buildICmp(CmpInst::ICMP_NE, I1, S,
                                            MIRBuilder.buildConstant(I64, 0));
    MIRBuilder.buildSelect(Dst, SignNotZero, RNeg, R);
    MI.eraseFromParent();
    return Legalized;
  }

  return UnableToLegalize;
}
8465
  auto [Dst, DstTy, Src, SrcTy] = MI.getFirst2RegLLTs();
  const LLT S64 = LLT::scalar(64);
  const LLT S32 = LLT::scalar(32);

  if (SrcTy != S64 && SrcTy != S32)
    return UnableToLegalize;
  if (DstTy != S32 && DstTy != S64)
    return UnableToLegalize;

  // FPTOSI gives same result as FPTOUI for positive signed integers.
  // FPTOUI needs to deal with fp values that convert to unsigned integers
  // greater or equal to 2^31 for float or 2^63 for double. For brevity 2^Exp.

  // 2^Exp as an integer is just the sign-bit position of the destination...
  APInt TwoPExpInt = APInt::getSignMask(DstTy.getSizeInBits());
  // ...and here it is materialized as an FP constant of the source type.
  APFloat TwoPExpFP(SrcTy.getSizeInBits() == 32 ? APFloat::IEEEsingle()
                    APInt::getZero(SrcTy.getSizeInBits()));
  TwoPExpFP.convertFromAPInt(TwoPExpInt, false, APFloat::rmNearestTiesToEven);

  MachineInstrBuilder FPTOSI = MIRBuilder.buildFPTOSI(DstTy, Src);

  MachineInstrBuilder Threshold = MIRBuilder.buildFConstant(SrcTy, TwoPExpFP);
  // For fp Value greater or equal to Threshold(2^Exp), we use FPTOSI on
  // (Value - 2^Exp) and add 2^Exp by setting highest bit in result to 1.
  MachineInstrBuilder FSub = MIRBuilder.buildFSub(SrcTy, Src, Threshold);
  MachineInstrBuilder ResLowBits = MIRBuilder.buildFPTOSI(DstTy, FSub);
  MachineInstrBuilder ResHighBit = MIRBuilder.buildConstant(DstTy, TwoPExpInt);
  // XOR with the sign-mask constant sets the high bit of the low-bits result.
  MachineInstrBuilder Res = MIRBuilder.buildXor(DstTy, ResLowBits, ResHighBit);

  const LLT S1 = LLT::scalar(1);

  // ULT: the direct FPTOSI path is also taken for NaN inputs (unordered).
  MachineInstrBuilder FCMP =
      MIRBuilder.buildFCmp(CmpInst::FCMP_ULT, S1, Src, Threshold);
  MIRBuilder.buildSelect(Dst, FCMP, FPTOSI, Res);

  MI.eraseFromParent();
  return Legalized;
}
8505
  auto [Dst, DstTy, Src, SrcTy] = MI.getFirst2RegLLTs();
  const LLT S64 = LLT::scalar(64);
  const LLT S32 = LLT::scalar(32);

  // FIXME: Only f32 to i64 conversions are supported.
  if (SrcTy.getScalarType() != S32 || DstTy.getScalarType() != S64)
    return UnableToLegalize;

  // Expand f32 -> i64 conversion
  // This algorithm comes from compiler-rt's implementation of fixsfdi:
  // https://github.com/llvm/llvm-project/blob/main/compiler-rt/lib/builtins/fixsfdi.c

  unsigned SrcEltBits = SrcTy.getScalarSizeInBits();

  // Biased exponent = bits [30:23] of the f32 bit pattern.
  auto ExponentMask = MIRBuilder.buildConstant(SrcTy, 0x7F800000);
  auto ExponentLoBit = MIRBuilder.buildConstant(SrcTy, 23);

  auto AndExpMask = MIRBuilder.buildAnd(SrcTy, Src, ExponentMask);
  auto ExponentBits = MIRBuilder.buildLShr(SrcTy, AndExpMask, ExponentLoBit);

  // Sign = all-ones when negative, all-zeros otherwise (arithmetic shift of
  // the isolated sign bit), then sign-extended to the destination width.
  auto SignMask = MIRBuilder.buildConstant(SrcTy,
                                           APInt::getSignMask(SrcEltBits));
  auto AndSignMask = MIRBuilder.buildAnd(SrcTy, Src, SignMask);
  auto SignLowBit = MIRBuilder.buildConstant(SrcTy, SrcEltBits - 1);
  auto Sign = MIRBuilder.buildAShr(SrcTy, AndSignMask, SignLowBit);
  Sign = MIRBuilder.buildSExt(DstTy, Sign);

  // R = mantissa with the implicit leading one (0x00800000) made explicit.
  auto MantissaMask = MIRBuilder.buildConstant(SrcTy, 0x007FFFFF);
  auto AndMantissaMask = MIRBuilder.buildAnd(SrcTy, Src, MantissaMask);
  auto K = MIRBuilder.buildConstant(SrcTy, 0x00800000);

  auto R = MIRBuilder.buildOr(SrcTy, AndMantissaMask, K);
  R = MIRBuilder.buildZExt(DstTy, R);

  // Unbias the exponent and compute both shift amounts relative to the
  // mantissa width (23); only one of them will be selected below.
  auto Bias = MIRBuilder.buildConstant(SrcTy, 127);
  auto Exponent = MIRBuilder.buildSub(SrcTy, ExponentBits, Bias);
  auto SubExponent = MIRBuilder.buildSub(SrcTy, Exponent, ExponentLoBit);
  auto ExponentSub = MIRBuilder.buildSub(SrcTy, ExponentLoBit, Exponent);

  auto Shl = MIRBuilder.buildShl(DstTy, R, SubExponent);
  auto Srl = MIRBuilder.buildLShr(DstTy, R, ExponentSub);

  const LLT S1 = LLT::scalar(1);
  // Shift left when the exponent exceeds the mantissa width, right otherwise.
  auto CmpGt = MIRBuilder.buildICmp(CmpInst::ICMP_SGT,
                                    S1, Exponent, ExponentLoBit);

  R = MIRBuilder.buildSelect(DstTy, CmpGt, Shl, Srl);

  // Apply the sign: (R ^ Sign) - Sign negates R when Sign is all-ones.
  auto XorSign = MIRBuilder.buildXor(DstTy, R, Sign);
  auto Ret = MIRBuilder.buildSub(DstTy, XorSign, Sign);

  auto ZeroSrcTy = MIRBuilder.buildConstant(SrcTy, 0);

  // A negative unbiased exponent means |x| < 1, which truncates to 0.
  auto ExponentLt0 = MIRBuilder.buildICmp(CmpInst::ICMP_SLT,
                                          S1, Exponent, ZeroSrcTy);

  auto ZeroDstTy = MIRBuilder.buildConstant(DstTy, 0);
  MIRBuilder.buildSelect(Dst, ExponentLt0, ZeroDstTy, Ret);

  MI.eraseFromParent();
  return Legalized;
}
8569
  auto [Dst, DstTy, Src, SrcTy] = MI.getFirst2RegLLTs();

  bool IsSigned = MI.getOpcode() == TargetOpcode::G_FPTOSI_SAT;
  unsigned SatWidth = DstTy.getScalarSizeInBits();

  // Determine minimum and maximum integer values and their corresponding
  // floating-point values.
  APInt MinInt, MaxInt;
  if (IsSigned) {
    MinInt = APInt::getSignedMinValue(SatWidth);
    MaxInt = APInt::getSignedMaxValue(SatWidth);
  } else {
    MinInt = APInt::getMinValue(SatWidth);
    MaxInt = APInt::getMaxValue(SatWidth);
  }

  const fltSemantics &Semantics = getFltSemanticForLLT(SrcTy.getScalarType());
  APFloat MinFloat(Semantics);
  APFloat MaxFloat(Semantics);

  // Convert toward zero so an inexact status tells us the bound is not
  // exactly representable in the source FP type.
  APFloat::opStatus MinStatus =
      MinFloat.convertFromAPInt(MinInt, IsSigned, APFloat::rmTowardZero);
  APFloat::opStatus MaxStatus =
      MaxFloat.convertFromAPInt(MaxInt, IsSigned, APFloat::rmTowardZero);
  bool AreExactFloatBounds = !(MinStatus & APFloat::opStatus::opInexact) &&
                             !(MaxStatus & APFloat::opStatus::opInexact);

  // If the integer bounds are exactly representable as floats, emit a
  // min+max+fptoi sequence. Otherwise we have to use a sequence of comparisons
  // and selects.
  if (AreExactFloatBounds) {
    // Clamp Src by MinFloat from below. If Src is NaN the result is MinFloat.
    auto MaxC = MIRBuilder.buildFConstant(SrcTy, MinFloat);
    auto MaxP =
        MIRBuilder.buildFCmp(CmpInst::FCMP_OGT, LLT::integer(1), Src, MaxC);
    auto Max = MIRBuilder.buildSelect(SrcTy, MaxP, Src, MaxC);
    // Clamp by MaxFloat from above. NaN cannot occur.
    auto MinP = MIRBuilder.buildFCmp(CmpInst::FCMP_OLT, LLT::integer(1), Max,
    auto Min =
        MIRBuilder.buildSelect(SrcTy, MinP, Max, MinC, MachineInstr::FmNoNans);
    // Convert clamped value to integer. In the unsigned case we're done,
    // because we mapped NaN to MinFloat, which will cast to zero.
    if (!IsSigned) {
      MIRBuilder.buildFPTOUI(Dst, Min);
      MI.eraseFromParent();
      return Legalized;
    }

    // Otherwise, select 0 if Src is NaN.
    auto FpToInt = MIRBuilder.buildFPTOSI(DstTy, Min);
    auto IsZero =
        MIRBuilder.buildFCmp(CmpInst::FCMP_UNO, LLT::integer(1), Src, Src);
    MIRBuilder.buildSelect(Dst, IsZero, MIRBuilder.buildConstant(DstTy, 0),
                           FpToInt);
    MI.eraseFromParent();
    return Legalized;
  }

  // Result of direct conversion. The assumption here is that the operation is
  // non-trapping and it's fine to apply it to an out-of-range value if we
  // select it away later.
  auto FpToInt = IsSigned ? MIRBuilder.buildFPTOSI(DstTy, Src)
                          : MIRBuilder.buildFPTOUI(DstTy, Src);

  // If Src ULT MinFloat, select MinInt. In particular, this also selects
  // MinInt if Src is NaN.
  auto ULT = MIRBuilder.buildFCmp(CmpInst::FCMP_ULT, LLT::integer(1), Src,
                                  MIRBuilder.buildFConstant(SrcTy, MinFloat));
  auto Max = MIRBuilder.buildSelect(
      DstTy, ULT, MIRBuilder.buildConstant(DstTy, MinInt), FpToInt);
  // If Src OGT MaxFloat, select MaxInt.
  auto OGT = MIRBuilder.buildFCmp(CmpInst::FCMP_OGT, LLT::integer(1), Src,
                                  MIRBuilder.buildFConstant(SrcTy, MaxFloat));

  // In the unsigned case we are done, because we mapped NaN to MinInt, which
  // is already zero.
  if (!IsSigned) {
    MIRBuilder.buildSelect(Dst, OGT, MIRBuilder.buildConstant(DstTy, MaxInt),
                           Max);
    MI.eraseFromParent();
    return Legalized;
  }

  // Otherwise, select 0 if Src is NaN.
  auto Min = MIRBuilder.buildSelect(
      DstTy, OGT, MIRBuilder.buildConstant(DstTy, MaxInt), Max);
  auto IsZero =
      MIRBuilder.buildFCmp(CmpInst::FCMP_UNO, LLT::integer(1), Src, Src);
  MIRBuilder.buildSelect(Dst, IsZero, MIRBuilder.buildConstant(DstTy, 0), Min);
  MI.eraseFromParent();
  return Legalized;
}
8666
// Floating-point conversions using truncating and extending loads and stores.
  assert((MI.getOpcode() == TargetOpcode::G_FPEXT ||
          MI.getOpcode() == TargetOpcode::G_FPTRUNC) &&
         "Only G_FPEXT and G_FPTRUNC are expected");

  auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();
  MachinePointerInfo PtrInfo;
  unsigned StoreOpc;
  unsigned LoadOpc;
  // The stack slot is sized for the narrower of the two types; the other
  // direction of the round trip uses the extending/truncating memory op.
  LLT StackTy;
  if (MI.getOpcode() == TargetOpcode::G_FPEXT) {
    // Plain store of the narrow value, then widen via an extending load.
    StackTy = SrcTy;
    StoreOpc = TargetOpcode::G_STORE;
    LoadOpc = TargetOpcode::G_FPEXTLOAD;
  } else {
    // Narrow via a truncating store, then plain load.
    StackTy = DstTy;
    StoreOpc = TargetOpcode::G_FPTRUNCSTORE;
    LoadOpc = TargetOpcode::G_LOAD;
  }

  Align StackTyAlign = getStackTemporaryAlignment(StackTy);
  auto StackTemp =
      createStackTemporary(StackTy.getSizeInBytes(), StackTyAlign, PtrInfo);

  MachineFunction &MF = MIRBuilder.getMF();
  auto *StoreMMO = MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
                                           StackTy, StackTyAlign);
  MIRBuilder.buildStoreInstr(StoreOpc, SrcReg, StackTemp, *StoreMMO);

  auto *LoadMMO = MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad,
                                          StackTy, StackTyAlign);
  MIRBuilder.buildLoadInstr(LoadOpc, DstReg, StackTemp, *LoadMMO);

  MI.eraseFromParent();
  return Legalized;
}
8705
// f64 -> f16 conversion using round-to-nearest-even rounding mode.
  const LLT S1 = LLT::scalar(1);
  const LLT S32 = LLT::scalar(32);

  auto [Dst, Src] = MI.getFirst2Regs();
  assert(MRI.getType(Dst).getScalarType() == LLT::float16() &&
         MRI.getType(Src).getScalarType() == LLT::float64());

  if (MRI.getType(Src).isVector()) // TODO: Handle vectors directly.
    return UnableToLegalize;

  if (MI.getFlag(MachineInstr::FmAfn)) {
    // 'afn' permits an approximate result, so the cheap double truncation
    // through f32 is acceptable here.
    unsigned Flags = MI.getFlags();
    auto Src32 = MIRBuilder.buildFPTrunc(S32, Src, Flags);
    MIRBuilder.buildFPTrunc(Dst, Src32, Flags);
    MI.eraseFromParent();
    return Legalized;
  }

  const unsigned ExpMask = 0x7ff;
  const unsigned ExpBiasf64 = 1023;
  const unsigned ExpBiasf16 = 15;

  // Split the f64 bit pattern into low (U) and high (UH) 32-bit halves.
  auto Unmerge = MIRBuilder.buildUnmerge(S32, Src);
  Register U = Unmerge.getReg(0);
  Register UH = Unmerge.getReg(1);

  // E = biased f64 exponent, extracted from bits [30:20] of the high half.
  auto E = MIRBuilder.buildLShr(S32, UH, MIRBuilder.buildConstant(S32, 20));
  E = MIRBuilder.buildAnd(S32, E, MIRBuilder.buildConstant(S32, ExpMask));

  // Subtract the fp64 exponent bias (1023) to get the real exponent and
  // add the f16 bias (15) to get the biased exponent for the f16 format.
  E = MIRBuilder.buildAdd(
      S32, E, MIRBuilder.buildConstant(S32, -ExpBiasf64 + ExpBiasf16));

  // M = top mantissa bits shifted into f16 position (with one extra round
  // bit kept; the 0xffe mask keeps 11 bits).
  auto M = MIRBuilder.buildLShr(S32, UH, MIRBuilder.buildConstant(S32, 8));
  M = MIRBuilder.buildAnd(S32, M, MIRBuilder.buildConstant(S32, 0xffe));

  // Sticky info: OR of all discarded low mantissa bits.
  auto MaskedSig = MIRBuilder.buildAnd(S32, UH,
                                       MIRBuilder.buildConstant(S32, 0x1ff));
  MaskedSig = MIRBuilder.buildOr(S32, MaskedSig, U);

  auto Zero = MIRBuilder.buildConstant(S32, 0);
  auto SigCmpNE0 = MIRBuilder.buildICmp(CmpInst::ICMP_NE, S1, MaskedSig, Zero);
  auto Lo40Set = MIRBuilder.buildZExt(S32, SigCmpNE0);
  M = MIRBuilder.buildOr(S32, M, Lo40Set);

  // (M != 0 ? 0x0200 : 0) | 0x7c00;
  // I is the Inf/NaN encoding: exponent all-ones, plus a quiet-mantissa bit
  // when the source mantissa was nonzero (NaN rather than Inf).
  auto Bits0x200 = MIRBuilder.buildConstant(S32, 0x0200);
  auto CmpM_NE0 = MIRBuilder.buildICmp(CmpInst::ICMP_NE, S1, M, Zero);
  auto SelectCC = MIRBuilder.buildSelect(S32, CmpM_NE0, Bits0x200, Zero);

  auto Bits0x7c00 = MIRBuilder.buildConstant(S32, 0x7c00);
  auto I = MIRBuilder.buildOr(S32, SelectCC, Bits0x7c00);

  // N = M | (E << 12);
  auto EShl12 = MIRBuilder.buildShl(S32, E, MIRBuilder.buildConstant(S32, 12));
  auto N = MIRBuilder.buildOr(S32, M, EShl12);

  // B = clamp(1-E, 0, 13);
  auto One = MIRBuilder.buildConstant(S32, 1);
  auto OneSubExp = MIRBuilder.buildSub(S32, One, E);
  auto B = MIRBuilder.buildSMax(S32, OneSubExp, Zero);
  B = MIRBuilder.buildSMin(S32, B, MIRBuilder.buildConstant(S32, 13));

  // Denormal path: make the implicit one explicit, shift right by B, and OR
  // back a sticky bit if any bits were shifted out.
  auto SigSetHigh = MIRBuilder.buildOr(S32, M,
                                       MIRBuilder.buildConstant(S32, 0x1000));

  auto D = MIRBuilder.buildLShr(S32, SigSetHigh, B);
  auto D0 = MIRBuilder.buildShl(S32, D, B);

  auto D0_NE_SigSetHigh = MIRBuilder.buildICmp(CmpInst::ICMP_NE, S1,
                                               D0, SigSetHigh);
  auto D1 = MIRBuilder.buildZExt(S32, D0_NE_SigSetHigh);
  D = MIRBuilder.buildOr(S32, D, D1);

  // Use the denormal encoding when E < 1, the normal one otherwise.
  auto CmpELtOne = MIRBuilder.buildICmp(CmpInst::ICMP_SLT, S1, E, One);
  auto V = MIRBuilder.buildSelect(S32, CmpELtOne, D, N);

  // Round to nearest even on the two extra low bits: increment if the low
  // three bits are 3 (tie, odd LSB) or greater than 5.
  auto VLow3 = MIRBuilder.buildAnd(S32, V, MIRBuilder.buildConstant(S32, 7));
  V = MIRBuilder.buildLShr(S32, V, MIRBuilder.buildConstant(S32, 2));

  auto VLow3Eq3 = MIRBuilder.buildICmp(CmpInst::ICMP_EQ, S1, VLow3,
                                       MIRBuilder.buildConstant(S32, 3));
  auto V0 = MIRBuilder.buildZExt(S32, VLow3Eq3);

  auto VLow3Gt5 = MIRBuilder.buildICmp(CmpInst::ICMP_SGT, S1, VLow3,
                                       MIRBuilder.buildConstant(S32, 5));
  auto V1 = MIRBuilder.buildZExt(S32, VLow3Gt5);

  V1 = MIRBuilder.buildOr(S32, V0, V1);
  V = MIRBuilder.buildAdd(S32, V, V1);

  // Exponent overflow (E > 30): result is infinity (0x7c00).
  auto CmpEGt30 = MIRBuilder.buildICmp(CmpInst::ICMP_SGT, S1,
                                       E, MIRBuilder.buildConstant(S32, 30));
  V = MIRBuilder.buildSelect(S32, CmpEGt30,
                             MIRBuilder.buildConstant(S32, 0x7c00), V);

  // E == 1039 is the rebiased all-ones f64 exponent (0x7ff - 1023 + 15):
  // the source was Inf/NaN, so use the I encoding built above.
  auto CmpEGt1039 = MIRBuilder.buildICmp(CmpInst::ICMP_EQ, S1,
                                         E, MIRBuilder.buildConstant(S32, 1039));
  V = MIRBuilder.buildSelect(S32, CmpEGt1039, I, V);

  // Extract the sign bit.
  auto Sign = MIRBuilder.buildLShr(S32, UH, MIRBuilder.buildConstant(S32, 16));
  Sign = MIRBuilder.buildAnd(S32, Sign, MIRBuilder.buildConstant(S32, 0x8000));

  // Insert the sign bit
  V = MIRBuilder.buildOr(S32, Sign, V);

  MIRBuilder.buildTrunc(Dst, V);
  MI.eraseFromParent();
  return Legalized;
}
8821
// f32 -> bf16 conversion using round-to-nearest-even rounding mode.
  auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();
  assert(DstTy.getScalarType() == LLT::bfloat16() &&
         SrcTy.getScalarType() == LLT::float32());

  LLT I1Ty = SrcTy.changeElementType(LLT::integer(1));
  LLT I16Ty = SrcTy.changeElementType(LLT::integer(16));
  LLT I32Ty = SrcTy.changeElementType(LLT::integer(32));

  // The NaN test is done on the FP value; everything else below operates on
  // the raw bits.
  auto IsNaN = MIRBuilder.buildFCmp(CmpInst::FCMP_UNO, I1Ty, SrcReg,
                                    MIRBuilder.buildFConstant(SrcTy, 0));
  auto SrcI = MIRBuilder.buildBitcast(I32Ty, SrcReg);

  // Conversions should set NaN's quiet bit. This also prevents NaNs from
  // turning into infinities.
  auto NaN = MIRBuilder.buildOr(I32Ty, SrcI,
                                MIRBuilder.buildConstant(I32Ty, 0x400000));

  // Factor in the contribution of the low 16 bits.
  // Round to nearest even: add 0x7fff plus the LSB of the kept half so that
  // exact ties round toward an even result.
  auto Lsb =
      MIRBuilder.buildLShr(I32Ty, SrcI, MIRBuilder.buildConstant(I32Ty, 16));
  Lsb = MIRBuilder.buildAnd(I32Ty, Lsb, MIRBuilder.buildConstant(I32Ty, 1));
  auto RoundingBias =
      MIRBuilder.buildAdd(I32Ty, Lsb, MIRBuilder.buildConstant(I32Ty, 0x7fff));
  auto Add = MIRBuilder.buildAdd(I32Ty, SrcI, RoundingBias);

  // Don't round if we had a NaN, we don't want to turn 0x7fffffff into
  // 0x80000000.
  auto Sel = MIRBuilder.buildSelect(I32Ty, IsNaN, NaN, Add);

  // Now that we have rounded, shift the bits into position.
  auto Srl =
      MIRBuilder.buildLShr(I32Ty, Sel, MIRBuilder.buildConstant(I32Ty, 16));
  auto Trunc = MIRBuilder.buildTrunc(I16Ty, Srl);
  MIRBuilder.buildBitcast(DstReg, Trunc);
  MI.eraseFromParent();
  return Legalized;
}
8862
  auto [DstTy, SrcTy] = MI.getFirst2LLTs();
  // NOTE(review): the two guarded returns here (lines lost in this view)
  // presumably dispatch to the dedicated f64->f16 and f32->bf16 expansions
  // defined above; confirm against upstream.
  if (DstTy.getScalarType().isFloat16() && SrcTy.getScalarType().isFloat64())

  if (DstTy.getScalarType().isBFloat16() && SrcTy.getScalarType().isFloat32())

  // Generic fallback: round-trip through a stack temporary using a
  // truncating store or extending load.
  return lowerFPExtAndTruncMem(MI);
}
8874
  auto [Dst, Src0, Src1] = MI.getFirst3Regs();
  LLT Ty = MRI.getType(Dst);

  // Lower pow-with-integer-exponent by converting the (signed) exponent to
  // FP and reusing the generic G_FPOW.
  auto CvtSrc1 = MIRBuilder.buildSITOFP(Ty, Src1);
  MIRBuilder.buildFPow(Dst, Src0, CvtSrc1, MI.getFlags());
  MI.eraseFromParent();
  return Legalized;
}
8884
  auto [DstFrac, DstInt, Src] = MI.getFirst3Regs();
  LLT Ty = MRI.getType(Src);
  auto Flags = MI.getFlags();
  const LLT CondTy = Ty.changeElementType(LLT::integer(1));

  // Split Src into integral and fractional parts:
  //   IntPart  = trunc(Src)  (round toward zero)
  //   FracPart = Src - IntPart
  auto IntPart = MIRBuilder.buildIntrinsicTrunc(Ty, Src, Flags);
  auto FracPart = MIRBuilder.buildFSub(Ty, Src, IntPart, Flags);

  Register FracToUse;
  if (MI.getFlag(MachineInstr::FmNoInfs)) {
    // No infinities: inf - inf cannot occur, the subtraction is always valid.
    FracToUse = FracPart.getReg(0);
  } else {
    // For +/-inf inputs the subtraction above would yield NaN; substitute a
    // zero fractional part instead (sign is reapplied below).
    auto Abs = MIRBuilder.buildFAbs(Ty, Src, Flags);
    const fltSemantics &Semantics = getFltSemanticForLLT(Ty.getScalarType());
    auto Inf = MIRBuilder.buildFConstant(Ty, APFloat::getInf(Semantics));
    auto IsInf = MIRBuilder.buildFCmp(CmpInst::FCMP_OEQ, CondTy, Abs, Inf);
    auto Zero = MIRBuilder.buildFConstant(Ty, 0.0);
    auto Select = MIRBuilder.buildSelect(Ty, IsInf, Zero, FracPart);
    FracToUse = Select.getReg(0);
  }

  // Copy the sign of Src onto the fractional result (preserves -0.0 and the
  // sign of the zero substituted for infinities).
  MIRBuilder.buildFCopysign(DstFrac, FracToUse, Src, Flags);
  MIRBuilder.buildCopy(DstInt, IntPart.getReg(0));

  MI.eraseFromParent();
  return Legalized;
}
8913
  // Map an integer min/max opcode to the ICmp predicate that is true exactly
  // when the first operand should be selected.
  switch (Opc) {
  case TargetOpcode::G_SMIN:
    return CmpInst::ICMP_SLT;
  case TargetOpcode::G_SMAX:
    return CmpInst::ICMP_SGT;
  case TargetOpcode::G_UMIN:
    return CmpInst::ICMP_ULT;
  case TargetOpcode::G_UMAX:
    return CmpInst::ICMP_UGT;
  default:
    llvm_unreachable("not in integer min/max");
  }
}
8928
  auto [Dst, Src0, Src1] = MI.getFirst3Regs();

  // min/max(a, b) -> select(icmp(pred, a, b), a, b) with the predicate
  // matching the opcode's signedness and direction.
  const CmpInst::Predicate Pred = minMaxToCompare(MI.getOpcode());
  LLT CmpType = MRI.getType(Dst).changeElementType(LLT::integer(1));

  auto Cmp = MIRBuilder.buildICmp(Pred, CmpType, Src0, Src1);
  MIRBuilder.buildSelect(Dst, Cmp, Src0, Src1);

  MI.eraseFromParent();
  return Legalized;
}
8941
  GSUCmp *Cmp = cast<GSUCmp>(&MI);

  Register Dst = Cmp->getReg(0);
  LLT DstTy = MRI.getType(Dst);
  LLT SrcTy = MRI.getType(Cmp->getReg(1));
  // Boolean type for the two comparisons below.
  LLT CmpTy = DstTy.changeElementSize(1);

  // Choose signed or unsigned LT/GT predicates based on Cmp->isSigned().
  CmpInst::Predicate LTPredicate = Cmp->isSigned()
  CmpInst::Predicate GTPredicate = Cmp->isSigned()

  // Three-way result encoding: -1 if LHS < RHS, 0 if equal, +1 if LHS > RHS.
  auto Zero = MIRBuilder.buildConstant(DstTy, 0);
  auto IsGT = MIRBuilder.buildICmp(GTPredicate, CmpTy, Cmp->getLHSReg(),
                                   Cmp->getRHSReg());
  auto IsLT = MIRBuilder.buildICmp(LTPredicate, CmpTy, Cmp->getLHSReg(),
                                   Cmp->getRHSReg());

  auto &Ctx = MIRBuilder.getMF().getFunction().getContext();
  auto BC = TLI.getBooleanContents(DstTy.isVector(), /*isFP=*/false);
  if (TLI.preferSelectsOverBooleanArithmetic(
          getApproximateEVTForLLT(SrcTy, Ctx)) ||
    // Select form: IsLT ? -1 : (IsGT ? 1 : 0).
    auto One = MIRBuilder.buildConstant(DstTy, 1);
    auto SelectZeroOrOne = MIRBuilder.buildSelect(DstTy, IsGT, One, Zero);

    auto MinusOne = MIRBuilder.buildConstant(DstTy, -1);
    MIRBuilder.buildSelect(Dst, IsLT, MinusOne, SelectZeroOrOne);
  } else {
    std::swap(IsGT, IsLT);
    // Extend boolean results to DstTy, which is at least i2, before subtracting
    // them.
    unsigned BoolExtOp =
        MIRBuilder.getBoolExtOp(DstTy.isVector(), /*isFP=*/false);
    IsGT = MIRBuilder.buildInstr(BoolExtOp, {DstTy}, {IsGT});
    IsLT = MIRBuilder.buildInstr(BoolExtOp, {DstTy}, {IsLT});
    // Boolean-arithmetic form: ext(IsGT) - ext(IsLT) yields -1/0/+1.
    MIRBuilder.buildSub(Dst, IsGT, IsLT);
  }

  MI.eraseFromParent();
  return Legalized;
}
8989
  auto [Dst, DstTy, Src0, Src0Ty, Src1, Src1Ty] = MI.getFirst3RegLLTs();
  const int Src0Size = Src0Ty.getScalarSizeInBits();
  const int Src1Size = Src1Ty.getScalarSizeInBits();

  // Integer twins of the operand types so the sign-bit manipulation can be
  // done with plain bitwise ops.
  LLT DstIntTy =
      DstTy.changeElementType(LLT::integer(DstTy.getScalarSizeInBits()));
  LLT Src0IntTy = Src0Ty.changeElementType(LLT::integer(Src0Size));
  LLT Src1IntTy = Src1Ty.changeElementType(LLT::integer(Src1Size));

  Register Src0Int = Src0;
  Register Src1Int = Src1;

  // Bitcast each source to integer unless it already is (any-)scalar/integer.
  if (!(Src0Ty.getScalarType().isAnyScalar() ||
        Src0Ty.getScalarType().isInteger()))
    Src0Int = MIRBuilder.buildBitcast(Src0IntTy, Src0).getReg(0);

  if (!(Src1Ty.getScalarType().isAnyScalar() ||
        Src1Ty.getScalarType().isInteger()))
    Src1Int = MIRBuilder.buildBitcast(Src1IntTy, Src1).getReg(0);

  auto SignBitMask =
      MIRBuilder.buildConstant(Src0IntTy, APInt::getSignMask(Src0Size));

  auto NotSignBitMask = MIRBuilder.buildConstant(
      Src0IntTy, APInt::getLowBitsSet(Src0Size, Src0Size - 1));

  // And0 = magnitude bits of Src0; And1 = sign bit of Src1, moved into
  // Src0's sign-bit position when the operand widths differ.
  Register And0 =
      MIRBuilder.buildAnd(Src0IntTy, Src0Int, NotSignBitMask).getReg(0);
  Register And1;
  if (Src0Ty == Src1Ty) {
    And1 = MIRBuilder.buildAnd(Src1IntTy, Src1Int, SignBitMask).getReg(0);
  } else if (Src0Size > Src1Size) {
    auto ShiftAmt = MIRBuilder.buildConstant(Src0IntTy, Src0Size - Src1Size);
    auto Zext = MIRBuilder.buildZExt(Src0IntTy, Src1Int);
    auto Shift = MIRBuilder.buildShl(Src0IntTy, Zext, ShiftAmt);
    // NOTE(review): the sibling branches build this G_AND with Src0IntTy;
    // confirm that using Src0Ty here is intentional.
    And1 = MIRBuilder.buildAnd(Src0Ty, Shift, SignBitMask).getReg(0);
  } else {
    auto ShiftAmt = MIRBuilder.buildConstant(Src1IntTy, Src1Size - Src0Size);
    auto Shift = MIRBuilder.buildLShr(Src1IntTy, Src1Int, ShiftAmt);
    auto Trunc = MIRBuilder.buildTrunc(Src0IntTy, Shift);
    And1 = MIRBuilder.buildAnd(Src0IntTy, Trunc, SignBitMask).getReg(0);
  }

  // Be careful about setting nsz/nnan/ninf on every instruction, since the
  // constants are a nan and -0.0, but the final result should preserve
  // everything.
  unsigned Flags = MI.getFlags();

  // We masked the sign bit and the not-sign bit, so these are disjoint.
  Flags |= MachineInstr::Disjoint;

  if (DstTy == DstIntTy)
    MIRBuilder.buildOr(Dst, And0, And1, Flags).getReg(0);
  else {
    // Combine in the integer domain, then bitcast back to the FP dest type.
    Register NewDst = MIRBuilder.buildOr(DstIntTy, And0, And1, Flags).getReg(0);
    MIRBuilder.buildBitcast(Dst, NewDst);
  }

  MI.eraseFromParent();
  return Legalized;
}
9053
  // FIXME: fminnum/fmaxnum and fminimumnum/fmaximumnum should not have
  // identical handling. fminimumnum/fmaximumnum also need a path that do not
  // depend on fminnum/fmaxnum.

  // Map each opcode to a replacement that has the same numeric behavior once
  // any signaling NaN inputs have been quieted below.
  unsigned NewOp;
  switch (MI.getOpcode()) {
  case TargetOpcode::G_FMINNUM:
    NewOp = TargetOpcode::G_FMINNUM_IEEE;
    break;
  case TargetOpcode::G_FMINIMUMNUM:
    NewOp = TargetOpcode::G_FMINNUM;
    break;
  case TargetOpcode::G_FMAXNUM:
    NewOp = TargetOpcode::G_FMAXNUM_IEEE;
    break;
  case TargetOpcode::G_FMAXIMUMNUM:
    NewOp = TargetOpcode::G_FMAXNUM;
    break;
  default:
    llvm_unreachable("unexpected min/max opcode");
  }

  auto [Dst, Src0, Src1] = MI.getFirst3Regs();
  LLT Ty = MRI.getType(Dst);

  if (!MI.getFlag(MachineInstr::FmNoNans)) {
    // Insert canonicalizes if it's possible we need to quiet to get correct
    // sNaN behavior.

    // Note this must be done here, and not as an optimization combine in the
    // absence of a dedicate quiet-snan instruction as we're using an
    // omni-purpose G_FCANONICALIZE.
    if (!VT->isKnownNeverSNaN(Src0))
      Src0 = MIRBuilder.buildFCanonicalize(Ty, Src0, MI.getFlags()).getReg(0);

    if (!VT->isKnownNeverSNaN(Src1))
      Src1 = MIRBuilder.buildFCanonicalize(Ty, Src1, MI.getFlags()).getReg(0);
  }

  // If there are no nans, it's safe to simply replace this with the non-IEEE
  // version.
  MIRBuilder.buildInstr(NewOp, {Dst}, {Src0, Src1}, MI.getFlags());
  MI.eraseFromParent();
  return Legalized;
}
9101
// Lower G_FMINIMUM/G_FMAXIMUM: compute a base min/max (preferring a legal
// IEEE or non-IEEE min/max opcode, falling back to fcmp+select), then patch
// up NaN propagation and the -0.0 < +0.0 ordering these opcodes require.
9104 unsigned Opc = MI.getOpcode();
9105 auto [Dst, Src0, Src1] = MI.getFirst3Regs();
9106 LLT Ty = MRI.getType(Dst);
// i1 (or vector-of-i1) type used for all compare results below.
9107 const LLT CmpTy = Ty.changeElementType(LLT::integer(1));
9108
9109 bool IsMax = (Opc == TargetOpcode::G_FMAXIMUM);
9110 unsigned OpcIeee =
9111 IsMax ? TargetOpcode::G_FMAXNUM_IEEE : TargetOpcode::G_FMINNUM_IEEE;
9112 unsigned OpcNonIeee =
9113 IsMax ? TargetOpcode::G_FMAXNUM : TargetOpcode::G_FMINNUM;
9114 bool MinMaxMustRespectOrderedZero = false;
9115 Register Res;
9116
9117 // IEEE variants don't need canonicalization
9118 if (LI.isLegalOrCustom({OpcIeee, Ty})) {
9119 Res = MIRBuilder.buildInstr(OpcIeee, {Ty}, {Src0, Src1}).getReg(0);
// The IEEE form already orders signed zeros, so the zero fixup is skipped.
9120 MinMaxMustRespectOrderedZero = true;
9121 } else if (LI.isLegalOrCustom({OpcNonIeee, Ty})) {
9122 Res = MIRBuilder.buildInstr(OpcNonIeee, {Ty}, {Src0, Src1}).getReg(0);
9123 } else {
// No legal min/max available: fall back to an ordered compare + select.
9124 auto Compare = MIRBuilder.buildFCmp(
9125 IsMax ? CmpInst::FCMP_OGT : CmpInst::FCMP_OLT, CmpTy, Src0, Src1);
9126 Res = MIRBuilder.buildSelect(Ty, Compare, Src0, Src1).getReg(0);
9127 }
9128
9129 // Propagate any NaN of both operands
9130 if (!MI.getFlag(MachineInstr::FmNoNans) &&
9131 (!VT->isKnownNeverNaN(Src0) || !VT->isKnownNeverNaN(Src1))) {
9132 auto IsOrdered = MIRBuilder.buildFCmp(CmpInst::FCMP_ORD, CmpTy, Src0, Src1);
9133
9134 LLT ElementTy = Ty.isScalar() ? Ty : Ty.getElementType();
9135 APFloat NaNValue = APFloat::getNaN(getFltSemanticForLLT(ElementTy));
9136 Register NaN = MIRBuilder.buildFConstant(ElementTy, NaNValue).getReg(0);
9137 if (Ty.isVector())
9138 NaN = MIRBuilder.buildSplatBuildVector(Ty, NaN).getReg(0);
9139
// If either input was NaN (unordered), replace the result with a NaN.
9140 Res = MIRBuilder.buildSelect(Ty, IsOrdered, Res, NaN).getReg(0);
9141 }
9142
9143 // fminimum/fmaximum requires -0.0 less than +0.0
9144 if (!MinMaxMustRespectOrderedZero && !MI.getFlag(MachineInstr::FmNsz)) {
9145 GISelValueTracking VT(MIRBuilder.getMF());
9146 KnownFPClass Src0Info = VT.computeKnownFPClass(Src0, fcZero);
9147 KnownFPClass Src1Info = VT.computeKnownFPClass(Src1, fcZero);
9148
// Only emit the fixup when both operands might be zero.
9149 if (!Src0Info.isKnownNeverZero() && !Src1Info.isKnownNeverZero()) {
9150 const unsigned Flags = MI.getFlags();
9151 Register Zero = MIRBuilder.buildFConstant(Ty, 0.0).getReg(0);
9152 auto IsZero = MIRBuilder.buildFCmp(CmpInst::FCMP_OEQ, CmpTy, Res, Zero);
9153
// For max prefer +0.0, for min prefer -0.0 when the result compares == 0.
9154 unsigned TestClass = IsMax ? fcPosZero : fcNegZero;
9155
9156 auto LHSTestZero = MIRBuilder.buildIsFPClass(CmpTy, Src0, TestClass);
9157 auto LHSSelect =
9158 MIRBuilder.buildSelect(Ty, LHSTestZero, Src0, Res, Flags);
9159
9160 auto RHSTestZero = MIRBuilder.buildIsFPClass(CmpTy, Src1, TestClass);
9161 auto RHSSelect =
9162 MIRBuilder.buildSelect(Ty, RHSTestZero, Src1, LHSSelect, Flags);
9163
9164 Res = MIRBuilder.buildSelect(Ty, IsZero, RHSSelect, Res, Flags).getReg(0);
9165 }
9166 }
9167
9168 MIRBuilder.buildCopy(Dst, Res);
9169 MI.eraseFromParent();
9170 return Legalized;
9171}
9172
9174 // Expand G_FMAD a, b, c -> G_FADD (G_FMUL a, b), c
9175 Register DstReg = MI.getOperand(0).getReg();
9176 LLT Ty = MRI.getType(DstReg);
// Preserve the original FP fast-math flags on both emitted instructions.
9177 unsigned Flags = MI.getFlags();
9178
9179 auto Mul = MIRBuilder.buildFMul(Ty, MI.getOperand(1), MI.getOperand(2),
9180 Flags);
9181 MIRBuilder.buildFAdd(DstReg, Mul, MI.getOperand(3), Flags);
9182 MI.eraseFromParent();
9183 return Legalized;
9184}
9185
// Lower round-to-nearest-away (G_INTRINSIC_ROUND) in terms of trunc, fabs,
// fcmp, select and copysign, per the formula in the comment below.
9188 auto [DstReg, X] = MI.getFirst2Regs();
9189 const unsigned Flags = MI.getFlags();
9190 const LLT Ty = MRI.getType(DstReg);
9191 const LLT CondTy = Ty.changeElementType(LLT::integer(1));
9192
9193 // round(x) =>
9194 // t = trunc(x);
9195 // d = fabs(x - t);
9196 // o = copysign(d >= 0.5 ? 1.0 : 0.0, x);
9197 // return t + o;
9198
9199 auto T = MIRBuilder.buildIntrinsicTrunc(Ty, X, Flags);
9200
// |x - trunc(x)| is the fractional part; >= 0.5 means round away from zero.
9201 auto Diff = MIRBuilder.buildFSub(Ty, X, T, Flags);
9202 auto AbsDiff = MIRBuilder.buildFAbs(Ty, Diff, Flags);
9203
9204 auto Half = MIRBuilder.buildFConstant(Ty, 0.5);
9205 auto Cmp =
9206 MIRBuilder.buildFCmp(CmpInst::FCMP_OGE, CondTy, AbsDiff, Half, Flags);
9207
9208 // Could emit G_UITOFP instead
9209 auto One = MIRBuilder.buildFConstant(Ty, 1.0);
9210 auto Zero = MIRBuilder.buildFConstant(Ty, 0.0);
9211 auto BoolFP = MIRBuilder.buildSelect(Ty, Cmp, One, Zero);
// Give the 0/1 offset the sign of x, so rounding moves away from zero.
9212 auto SignedOffset = MIRBuilder.buildFCopysign(Ty, BoolFP, X);
9213
9214 MIRBuilder.buildFAdd(DstReg, T, SignedOffset, Flags);
9215
9216 MI.eraseFromParent();
9217 return Legalized;
9218}
9219
// Lower G_FFLOOR: truncate toward zero, then subtract one when the source was
// negative and not already an integer (see formula comment below).
9221 auto [DstReg, SrcReg] = MI.getFirst2Regs();
9222 unsigned Flags = MI.getFlags();
9223 LLT Ty = MRI.getType(DstReg);
9224 const LLT CondTy = Ty.changeElementType(LLT::integer(1));
9225
9226 // result = trunc(src);
9227 // if (src < 0.0 && src != result)
9228 // result += -1.0.
9229
9230 auto Trunc = MIRBuilder.buildIntrinsicTrunc(Ty, SrcReg, Flags);
9231 auto Zero = MIRBuilder.buildFConstant(Ty, 0.0);
9232
9233 auto Lt0 = MIRBuilder.buildFCmp(CmpInst::FCMP_OLT, CondTy,
9234 SrcReg, Zero, Flags);
9235 auto NeTrunc = MIRBuilder.buildFCmp(CmpInst::FCMP_ONE, CondTy,
9236 SrcReg, Trunc, Flags);
9237 auto And = MIRBuilder.buildAnd(CondTy, Lt0, NeTrunc);
// SITOFP of the i1 condition yields 0.0 or -1.0, which is exactly the
// adjustment needed, so no select is required.
9238 auto AddVal = MIRBuilder.buildSITOFP(Ty, And);
9239
9240 MIRBuilder.buildFAdd(DstReg, Trunc, AddVal, Flags);
9241 MI.eraseFromParent();
9242 return Legalized;
9243}
9244
// Lower a merge of scalar parts into a wide scalar (or pointer) by
// zero-extending each part, shifting it into position, and OR-ing it in.
9247 const unsigned NumOps = MI.getNumOperands();
9248 auto [DstReg, DstTy, Src0Reg, Src0Ty] = MI.getFirst2RegLLTs();
9249 unsigned PartSize = Src0Ty.getSizeInBits();
9250
// Accumulate into an integer of the full destination width.
9251 LLT WideTy = LLT::scalar(DstTy.getSizeInBits());
9252 Register ResultReg = MIRBuilder.buildZExt(WideTy, Src0Reg).getReg(0);
9253
9254 for (unsigned I = 2; I != NumOps; ++I) {
9255 const unsigned Offset = (I - 1) * PartSize;
9256
9257 Register SrcReg = MI.getOperand(I).getReg();
9258 auto ZextInput = MIRBuilder.buildZExt(WideTy, SrcReg);
9259
// Write the final OR directly into DstReg when types allow, avoiding an
// extra copy; otherwise use a fresh temporary.
9260 Register NextResult = I + 1 == NumOps && WideTy == DstTy ? DstReg :
9261 MRI.createGenericVirtualRegister(WideTy);
9262
9263 auto ShiftAmt = MIRBuilder.buildConstant(WideTy, Offset);
9264 auto Shl = MIRBuilder.buildShl(WideTy, ZextInput, ShiftAmt);
9265 MIRBuilder.buildOr(NextResult, ResultReg, Shl);
9266 ResultReg = NextResult;
9267 }
9268
9269 if (DstTy.isPointer()) {
// Integer <-> pointer casts are invalid in non-integral address spaces.
9270 if (MIRBuilder.getDataLayout().isNonIntegralAddressSpace(
9271 DstTy.getAddressSpace())) {
9272 LLVM_DEBUG(dbgs() << "Not casting nonintegral address space\n");
9273 return UnableToLegalize;
9274 }
9275
9276 MIRBuilder.buildIntToPtr(DstReg, ResultReg);
9277 }
9278
9279 MI.eraseFromParent();
9280 return Legalized;
9281}
9282
// Lower an unmerge of a value into scalar pieces by bitcasting the source to
// an integer and extracting each destination with shift + trunc.
9285 const unsigned NumDst = MI.getNumOperands() - 1;
9286 Register SrcReg = MI.getOperand(NumDst).getReg();
9287 Register Dst0Reg = MI.getOperand(0).getReg();
9288 LLT DstTy = MRI.getType(Dst0Reg);
9289 if (DstTy.isPointer())
9290 return UnableToLegalize; // TODO
9291
9292 SrcReg = coerceToScalar(SrcReg);
9293 if (!SrcReg)
9294 return UnableToLegalize;
9295
9296 // Expand scalarizing unmerge as bitcast to integer and shift.
9297 LLT IntTy = MRI.getType(SrcReg);
9298
// Piece 0 is the low bits: a plain truncate with no shift.
9299 MIRBuilder.buildTrunc(Dst0Reg, SrcReg);
9300
9301 const unsigned DstSize = DstTy.getSizeInBits();
9302 unsigned Offset = DstSize;
9303 for (unsigned I = 1; I != NumDst; ++I, Offset += DstSize) {
9304 auto ShiftAmt = MIRBuilder.buildConstant(IntTy, Offset);
9305 auto Shift = MIRBuilder.buildLShr(IntTy, SrcReg, ShiftAmt);
9306 MIRBuilder.buildTrunc(MI.getOperand(I), Shift);
9307 }
9308
9309 MI.eraseFromParent();
9310 return Legalized;
9311}
9312
9313/// Lower a vector extract or insert by writing the vector to a stack temporary
9314/// and reloading the element or vector.
9315///
9316/// %dst = G_EXTRACT_VECTOR_ELT %vec, %idx
9317/// =>
9318/// %stack_temp = G_FRAME_INDEX
9319/// G_STORE %vec, %stack_temp
9320/// %idx = clamp(%idx, %vec.getNumElements())
9321/// %element_ptr = G_PTR_ADD %stack_temp, %idx
9322/// %dst = G_LOAD %element_ptr
9325 Register DstReg = MI.getOperand(0).getReg();
9326 Register SrcVec = MI.getOperand(1).getReg();
// InsertVal stays invalid for the extract case; it doubles as the
// "is this an insert?" flag below.
9327 Register InsertVal;
9328 if (MI.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT)
9329 InsertVal = MI.getOperand(2).getReg();
9330
9331 Register Idx = MI.getOperand(MI.getNumOperands() - 1).getReg();
9332
9333 LLT VecTy = MRI.getType(SrcVec);
9334 LLT EltTy = VecTy.getElementType();
9335 unsigned NumElts = VecTy.getNumElements();
9336
// Fast path: a constant in-bounds index can be handled by splitting the
// vector into elements, avoiding the stack round-trip entirely.
9337 int64_t IdxVal;
9338 if (mi_match(Idx, MRI, m_ICst(IdxVal)) && IdxVal <= NumElts) {
9340 extractParts(SrcVec, EltTy, NumElts, SrcRegs, MIRBuilder, MRI);
9341
9342 if (InsertVal) {
9343 SrcRegs[IdxVal] = MI.getOperand(2).getReg();
9344 MIRBuilder.buildMergeLikeInstr(DstReg, SrcRegs);
9345 } else {
9346 MIRBuilder.buildCopy(DstReg, SrcRegs[IdxVal]);
9347 }
9348
9349 MI.eraseFromParent();
9350 return Legalized;
9351 }
9352
9353 if (!EltTy.isByteSized()) { // Not implemented.
9354 LLVM_DEBUG(dbgs() << "Can't handle non-byte element vectors yet\n");
9355 return UnableToLegalize;
9356 }
9357
9358 unsigned EltBytes = EltTy.getSizeInBytes();
9359 Align VecAlign = getStackTemporaryAlignment(VecTy);
9360 Align EltAlign;
9361
// Spill the whole vector to a stack slot, then address the element in it.
9362 MachinePointerInfo PtrInfo;
9363 auto StackTemp = createStackTemporary(
9364 TypeSize::getFixed(VecTy.getSizeInBytes()), VecAlign, PtrInfo);
9365 MIRBuilder.buildStore(SrcVec, StackTemp, PtrInfo, VecAlign);
9366
9367 // Get the pointer to the element, and be sure not to hit undefined behavior
9368 // if the index is out of bounds.
9369 Register EltPtr = getVectorElementPointer(StackTemp.getReg(0), VecTy, Idx);
9370
9371 if (mi_match(Idx, MRI, m_ICst(IdxVal))) {
9372 int64_t Offset = IdxVal * EltBytes;
9373 PtrInfo = PtrInfo.getWithOffset(Offset);
9374 EltAlign = commonAlignment(VecAlign, Offset);
9375 } else {
9376 // We lose information with a variable offset.
9377 EltAlign = getStackTemporaryAlignment(EltTy);
9378 PtrInfo = MachinePointerInfo(MRI.getType(EltPtr).getAddressSpace());
9379 }
9380
9381 if (InsertVal) {
9382 // Write the inserted element
9383 MIRBuilder.buildStore(InsertVal, EltPtr, PtrInfo, EltAlign);
9384
9385 // Reload the whole vector.
9386 MIRBuilder.buildLoad(DstReg, StackTemp, PtrInfo, VecAlign);
9387 } else {
9388 MIRBuilder.buildLoad(DstReg, EltPtr, PtrInfo, EltAlign);
9389 }
9390
9391 MI.eraseFromParent();
9392 return Legalized;
9393}
9394
// Lower G_SHUFFLE_VECTOR into per-lane G_EXTRACT_VECTOR_ELT followed by a
// G_BUILD_VECTOR, caching repeated extracts of the same source lane.
9397 auto [DstReg, DstTy, Src0Reg, Src0Ty, Src1Reg, Src1Ty] =
9398 MI.getFirst3RegLLTs();
9399 LLT IdxTy = LLT::scalar(32);
9400
9401 ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
9404 LLT EltTy = DstTy.getScalarType();
9405
// Maps a (combined) mask index to its already-extracted element register.
9406 DenseMap<unsigned, Register> CachedExtract;
9407
9408 for (int Idx : Mask) {
// Negative mask entries mean "don't care": emit a single shared undef.
9409 if (Idx < 0) {
9410 if (!Undef.isValid())
9411 Undef = MIRBuilder.buildUndef(EltTy).getReg(0);
9412 BuildVec.push_back(Undef);
9413 continue;
9414 }
9415
9416 assert(!Src0Ty.isScalar() && "Unexpected scalar G_SHUFFLE_VECTOR");
9417
// Indices >= NumElts select from the second source vector.
9418 int NumElts = Src0Ty.getNumElements();
9419 Register SrcVec = Idx < NumElts ? Src0Reg : Src1Reg;
9420 int ExtractIdx = Idx < NumElts ? Idx : Idx - NumElts;
9421 auto [It, Inserted] = CachedExtract.try_emplace(Idx);
9422 if (Inserted) {
9423 auto IdxK = MIRBuilder.buildConstant(IdxTy, ExtractIdx);
9424 It->second =
9425 MIRBuilder.buildExtractVectorElement(EltTy, SrcVec, IdxK).getReg(0);
9426 }
9427 BuildVec.push_back(It->second);
9428 }
9429
9430 assert(DstTy.isVector() && "Unexpected scalar G_SHUFFLE_VECTOR");
9431 MIRBuilder.buildBuildVector(DstReg, BuildVec);
9432 MI.eraseFromParent();
9433 return Legalized;
9434}
9435
// Lower G_VECTOR_COMPRESS via a stack temporary: optionally pre-fill with the
// passthru, then store each selected lane at the next write position and
// reload the whole vector. Scalable vectors are not supported.
9438 auto [Dst, DstTy, Vec, VecTy, Mask, MaskTy, Passthru, PassthruTy] =
9439 MI.getFirst4RegLLTs();
9440
9441 if (VecTy.isScalableVector())
9442 report_fatal_error("Cannot expand masked_compress for scalable vectors.");
9443
9444 Align VecAlign = getStackTemporaryAlignment(VecTy);
9445 MachinePointerInfo PtrInfo;
9446 Register StackPtr =
9447 createStackTemporary(TypeSize::getFixed(VecTy.getSizeInBytes()), VecAlign,
9448 PtrInfo)
9449 .getReg(0);
9450 MachinePointerInfo ValPtrInfo =
9452
9453 LLT IdxTy = LLT::scalar(32);
9454 LLT ValTy = VecTy.getElementType();
9455 Align ValAlign = getStackTemporaryAlignment(ValTy);
9456
// Running write position within the stack slot (starts at lane 0).
9457 auto OutPos = MIRBuilder.buildConstant(IdxTy, 0);
9458
// An implicit-def passthru means unselected tail lanes may stay undefined.
9459 bool HasPassthru =
9460 MRI.getVRegDef(Passthru)->getOpcode() != TargetOpcode::G_IMPLICIT_DEF;
9461
9462 if (HasPassthru)
9463 MIRBuilder.buildStore(Passthru, StackPtr, PtrInfo, VecAlign);
9464
9465 Register LastWriteVal;
9466 std::optional<APInt> PassthruSplatVal =
9467 isConstantOrConstantSplatVector(*MRI.getVRegDef(Passthru), MRI);
9468
9469 if (PassthruSplatVal.has_value()) {
9470 LastWriteVal =
9471 MIRBuilder.buildConstant(ValTy, PassthruSplatVal.value()).getReg(0);
9472 } else if (HasPassthru) {
// Non-splat passthru: count the selected lanes (vecreduce_add of the
// zero-extended mask) and reload the passthru element that position holds.
9473 auto Popcount = MIRBuilder.buildZExt(MaskTy.changeElementSize(32), Mask);
9474 Popcount = MIRBuilder.buildInstr(TargetOpcode::G_VECREDUCE_ADD,
9475 {LLT::scalar(32)}, {Popcount});
9476
9477 Register LastElmtPtr =
9478 getVectorElementPointer(StackPtr, VecTy, Popcount.getReg(0));
9479 LastWriteVal =
9480 MIRBuilder.buildLoad(ValTy, LastElmtPtr, ValPtrInfo, ValAlign)
9481 .getReg(0);
9482 }
9483
9484 unsigned NumElmts = VecTy.getNumElements();
9485 for (unsigned I = 0; I < NumElmts; ++I) {
9486 auto Idx = MIRBuilder.buildConstant(IdxTy, I);
9487 auto Val = MIRBuilder.buildExtractVectorElement(ValTy, Vec, Idx);
9488 Register ElmtPtr =
9489 getVectorElementPointer(StackPtr, VecTy, OutPos.getReg(0));
// Store unconditionally; only a set mask bit advances OutPos, so an
// unselected lane's store is overwritten by the next selected one.
9490 MIRBuilder.buildStore(Val, ElmtPtr, ValPtrInfo, ValAlign);
9491
9492 LLT MaskITy = MaskTy.getElementType();
9493 auto MaskI = MIRBuilder.buildExtractVectorElement(MaskITy, Mask, Idx);
9494 if (MaskITy.getSizeInBits() > 1)
9495 MaskI = MIRBuilder.buildTrunc(LLT::scalar(1), MaskI);
9496
9497 MaskI = MIRBuilder.buildZExt(IdxTy, MaskI);
9498 OutPos = MIRBuilder.buildAdd(IdxTy, OutPos, MaskI);
9499
// After the last lane, clamp OutPos into bounds and (when all lanes were
// selected) make sure the final slot holds the correct value.
9500 if (HasPassthru && I == NumElmts - 1) {
9501 auto EndOfVector =
9502 MIRBuilder.buildConstant(IdxTy, VecTy.getNumElements() - 1);
9503 auto AllLanesSelected = MIRBuilder.buildICmp(
9504 CmpInst::ICMP_UGT, LLT::scalar(1), OutPos, EndOfVector);
9505 OutPos = MIRBuilder.buildInstr(TargetOpcode::G_UMIN, {IdxTy},
9506 {OutPos, EndOfVector});
9507 ElmtPtr = getVectorElementPointer(StackPtr, VecTy, OutPos.getReg(0));
9508
9509 LastWriteVal =
9510 MIRBuilder.buildSelect(ValTy, AllLanesSelected, Val, LastWriteVal)
9511 .getReg(0);
9512 MIRBuilder.buildStore(LastWriteVal, ElmtPtr, ValPtrInfo, ValAlign);
9513 }
9514 }
9515
9516 // TODO: Use StackPtr's FrameIndex alignment.
9517 MIRBuilder.buildLoad(Dst, StackPtr, PtrInfo, VecAlign);
9518
9519 MI.eraseFromParent();
9520 return Legalized;
9521}
9522
9524 Register AllocSize,
9525 Align Alignment,
9526 LLT PtrTy) {
// Do the arithmetic in an integer of pointer width, then cast back.
9527 LLT IntPtrTy = LLT::integer(PtrTy.getSizeInBits());
9528
9529 auto SPTmp = MIRBuilder.buildCopy(PtrTy, SPReg);
9530 SPTmp = MIRBuilder.buildCast(IntPtrTy, SPTmp);
9531
9532 // Subtract the final alloc from the SP. We use G_PTRTOINT here so we don't
9533 // have to generate an extra instruction to negate the alloc and then use
9534 // G_PTR_ADD to add the negative offset.
9535 auto Alloc = MIRBuilder.buildSub(IntPtrTy, SPTmp, AllocSize);
9536 if (Alignment > Align(1)) {
// Round down to the alignment by masking with -Alignment.
9537 APInt AlignMask(IntPtrTy.getSizeInBits(), Alignment.value(), true);
9538 AlignMask.negate();
9539 auto AlignCst = MIRBuilder.buildConstant(IntPtrTy, AlignMask);
9540 Alloc = MIRBuilder.buildAnd(IntPtrTy, Alloc, AlignCst);
9541 }
9542
9543 return MIRBuilder.buildCast(PtrTy, Alloc).getReg(0);
9544}
9545
// Lower G_DYN_STACKALLOC by computing the new stack pointer value and
// updating both SP and the result. Only downward-growing stacks are handled.
9548 const auto &MF = *MI.getMF();
9549 const auto &TFI = *MF.getSubtarget().getFrameLowering();
9550 if (TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsUp)
9551 return UnableToLegalize;
9552
9553 Register Dst = MI.getOperand(0).getReg();
9554 Register AllocSize = MI.getOperand(1).getReg();
9555 Align Alignment = assumeAligned(MI.getOperand(2).getImm());
9556
9557 LLT PtrTy = MRI.getType(Dst);
9558 Register SPReg = TLI.getStackPointerRegisterToSaveRestore();
9559 Register SPTmp =
9560 getDynStackAllocTargetPtr(SPReg, AllocSize, Alignment, PtrTy);
9561
// The new SP is both written back and returned as the allocation address.
9562 MIRBuilder.buildCopy(SPReg, SPTmp);
9563 MIRBuilder.buildCopy(Dst, SPTmp);
9564
9565 MI.eraseFromParent();
9566 return Legalized;
9567}
9568
// Lower G_STACKSAVE to a copy from the target's stack pointer register.
9571 Register StackPtr = TLI.getStackPointerRegisterToSaveRestore();
9572 if (!StackPtr)
9573 return UnableToLegalize;
9574
9575 MIRBuilder.buildCopy(MI.getOperand(0), StackPtr);
9576 MI.eraseFromParent();
9577 return Legalized;
9578}
9579
// Lower G_STACKRESTORE to a copy into the target's stack pointer register.
9582 Register StackPtr = TLI.getStackPointerRegisterToSaveRestore();
9583 if (!StackPtr)
9584 return UnableToLegalize;
9585
9586 MIRBuilder.buildCopy(StackPtr, MI.getOperand(0));
9587 MI.eraseFromParent();
9588 return Legalized;
9589}
9590
// Lower G_EXTRACT. Vector sources aligned on element boundaries are handled
// by unmerging and re-merging the needed elements; otherwise the source is
// treated as an integer and the bits are extracted with lshr + trunc.
9593 auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();
9594 unsigned Offset = MI.getOperand(2).getImm();
9595
9596 // Extract sub-vector or one element
9597 if (SrcTy.isVector()) {
9598 unsigned SrcEltSize = SrcTy.getElementType().getSizeInBits();
9599 unsigned DstSize = DstTy.getSizeInBits();
9600
// Only take this path when the extract lies on whole-element boundaries.
9601 if ((Offset % SrcEltSize == 0) && (DstSize % SrcEltSize == 0) &&
9602 (Offset + DstSize <= SrcTy.getSizeInBits())) {
9603 // Unmerge and allow access to each Src element for the artifact combiner.
9604 auto Unmerge = MIRBuilder.buildUnmerge(SrcTy.getElementType(), SrcReg);
9605
9606 // Take element(s) we need to extract and copy it (merge them).
9607 SmallVector<Register, 8> SubVectorElts;
9608 for (unsigned Idx = Offset / SrcEltSize;
9609 Idx < (Offset + DstSize) / SrcEltSize; ++Idx) {
9610 SubVectorElts.push_back(Unmerge.getReg(Idx));
9611 }
9612 if (SubVectorElts.size() == 1)
9613 MIRBuilder.buildCopy(DstReg, SubVectorElts[0]);
9614 else
9615 MIRBuilder.buildMergeLikeInstr(DstReg, SubVectorElts);
9616
9617 MI.eraseFromParent();
9618 return Legalized;
9619 }
9620 }
9621
// Pointers in non-integral address spaces cannot round-trip through integers.
9622 const DataLayout &DL = MIRBuilder.getDataLayout();
9623 if ((SrcTy.isPointer() &&
9624 DL.isNonIntegralAddressSpace(SrcTy.getAddressSpace())) ||
9625 (DstTy.isPointer() &&
9626 DL.isNonIntegralAddressSpace(DstTy.getAddressSpace()))) {
9627 LLVM_DEBUG(dbgs() << "Not casting non-integral address space integer\n");
9628 return UnableToLegalize;
9629 }
9630
9631 if ((DstTy.isScalar() || DstTy.isPointer()) &&
9632 (SrcTy.isScalar() || SrcTy.isPointer() ||
9633 (SrcTy.isVector() && DstTy == SrcTy.getElementType()))) {
// Normalize the source to a scalar integer for the shift arithmetic.
9634 LLT SrcIntTy = SrcTy;
9635 if (!SrcTy.isScalar()) {
9636 SrcIntTy = LLT::scalar(SrcTy.getSizeInBits());
9637 SrcReg = MIRBuilder.buildCast(SrcIntTy, SrcReg).getReg(0);
9638 }
9639
9640 Register ResultReg = DstReg;
9641 if (DstTy.isPointer())
9642 ResultReg =
9643 MRI.createGenericVirtualRegister(LLT::scalar(DstTy.getSizeInBits()));
9644
9645 if (Offset == 0)
9646 MIRBuilder.buildTrunc(ResultReg, SrcReg);
9647 else {
9648 auto ShiftAmt = MIRBuilder.buildConstant(SrcIntTy, Offset);
9649 auto Shr = MIRBuilder.buildLShr(SrcIntTy, SrcReg, ShiftAmt);
9650 MIRBuilder.buildTrunc(ResultReg, Shr);
9651 }
9652
9653 if (DstTy.isPointer())
9654 MIRBuilder.buildIntToPtr(DstReg, ResultReg);
9655
9656 MI.eraseFromParent();
9657 return Legalized;
9658 }
9659
9660 return UnableToLegalize;
9661}
9662
// Lower G_INSERT. Element-aligned inserts into vectors are rebuilt from
// unmerged elements; otherwise the destination is treated as an integer and
// the inserted bits are masked and OR-ed into place.
9664 auto [Dst, Src, InsertSrc] = MI.getFirst3Regs();
9665 uint64_t Offset = MI.getOperand(3).getImm();
9666
9667 LLT DstTy = MRI.getType(Src);
9668 LLT InsertTy = MRI.getType(InsertSrc);
9669
9670 const DataLayout &DL = MIRBuilder.getDataLayout();
9671 bool IsNonIntegralInsert =
9672 InsertTy.isPointerOrPointerVector() &&
9673 DL.isNonIntegralAddressSpace(InsertTy.getAddressSpace());
9674 bool IsNonIntegralDst = DstTy.isPointerOrPointerVector() &&
9675 DL.isNonIntegralAddressSpace(DstTy.getAddressSpace());
9676
9677 // Insert sub-vector or one element
9678 if (DstTy.isVector()) {
9679 LLT EltTy = DstTy.getElementType();
9680
// Non-integral pointers may only be moved whole, never converted.
9681 if ((IsNonIntegralInsert || IsNonIntegralDst) && InsertTy != EltTy) {
9682 LLVM_DEBUG(dbgs() << "Not casting non-integral address space integer\n");
9683 return UnableToLegalize;
9684 }
9685
9686 unsigned EltSize = EltTy.getSizeInBits();
9687 unsigned InsertSize = InsertTy.getSizeInBits();
9688
// Only take this path when the insert lies on whole-element boundaries.
9689 if ((Offset % EltSize == 0) && (InsertSize % EltSize == 0) &&
9690 (Offset + InsertSize <= DstTy.getSizeInBits())) {
9691 auto UnmergeSrc = MIRBuilder.buildUnmerge(EltTy, Src);
9693 unsigned Idx = 0;
9694 // Elements from Src before insert start Offset
9695 for (; Idx < Offset / EltSize; ++Idx) {
9696 DstElts.push_back(UnmergeSrc.getReg(Idx));
9697 }
9698
9699 // Replace elements in Src with elements from InsertSrc
9700 if (InsertTy.getSizeInBits() > EltSize) {
9701 auto UnmergeInsertSrc = MIRBuilder.buildUnmerge(EltTy, InsertSrc);
9702 for (unsigned i = 0; Idx < (Offset + InsertSize) / EltSize;
9703 ++Idx, ++i) {
9704 DstElts.push_back(UnmergeInsertSrc.getReg(i));
9705 }
9706 } else {
// A single inserted element may need a ptr<->int cast to match EltTy.
9707 if (InsertTy.isPointer() && !EltTy.isPointer())
9708 InsertSrc = MIRBuilder.buildPtrToInt(EltTy, InsertSrc).getReg(0);
9709 else if (!InsertTy.isPointer() && EltTy.isPointer())
9710 InsertSrc = MIRBuilder.buildIntToPtr(EltTy, InsertSrc).getReg(0);
9711 DstElts.push_back(InsertSrc);
9712 ++Idx;
9713 }
9714
9715 // Remaining elements from Src after insert
9716 for (; Idx < DstTy.getNumElements(); ++Idx) {
9717 DstElts.push_back(UnmergeSrc.getReg(Idx));
9718 }
9719
9720 MIRBuilder.buildMergeLikeInstr(Dst, DstElts);
9721 MI.eraseFromParent();
9722 return Legalized;
9723 }
9724 }
9725
// The bit-arithmetic fallback below only supports scalar-like inserts.
9726 if (InsertTy.isVector() ||
9727 (DstTy.isVector() && DstTy.getElementType() != InsertTy))
9728 return UnableToLegalize;
9729
9730 if (IsNonIntegralDst || IsNonIntegralInsert) {
9731 LLVM_DEBUG(dbgs() << "Not casting non-integral address space integer\n");
9732 return UnableToLegalize;
9733 }
9734
9735 LLT IntDstTy = DstTy;
9736
9737 if (!DstTy.isScalar()) {
9738 IntDstTy = LLT::scalar(DstTy.getSizeInBits());
9739 Src = MIRBuilder.buildCast(IntDstTy, Src).getReg(0);
9740 }
9741
9742 if (!InsertTy.isScalar()) {
9743 const LLT IntInsertTy = LLT::scalar(InsertTy.getSizeInBits());
9744 InsertSrc = MIRBuilder.buildPtrToInt(IntInsertTy, InsertSrc).getReg(0);
9745 }
9746
// Zero-extend the inserted value and shift it to its bit offset.
9747 Register ExtInsSrc = MIRBuilder.buildZExt(IntDstTy, InsertSrc).getReg(0);
9748 if (Offset != 0) {
9749 auto ShiftAmt = MIRBuilder.buildConstant(IntDstTy, Offset);
9750 ExtInsSrc = MIRBuilder.buildShl(IntDstTy, ExtInsSrc, ShiftAmt).getReg(0);
9751 }
9752
9754 DstTy.getSizeInBits(), Offset + InsertTy.getSizeInBits(), Offset);
9755
// Clear the destination's bits in the insert range, then OR in the value.
9756 auto Mask = MIRBuilder.buildConstant(IntDstTy, MaskVal);
9757 auto MaskedSrc = MIRBuilder.buildAnd(IntDstTy, Src, Mask);
9758 auto Or = MIRBuilder.buildOr(IntDstTy, MaskedSrc, ExtInsSrc);
9759
9760 MIRBuilder.buildCast(Dst, Or);
9761 MI.eraseFromParent();
9762 return Legalized;
9763}
9764
// Lower G_SADDO/G_SSUBO: perform the plain add/sub, then derive the signed
// overflow flag from sign comparisons of the operands and the result.
9767 auto [Dst0, Dst0Ty, Dst1, Dst1Ty, LHS, LHSTy, RHS, RHSTy] =
9768 MI.getFirst4RegLLTs();
9769 const bool IsAdd = MI.getOpcode() == TargetOpcode::G_SADDO;
9770
9771 LLT Ty = Dst0Ty;
9772 LLT BoolTy = Dst1Ty;
9773
// Compute into a clone so the overflow checks can use the result before
// Dst0 is defined.
9774 Register NewDst0 = MRI.cloneVirtualRegister(Dst0);
9775
9776 if (IsAdd)
9777 MIRBuilder.buildAdd(NewDst0, LHS, RHS);
9778 else
9779 MIRBuilder.buildSub(NewDst0, LHS, RHS);
9780
9781 // TODO: If SADDSAT/SSUBSAT is legal, compare results to detect overflow.
9782
9783 auto Zero = MIRBuilder.buildConstant(Ty, 0);
9784
9785 if (IsAdd) {
9786 // For an addition, the result should be less than one of the operands (LHS)
9787 // if and only if the other operand (RHS) is negative, otherwise there will
9788 // be overflow.
9789 auto ResultLowerThanLHS =
9790 MIRBuilder.buildICmp(CmpInst::ICMP_SLT, BoolTy, NewDst0, LHS);
9791 auto RHSNegative =
9792 MIRBuilder.buildICmp(CmpInst::ICMP_SLT, BoolTy, RHS, Zero);
9793 MIRBuilder.buildXor(Dst1, RHSNegative, ResultLowerThanLHS);
9794 } else {
9795 // For subtraction, overflow occurs when the signed comparison of operands
9796 // doesn't match the sign of the result.
9797 auto LHSLessThanRHS =
9798 MIRBuilder.buildICmp(CmpInst::ICMP_SLT, BoolTy, LHS, RHS);
9799 auto ResultNegative =
9800 MIRBuilder.buildICmp(CmpInst::ICMP_SLT, BoolTy, NewDst0, Zero);
9801 MIRBuilder.buildXor(Dst1, LHSLessThanRHS, ResultNegative);
9802 }
9803
9804 MIRBuilder.buildCopy(Dst0, NewDst0);
9805 MI.eraseFromParent();
9806
9807 return Legalized;
9808}
9809
// Lower a signed add-with-carry-in: sum = LHS + RHS + zext(CarryIn), with the
// overflow-out computed from the sign bits of the operands and the sum.
9811 auto [Res, OvOut, LHS, RHS, CarryIn] = MI.getFirst5Regs();
9812 const LLT Ty = MRI.getType(Res);
9813
9814 // sum = LHS + RHS + zext(CarryIn)
9815 auto Tmp = MIRBuilder.buildAdd(Ty, LHS, RHS);
9816 auto CarryZ = MIRBuilder.buildZExt(Ty, CarryIn);
9817 auto Sum = MIRBuilder.buildAdd(Ty, Tmp, CarryZ);
9818 MIRBuilder.buildCopy(Res, Sum);
9819
9820 // OvOut = icmp slt ((sum ^ lhs) & (sum ^ rhs)), 0
// Overflow iff the sum's sign differs from both operands' signs.
9821 auto AX = MIRBuilder.buildXor(Ty, Sum, LHS);
9822 auto BX = MIRBuilder.buildXor(Ty, Sum, RHS);
9823 auto T = MIRBuilder.buildAnd(Ty, AX, BX);
9824
9825 auto Zero = MIRBuilder.buildConstant(Ty, 0);
9826 MIRBuilder.buildICmp(CmpInst::ICMP_SLT, OvOut, T, Zero);
9827
9828 MI.eraseFromParent();
9829 return Legalized;
9830}
9831
// Lower a signed sub-with-borrow-in: Diff = LHS - (RHS + zext(CarryIn)), with
// the overflow-out taken from the sign test below.
9833 auto [Res, OvOut, LHS, RHS, CarryIn] = MI.getFirst5Regs();
9834 const LLT Ty = MRI.getType(Res);
9835
9836 // Diff = LHS - (RHS + zext(CarryIn))
9837 auto CarryZ = MIRBuilder.buildZExt(Ty, CarryIn);
9838 auto RHSPlusCI = MIRBuilder.buildAdd(Ty, RHS, CarryZ);
9839 auto Diff = MIRBuilder.buildSub(Ty, LHS, RHSPlusCI);
9840 MIRBuilder.buildCopy(Res, Diff);
9841
9842 // ov = msb((LHS ^ RHS) & (LHS ^ Diff))
// Overflow iff operand signs differ and the difference's sign differs
// from LHS.
9843 auto X1 = MIRBuilder.buildXor(Ty, LHS, RHS);
9844 auto X2 = MIRBuilder.buildXor(Ty, LHS, Diff);
9845 auto T = MIRBuilder.buildAnd(Ty, X1, X2);
9846 auto Zero = MIRBuilder.buildConstant(Ty, 0);
9847 MIRBuilder.buildICmp(CmpInst::ICMP_SLT, OvOut, T, Zero);
9848
9849 MI.eraseFromParent();
9850 return Legalized;
9851}
9852
// Lower saturating add/sub (G_[US]{ADD,SUB}SAT) using min/max clamping of the
// RHS before a plain add/sub, per the formulas in the comments below.
9855 auto [Res, LHS, RHS] = MI.getFirst3Regs();
9856 LLT Ty = MRI.getType(Res);
9857 bool IsSigned;
9858 bool IsAdd;
9859 unsigned BaseOp;
9860 switch (MI.getOpcode()) {
9861 default:
9862 llvm_unreachable("unexpected addsat/subsat opcode");
9863 case TargetOpcode::G_UADDSAT:
9864 IsSigned = false;
9865 IsAdd = true;
9866 BaseOp = TargetOpcode::G_ADD;
9867 break;
9868 case TargetOpcode::G_SADDSAT:
9869 IsSigned = true;
9870 IsAdd = true;
9871 BaseOp = TargetOpcode::G_ADD;
9872 break;
9873 case TargetOpcode::G_USUBSAT:
9874 IsSigned = false;
9875 IsAdd = false;
9876 BaseOp = TargetOpcode::G_SUB;
9877 break;
9878 case TargetOpcode::G_SSUBSAT:
9879 IsSigned = true;
9880 IsAdd = false;
9881 BaseOp = TargetOpcode::G_SUB;
9882 break;
9883 }
9884
9885 if (IsSigned) {
9886 // sadd.sat(a, b) ->
9887 // hi = 0x7fffffff - smax(a, 0)
9888 // lo = 0x80000000 - smin(a, 0)
9889 // a + smin(smax(lo, b), hi)
9890 // ssub.sat(a, b) ->
9891 // lo = smax(a, -1) - 0x7fffffff
9892 // hi = smin(a, -1) - 0x80000000
9893 // a - smin(smax(lo, b), hi)
9894 // TODO: AMDGPU can use a "median of 3" instruction here:
9895 // a +/- med3(lo, b, hi)
9896 uint64_t NumBits = Ty.getScalarSizeInBits();
9897 auto MaxVal =
9898 MIRBuilder.buildConstant(Ty, APInt::getSignedMaxValue(NumBits));
9899 auto MinVal =
9900 MIRBuilder.buildConstant(Ty, APInt::getSignedMinValue(NumBits));
9902 if (IsAdd) {
9903 auto Zero = MIRBuilder.buildConstant(Ty, 0);
9904 Hi = MIRBuilder.buildSub(Ty, MaxVal, MIRBuilder.buildSMax(Ty, LHS, Zero));
9905 Lo = MIRBuilder.buildSub(Ty, MinVal, MIRBuilder.buildSMin(Ty, LHS, Zero));
9906 } else {
9907 auto NegOne = MIRBuilder.buildConstant(Ty, -1);
9908 Lo = MIRBuilder.buildSub(Ty, MIRBuilder.buildSMax(Ty, LHS, NegOne),
9909 MaxVal);
9910 Hi = MIRBuilder.buildSub(Ty, MIRBuilder.buildSMin(Ty, LHS, NegOne),
9911 MinVal);
9912 }
// Clamp RHS to the window [Lo, Hi] so the base op can never wrap.
9913 auto RHSClamped =
9914 MIRBuilder.buildSMin(Ty, MIRBuilder.buildSMax(Ty, Lo, RHS), Hi);
9915 MIRBuilder.buildInstr(BaseOp, {Res}, {LHS, RHSClamped});
9916 } else {
9917 // uadd.sat(a, b) -> a + umin(~a, b)
9918 // usub.sat(a, b) -> a - umin(a, b)
9919 Register Not = IsAdd ? MIRBuilder.buildNot(Ty, LHS).getReg(0) : LHS;
9920 auto Min = MIRBuilder.buildUMin(Ty, Not, RHS);
9921 MIRBuilder.buildInstr(BaseOp, {Res}, {LHS, Min});
9922 }
9923
9924 MI.eraseFromParent();
9925 return Legalized;
9926}
9927
// Lower saturating add/sub via the corresponding overflow opcode
// (G_[US]ADDO/G_[US]SUBO): select a clamp value when overflow is reported.
9930 auto [Res, LHS, RHS] = MI.getFirst3Regs();
9931 LLT Ty = MRI.getType(Res);
9932 LLT BoolTy = Ty.changeElementSize(1);
9933 bool IsSigned;
9934 bool IsAdd;
9935 unsigned OverflowOp;
9936 switch (MI.getOpcode()) {
9937 default:
9938 llvm_unreachable("unexpected addsat/subsat opcode");
9939 case TargetOpcode::G_UADDSAT:
9940 IsSigned = false;
9941 IsAdd = true;
9942 OverflowOp = TargetOpcode::G_UADDO;
9943 break;
9944 case TargetOpcode::G_SADDSAT:
9945 IsSigned = true;
9946 IsAdd = true;
9947 OverflowOp = TargetOpcode::G_SADDO;
9948 break;
9949 case TargetOpcode::G_USUBSAT:
9950 IsSigned = false;
9951 IsAdd = false;
9952 OverflowOp = TargetOpcode::G_USUBO;
9953 break;
9954 case TargetOpcode::G_SSUBSAT:
9955 IsSigned = true;
9956 IsAdd = false;
9957 OverflowOp = TargetOpcode::G_SSUBO;
9958 break;
9959 }
9960
9961 auto OverflowRes =
9962 MIRBuilder.buildInstr(OverflowOp, {Ty, BoolTy}, {LHS, RHS});
9963 Register Tmp = OverflowRes.getReg(0);
9964 Register Ov = OverflowRes.getReg(1);
9965 MachineInstrBuilder Clamp;
9966 if (IsSigned) {
9967 // sadd.sat(a, b) ->
9968 // {tmp, ov} = saddo(a, b)
9969 // ov ? (tmp >>s 31) + 0x80000000 : r
9970 // ssub.sat(a, b) ->
9971 // {tmp, ov} = ssubo(a, b)
9972 // ov ? (tmp >>s 31) + 0x80000000 : r
// On overflow, saturate toward the sign opposite the wrapped result:
// ashr replicates tmp's sign, and adding INT_MIN flips it to MIN or MAX.
9973 uint64_t NumBits = Ty.getScalarSizeInBits();
9974 auto ShiftAmount = MIRBuilder.buildConstant(Ty, NumBits - 1);
9975 auto Sign = MIRBuilder.buildAShr(Ty, Tmp, ShiftAmount);
9976 auto MinVal =
9977 MIRBuilder.buildConstant(Ty, APInt::getSignedMinValue(NumBits));
9978 Clamp = MIRBuilder.buildAdd(Ty, Sign, MinVal);
9979 } else {
9980 // uadd.sat(a, b) ->
9981 // {tmp, ov} = uaddo(a, b)
9982 // ov ? 0xffffffff : tmp
9983 // usub.sat(a, b) ->
9984 // {tmp, ov} = usubo(a, b)
9985 // ov ? 0 : tmp
9986 Clamp = MIRBuilder.buildConstant(Ty, IsAdd ? -1 : 0);
9987 }
9988 MIRBuilder.buildSelect(Res, Ov, Clamp, Tmp);
9989
9990 MI.eraseFromParent();
9991 return Legalized;
9992}
9993
// Lower saturating shift-left (G_SSHLSAT/G_USHLSAT): shift, shift back, and
// if the round-trip changed the value the shift overflowed, so saturate.
9996 assert((MI.getOpcode() == TargetOpcode::G_SSHLSAT ||
9997 MI.getOpcode() == TargetOpcode::G_USHLSAT) &&
9998 "Expected shlsat opcode!");
9999 bool IsSigned = MI.getOpcode() == TargetOpcode::G_SSHLSAT;
10000 auto [Res, LHS, RHS] = MI.getFirst3Regs();
10001 LLT Ty = MRI.getType(Res);
10002 LLT BoolTy = Ty.changeElementSize(1);
10003
10004 unsigned BW = Ty.getScalarSizeInBits();
10005 auto Result = MIRBuilder.buildShl(Ty, LHS, RHS);
// Shift back with the matching (arithmetic/logical) right shift; a lossless
// shl must reproduce LHS exactly.
10006 auto Orig = IsSigned ? MIRBuilder.buildAShr(Ty, Result, RHS)
10007 : MIRBuilder.buildLShr(Ty, Result, RHS);
10008
10009 MachineInstrBuilder SatVal;
10010 if (IsSigned) {
// Signed saturation: negative LHS saturates to MIN, otherwise to MAX.
10011 auto SatMin = MIRBuilder.buildConstant(Ty, APInt::getSignedMinValue(BW));
10012 auto SatMax = MIRBuilder.buildConstant(Ty, APInt::getSignedMaxValue(BW));
10013 auto Cmp = MIRBuilder.buildICmp(CmpInst::ICMP_SLT, BoolTy, LHS,
10014 MIRBuilder.buildConstant(Ty, 0));
10015 SatVal = MIRBuilder.buildSelect(Ty, Cmp, SatMin, SatMax);
10016 } else {
10017 SatVal = MIRBuilder.buildConstant(Ty, APInt::getMaxValue(BW));
10018 }
10019 auto Ov = MIRBuilder.buildICmp(CmpInst::ICMP_NE, BoolTy, LHS, Orig);
10020 MIRBuilder.buildSelect(Res, Ov, SatVal, Result);
10021
10022 MI.eraseFromParent();
10023 return Legalized;
10024}
10025
// Lower G_BSWAP using shifts, masks and ORs: swap the outermost byte pair
// first, then each inner pair working toward the middle.
10027 auto [Dst, Src] = MI.getFirst2Regs();
10028 const LLT Ty = MRI.getType(Src);
10029 unsigned SizeInBytes = (Ty.getScalarSizeInBits() + 7) / 8;
10030 unsigned BaseShiftAmt = (SizeInBytes - 1) * 8;
10031
10032 // Swap most and least significant byte, set remaining bytes in Res to zero.
10033 auto ShiftAmt = MIRBuilder.buildConstant(Ty, BaseShiftAmt);
10034 auto LSByteShiftedLeft = MIRBuilder.buildShl(Ty, Src, ShiftAmt);
10035 auto MSByteShiftedRight = MIRBuilder.buildLShr(Ty, Src, ShiftAmt);
10036 auto Res = MIRBuilder.buildOr(Ty, MSByteShiftedRight, LSByteShiftedLeft);
10037
10038 // Set i-th high/low byte in Res to i-th low/high byte from Src.
10039 for (unsigned i = 1; i < SizeInBytes / 2; ++i) {
10040 // AND with Mask leaves byte i unchanged and sets remaining bytes to 0.
10041 APInt APMask(SizeInBytes * 8, 0xFF << (i * 8));
10042 auto Mask = MIRBuilder.buildConstant(Ty, APMask);
// Each step inward shrinks the shift distance by two byte positions.
10043 auto ShiftAmt = MIRBuilder.buildConstant(Ty, BaseShiftAmt - 16 * i);
10044 // Low byte shifted left to place of high byte: (Src & Mask) << ShiftAmt.
10045 auto LoByte = MIRBuilder.buildAnd(Ty, Src, Mask);
10046 auto LoShiftedLeft = MIRBuilder.buildShl(Ty, LoByte, ShiftAmt);
10047 Res = MIRBuilder.buildOr(Ty, Res, LoShiftedLeft);
10048 // High byte shifted right to place of low byte: (Src >> ShiftAmt) & Mask.
10049 auto SrcShiftedRight = MIRBuilder.buildLShr(Ty, Src, ShiftAmt);
10050 auto HiShiftedRight = MIRBuilder.buildAnd(Ty, SrcShiftedRight, Mask);
10051 Res = MIRBuilder.buildOr(Ty, Res, HiShiftedRight);
10052 }
// Retarget the last OR to define Dst directly instead of a temporary.
10053 Res.getInstr()->getOperand(0).setReg(Dst);
10054
10055 MI.eraseFromParent();
10056 return Legalized;
10057}
10058
// Helper for bitreverse lowering: builds one "swap adjacent N-bit groups"
// stage. Mask selects the high half of each 2N-bit group; the masked bits
// are shifted down by N while the remaining bits are shifted up by N and
// re-masked, then the two halves are ORed into Dst.
// (The first line of the signature is not visible in this view.)
10059//{ (Src & Mask) >> N } | { (Src << N) & Mask }
10061 MachineInstrBuilder Src, const APInt &Mask) {
10062 const LLT Ty = Dst.getLLTTy(*B.getMRI());
10063 MachineInstrBuilder C_N = B.buildConstant(Ty, N);
10064 MachineInstrBuilder MaskLoNTo0 = B.buildConstant(Ty, Mask);
10065 auto LHS = B.buildLShr(Ty, B.buildAnd(Ty, Src, MaskLoNTo0), C_N);
10066 auto RHS = B.buildAnd(Ty, B.buildShl(Ty, Src, C_N), MaskLoNTo0);
10067 return B.buildOr(Dst, LHS, RHS);
10068}
10069
// Lower G_BITREVERSE.
// For scalar sizes >= 8: either bitcast to an i8 vector and use a legal
// v_i8 G_BITREVERSE (after a bswap to fix byte order), or do
// bswap-then-SwapN stages (swap nibbles, then 2-bit pairs, then single
// bits). For scalar sizes < 8: build the result bit by bit.
// (The function signature line is not visible in this view.)
10072 auto [Dst, Src] = MI.getFirst2Regs();
10073 const LLT SrcTy = MRI.getType(Src);
10074 unsigned Size = SrcTy.getScalarSizeInBits();
10075 unsigned VSize = SrcTy.getSizeInBits();
10076
10077 if (Size >= 8) {
10078 if (SrcTy.isVector() && (VSize % 8 == 0) &&
10079 (LI.isLegal({TargetOpcode::G_BITREVERSE,
10080 {LLT::fixed_vector(VSize / 8, LLT::integer(8)),
10081 LLT::fixed_vector(VSize / 8, LLT::integer(8))}}))) {
10082 // If bitreverse is legal for i8 vector of the same size, then cast
10083 // to i8 vector type.
10084 // e.g. v4s32 -> v16s8
10085 LLT VTy = LLT::fixed_vector(VSize / 8, LLT::integer(8));
// bswap first so that, after the per-byte bit reversal in the i8
// vector domain, bytes end up in fully reversed bit order.
10086 auto BSWAP = MIRBuilder.buildBSwap(SrcTy, Src);
10087 auto Cast = MIRBuilder.buildBitcast(VTy, BSWAP);
10088 auto RBIT = MIRBuilder.buildBitReverse(VTy, Cast);
10089 MIRBuilder.buildBitcast(Dst, RBIT);
10090 } else {
10091 MachineInstrBuilder BSWAP =
10092 MIRBuilder.buildInstr(TargetOpcode::G_BSWAP, {SrcTy}, {Src});
10093
10094 // swap high and low 4 bits in 8 bit blocks 7654|3210 -> 3210|7654
10095 // [(val & 0xF0F0F0F0) >> 4] | [(val & 0x0F0F0F0F) << 4]
10096 // -> [(val & 0xF0F0F0F0) >> 4] | [(val << 4) & 0xF0F0F0F0]
10097 MachineInstrBuilder Swap4 = SwapN(4, SrcTy, MIRBuilder, BSWAP,
10098 APInt::getSplat(Size, APInt(8, 0xF0)));
10099
10100 // swap high and low 2 bits in 4 bit blocks 32|10 76|54 -> 10|32 54|76
10101 // [(val & 0xCCCCCCCC) >> 2] & [(val & 0x33333333) << 2]
10102 // -> [(val & 0xCCCCCCCC) >> 2] & [(val << 2) & 0xCCCCCCCC]
10103 MachineInstrBuilder Swap2 = SwapN(2, SrcTy, MIRBuilder, Swap4,
10104 APInt::getSplat(Size, APInt(8, 0xCC)));
10105
10106 // swap high and low 1 bit in 2 bit blocks 1|0 3|2 5|4 7|6 -> 0|1 2|3 4|5
10107 // 6|7
10108 // [(val & 0xAAAAAAAA) >> 1] & [(val & 0x55555555) << 1]
10109 // -> [(val & 0xAAAAAAAA) >> 1] & [(val << 1) & 0xAAAAAAAA]
10110 SwapN(1, Dst, MIRBuilder, Swap2, APInt::getSplat(Size, APInt(8, 0xAA)));
10111 }
10112 } else {
10113 // Expand bitreverse for types smaller than 8 bits.
// Move bit J of Src into bit position I (I and J walk toward each
// other), isolate it, and OR all the repositioned bits together.
10115 for (unsigned I = 0, J = Size - 1; I < Size; ++I, --J) {
10117 if (I < J) {
10118 auto ShAmt = MIRBuilder.buildConstant(SrcTy, J - I);
10119 Tmp2 = MIRBuilder.buildShl(SrcTy, Src, ShAmt);
10120 } else {
10121 auto ShAmt = MIRBuilder.buildConstant(SrcTy, I - J);
10122 Tmp2 = MIRBuilder.buildLShr(SrcTy, Src, ShAmt);
10123 }
10124
10125 auto Mask = MIRBuilder.buildConstant(SrcTy, 1ULL << J);
10126 Tmp2 = MIRBuilder.buildAnd(SrcTy, Tmp2, Mask);
10127 if (I == 0)
10128 Tmp = Tmp2;
10129 else
10130 Tmp = MIRBuilder.buildOr(SrcTy, Tmp, Tmp2);
10131 }
10132 MIRBuilder.buildCopy(Dst, Tmp);
10133 }
10134
10135 MI.eraseFromParent();
10136 return Legalized;
10137}
10138
// Lower G_READ_REGISTER / G_WRITE_REGISTER to a COPY from/to the named
// physical register. The register name comes from the metadata operand;
// if the target does not recognize it, a diagnostic is emitted and (for
// reads) the result is replaced with G_IMPLICIT_DEF.
// (The function signature line is not visible in this view.)
10141 MachineFunction &MF = MIRBuilder.getMF();
10142
10143 bool IsRead = MI.getOpcode() == TargetOpcode::G_READ_REGISTER;
// Operand layout differs: read = (def, name-md); write = (name-md, use).
10144 int NameOpIdx = IsRead ? 1 : 0;
10145 int ValRegIndex = IsRead ? 0 : 1;
10146
10147 Register ValReg = MI.getOperand(ValRegIndex).getReg();
10148 const LLT Ty = MRI.getType(ValReg);
10149 const MDString *RegStr = cast<MDString>(
10150 cast<MDNode>(MI.getOperand(NameOpIdx).getMetadata())->getOperand(0));
10151
10152 Register PhysReg = TLI.getRegisterByName(RegStr->getString().data(), Ty, MF);
10153 if (!PhysReg) {
// Unknown register name: report it against the function and keep the
// IR well-formed by defining the read result as undef.
10154 const Function &Fn = MF.getFunction();
10156 "invalid register \"" + Twine(RegStr->getString().data()) + "\" for " +
10157 (IsRead ? "llvm.read_register" : "llvm.write_register"),
10158 Fn, MI.getDebugLoc()));
10159 if (IsRead)
10160 MIRBuilder.buildUndef(ValReg);
10161
10162 MI.eraseFromParent();
10163 return Legalized;
10164 }
10165
10166 if (IsRead)
10167 MIRBuilder.buildCopy(ValReg, PhysReg);
10168 else
10169 MIRBuilder.buildCopy(PhysReg, ValReg);
10170
10171 MI.eraseFromParent();
10172 return Legalized;
10173}
10174
// Lower G_SMULH / G_UMULH (high half of a multiply) via a double-width
// multiply: extend both operands to 2x width (sext for signed, zext for
// unsigned), multiply, shift right by the original width (ashr/lshr to
// match signedness), and truncate back.
// (The function signature line is not visible in this view.)
10177 bool IsSigned = MI.getOpcode() == TargetOpcode::G_SMULH;
10178 unsigned ExtOp = IsSigned ? TargetOpcode::G_SEXT : TargetOpcode::G_ZEXT;
10179 Register Result = MI.getOperand(0).getReg();
10180 LLT OrigTy = MRI.getType(Result);
10181 auto SizeInBits = OrigTy.getScalarSizeInBits();
10182 LLT WideTy = OrigTy.changeElementSize(SizeInBits * 2);
10183
10184 auto LHS = MIRBuilder.buildInstr(ExtOp, {WideTy}, {MI.getOperand(1)});
10185 auto RHS = MIRBuilder.buildInstr(ExtOp, {WideTy}, {MI.getOperand(2)});
10186 auto Mul = MIRBuilder.buildMul(WideTy, LHS, RHS);
10187 unsigned ShiftOp = IsSigned ? TargetOpcode::G_ASHR : TargetOpcode::G_LSHR;
10188
10189 auto ShiftAmt = MIRBuilder.buildConstant(WideTy, SizeInBits);
10190 auto Shifted = MIRBuilder.buildInstr(ShiftOp, {WideTy}, {Mul, ShiftAmt});
10191 MIRBuilder.buildTrunc(Result, Shifted);
10192
10193 MI.eraseFromParent();
10194 return Legalized;
10195}
10196
// Lower G_IS_FPCLASS by bitcasting the FP value to an integer and testing
// the requested FPClassTest mask classes with integer compares on the
// sign / exponent / mantissa bit fields. Individual class tests are ORed
// into an accumulator (Res). Trivial masks (none / all) fold to 0 / 1.
// (The function signature line is not visible in this view.)
10199 auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();
10200 FPClassTest Mask = static_cast<FPClassTest>(MI.getOperand(2).getImm());
10201
10202 if (Mask == fcNone) {
10203 MIRBuilder.buildConstant(DstReg, 0);
10204 MI.eraseFromParent();
10205 return Legalized;
10206 }
10207 if (Mask == fcAllFlags) {
10208 MIRBuilder.buildConstant(DstReg, 1);
10209 MI.eraseFromParent();
10210 return Legalized;
10211 }
10212
10213 // TODO: Try inverting the test with getInvertedFPClassTest like the DAG
10214 // version
10215
10216 unsigned BitSize = SrcTy.getScalarSizeInBits();
10217 const fltSemantics &Semantics = getFltSemanticForLLT(SrcTy.getScalarType());
10218
10219 LLT IntTy = SrcTy.changeElementType(LLT::integer(BitSize));
10220 auto AsInt = SrcTy == IntTy ? MIRBuilder.buildCopy(IntTy, SrcReg)
10221 : MIRBuilder.buildBitcast(IntTy, SrcReg);
10222
10223 // Various masks.
10224 APInt SignBit = APInt::getSignMask(BitSize);
10225 APInt ValueMask = APInt::getSignedMaxValue(BitSize); // All bits but sign.
10226 APInt Inf = APFloat::getInf(Semantics).bitcastToAPInt(); // Exp and int bit.
10227 APInt ExpMask = Inf;
10228 APInt AllOneMantissa = APFloat::getLargest(Semantics).bitcastToAPInt() & ~Inf;
10229 APInt QNaNBitMask =
10230 APInt::getOneBitSet(BitSize, AllOneMantissa.getActiveBits() - 1);
10231 APInt InversionMask = APInt::getAllOnes(DstTy.getScalarSizeInBits());
10232
10233 auto SignBitC = MIRBuilder.buildConstant(IntTy, SignBit);
10234 auto ValueMaskC = MIRBuilder.buildConstant(IntTy, ValueMask);
10235 auto InfC = MIRBuilder.buildConstant(IntTy, Inf);
10236 auto ExpMaskC = MIRBuilder.buildConstant(IntTy, ExpMask);
10237 auto ZeroC = MIRBuilder.buildConstant(IntTy, 0);
10238
// Abs = value with sign bit cleared; Sign = (AsInt != Abs), i.e. the
// sign bit was set.
10239 auto Abs = MIRBuilder.buildAnd(IntTy, AsInt, ValueMaskC);
10240 auto Sign =
10241 MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_NE, DstTy, AsInt, Abs);
10242
10243 auto Res = MIRBuilder.buildConstant(DstTy, 0);
10244 // Clang doesn't support capture of structured bindings:
10245 LLT DstTyCopy = DstTy;
10246 const auto appendToRes = [&](MachineInstrBuilder ToAppend) {
10247 Res = MIRBuilder.buildOr(DstTyCopy, Res, ToAppend);
10248 };
10249
10250 // Tests that involve more than one class should be processed first.
10251 if ((Mask & fcFinite) == fcFinite) {
10252 // finite(V) ==> abs(V) u< exp_mask
10253 appendToRes(MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_ULT, DstTy, Abs,
10254 ExpMaskC));
10255 Mask &= ~fcFinite;
10256 } else if ((Mask & fcFinite) == fcPosFinite) {
10257 // finite(V) && V > 0 ==> V u< exp_mask
10258 appendToRes(MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_ULT, DstTy, AsInt,
10259 ExpMaskC));
10260 Mask &= ~fcPosFinite;
10261 } else if ((Mask & fcFinite) == fcNegFinite) {
10262 // finite(V) && V < 0 ==> abs(V) u< exp_mask && signbit == 1
10263 auto Cmp = MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_ULT, DstTy, Abs,
10264 ExpMaskC);
10265 auto And = MIRBuilder.buildAnd(DstTy, Cmp, Sign);
10266 appendToRes(And);
10267 Mask &= ~fcNegFinite;
10268 }
10269
10270 if (FPClassTest PartialCheck = Mask & (fcZero | fcSubnormal)) {
10271 // fcZero | fcSubnormal => test all exponent bits are 0
10272 // TODO: Handle sign bit specific cases
10273 // TODO: Handle inverted case
10274 if (PartialCheck == (fcZero | fcSubnormal)) {
10275 auto ExpBits = MIRBuilder.buildAnd(IntTy, AsInt, ExpMaskC);
10276 appendToRes(MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_EQ, DstTy,
10277 ExpBits, ZeroC));
10278 Mask &= ~PartialCheck;
10279 }
10280 }
10281
10282 // Check for individual classes.
10283 if (FPClassTest PartialCheck = Mask & fcZero) {
10284 if (PartialCheck == fcPosZero)
10285 appendToRes(MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_EQ, DstTy,
10286 AsInt, ZeroC));
10287 else if (PartialCheck == fcZero)
10288 appendToRes(
10289 MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_EQ, DstTy, Abs, ZeroC));
10290 else // fcNegZero
10291 appendToRes(MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_EQ, DstTy,
10292 AsInt, SignBitC));
10293 }
10294
10295 if (FPClassTest PartialCheck = Mask & fcSubnormal) {
10296 // issubnormal(V) ==> unsigned(abs(V) - 1) u< (all mantissa bits set)
10297 // issubnormal(V) && V>0 ==> unsigned(V - 1) u< (all mantissa bits set)
10298 auto V = (PartialCheck == fcPosSubnormal) ? AsInt : Abs;
10299 auto OneC = MIRBuilder.buildConstant(IntTy, 1);
10300 auto VMinusOne = MIRBuilder.buildSub(IntTy, V, OneC);
10301 auto SubnormalRes =
10302 MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_ULT, DstTy, VMinusOne,
10303 MIRBuilder.buildConstant(IntTy, AllOneMantissa));
10304 if (PartialCheck == fcNegSubnormal)
10305 SubnormalRes = MIRBuilder.buildAnd(DstTy, SubnormalRes, Sign);
10306 appendToRes(SubnormalRes);
10307 }
10308
10309 if (FPClassTest PartialCheck = Mask & fcInf) {
10310 if (PartialCheck == fcPosInf)
10311 appendToRes(MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_EQ, DstTy,
10312 AsInt, InfC));
10313 else if (PartialCheck == fcInf)
10314 appendToRes(
10315 MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_EQ, DstTy, Abs, InfC));
10316 else { // fcNegInf
10317 APInt NegInf = APFloat::getInf(Semantics, true).bitcastToAPInt();
10318 auto NegInfC = MIRBuilder.buildConstant(IntTy, NegInf);
10319 appendToRes(MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_EQ, DstTy,
10320 AsInt, NegInfC));
10321 }
10322 }
10323
10324 if (FPClassTest PartialCheck = Mask & fcNan) {
10325 auto InfWithQnanBitC = MIRBuilder.buildConstant(IntTy, Inf | QNaNBitMask);
10326 if (PartialCheck == fcNan) {
10327 // isnan(V) ==> abs(V) u> int(inf)
10328 appendToRes(
10329 MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_UGT, DstTy, Abs, InfC));
10330 } else if (PartialCheck == fcQNan) {
10331 // isquiet(V) ==> abs(V) u>= (unsigned(Inf) | quiet_bit)
10332 appendToRes(MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_UGE, DstTy, Abs,
10333 InfWithQnanBitC));
10334 } else { // fcSNan
10335 // issignaling(V) ==> abs(V) u> unsigned(Inf) &&
10336 // abs(V) u< (unsigned(Inf) | quiet_bit)
10337 auto IsNan =
10338 MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_UGT, DstTy, Abs, InfC);
10339 auto IsNotQnan = MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_ULT, DstTy,
10340 Abs, InfWithQnanBitC);
10341 appendToRes(MIRBuilder.buildAnd(DstTy, IsNan, IsNotQnan));
10342 }
10343 }
10344
10345 if (FPClassTest PartialCheck = Mask & fcNormal) {
10346 // isnormal(V) ==> (0 u< exp u< max_exp) ==> (unsigned(exp-1) u<
10347 // (max_exp-1))
10348 APInt ExpLSB = ExpMask & ~(ExpMask.shl(1));
10349 auto ExpMinusOne = MIRBuilder.buildSub(
10350 IntTy, Abs, MIRBuilder.buildConstant(IntTy, ExpLSB));
10351 APInt MaxExpMinusOne = ExpMask - ExpLSB;
10352 auto NormalRes =
10353 MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_ULT, DstTy, ExpMinusOne,
10354 MIRBuilder.buildConstant(IntTy, MaxExpMinusOne));
10355 if (PartialCheck == fcNegNormal)
10356 NormalRes = MIRBuilder.buildAnd(DstTy, NormalRes, Sign);
10357 else if (PartialCheck == fcPosNormal) {
// XOR with the all-ones InversionMask flips every bit of the (bool)
// Sign value, yielding "not negative".
10358 auto PosSign = MIRBuilder.buildXor(
10359 DstTy, Sign, MIRBuilder.buildConstant(DstTy, InversionMask));
10360 NormalRes = MIRBuilder.buildAnd(DstTy, NormalRes, PosSign);
10361 }
10362 appendToRes(NormalRes);
10363 }
10364
10365 MIRBuilder.buildCopy(DstReg, Res);
10366 MI.eraseFromParent();
10367 return Legalized;
10368}
10369
// Lower G_SELECT into bitwise ops:
//   Res = (Op1 & Mask) | (Op2 & ~Mask)
// which requires the mask to be all-ones / all-zeros per element, so a
// scalar condition is first sign-extended (and splatted for vectors).
// Pointer (or pointer-vector) operands are round-tripped through
// ptrtoint / inttoptr; FP-typed operands are bitcast to integers.
// (The function signature line is not visible in this view.)
10371 // Implement G_SELECT in terms of XOR, AND, OR.
10372 auto [DstReg, DstTy, MaskReg, MaskTy, Op1Reg, Op1Ty, Op2Reg, Op2Ty] =
10373 MI.getFirst4RegLLTs();
10374
10375 LLT Op1TyInt =
10376 Op1Ty.changeElementType(LLT::integer(Op1Ty.getScalarSizeInBits()));
10377
10378 bool IsEltPtr = DstTy.isPointerOrPointerVector();
10379 if (IsEltPtr) {
// Do the bitwise math on same-width integers, convert back at the end.
10380 LLT ScalarPtrTy = LLT::integer(DstTy.getScalarSizeInBits());
10381 LLT NewTy = DstTy.changeElementType(ScalarPtrTy);
10382 Op1Reg = MIRBuilder.buildPtrToInt(NewTy, Op1Reg).getReg(0);
10383 Op1Ty = MRI.getType(Op1Reg);
10384 Op2Reg = MIRBuilder.buildPtrToInt(NewTy, Op2Reg).getReg(0);
10385 Op2Ty = MRI.getType(Op2Reg);
10386 DstTy = NewTy;
10387 }
10388
10389 if (MaskTy.isScalar()) {
10390 // Turn the scalar condition into a vector condition mask if needed.
10391
10392 Register MaskElt = MaskReg;
10393
10394 // The condition was potentially zero extended before, but we want a sign
10395 // extended boolean.
10396 if (MaskTy != LLT::scalar(1))
10397 MaskElt = MIRBuilder.buildSExtInReg(MaskTy, MaskElt, 1).getReg(0);
10398
10399 // Continue the sign extension (or truncate) to match the data type.
10400 MaskTy = DstTy.changeElementType(LLT::integer(DstTy.getScalarSizeInBits()));
10401 MaskElt =
10402 MIRBuilder.buildSExtOrTrunc(MaskTy.getScalarType(), MaskElt).getReg(0);
10403
10404 if (DstTy.isVector()) {
10405 // Generate a vector splat idiom.
10406 auto ShufSplat = MIRBuilder.buildShuffleSplat(MaskTy, MaskElt);
10407 MaskReg = ShufSplat.getReg(0);
10408 } else {
10409 MaskReg = MaskElt;
10410 }
10411 } else if (!DstTy.isVector()) {
10412 // Cannot handle the case that mask is a vector and dst is a scalar.
10413 return UnableToLegalize;
10414 }
10415
10416 if (MaskTy.getSizeInBits() != DstTy.getSizeInBits()) {
10417 return UnableToLegalize;
10418 }
10419
// Non-integer (e.g. FP) element types: bitcast operands to integers of
// the same width so AND/OR are valid.
10420 if (!Op1Ty.getScalarType().isAnyScalar() &&
10421 !Op1Ty.getScalarType().isInteger())
10422 Op1Reg = MIRBuilder.buildBitcast(Op1TyInt, Op1Reg).getReg(0);
10423
10424 if (!Op2Ty.getScalarType().isAnyScalar() &&
10425 !Op2Ty.getScalarType().isInteger()) {
10426 auto Op2TyInt =
10427 Op2Ty.changeElementType(LLT::integer(Op2Ty.getScalarSizeInBits()));
10428 Op2Reg = MIRBuilder.buildBitcast(Op2TyInt, Op2Reg).getReg(0);
10429 }
10430
// Res = (Op1 & Mask) | (Op2 & ~Mask)
10431 auto NotMask = MIRBuilder.buildNot(MaskTy, MaskReg);
10432 auto NewOp1 = MIRBuilder.buildAnd(MaskTy, Op1Reg, MaskReg);
10433 auto NewOp2 = MIRBuilder.buildAnd(MaskTy, Op2Reg, NotMask);
10434 if (IsEltPtr) {
10435 auto Or = MIRBuilder.buildOr(DstTy, NewOp1, NewOp2);
10436 MIRBuilder.buildIntToPtr(DstReg, Or);
10437 } else {
10438 if (DstTy == Op1TyInt)
10439 MIRBuilder.buildOr(DstReg, NewOp1, NewOp2);
10440 else {
10441 auto Or = MIRBuilder.buildOr(Op1TyInt, NewOp1, NewOp2);
10442 MIRBuilder.buildBitcast(DstReg, Or.getReg(0));
10443 }
10444 }
10445 MI.eraseFromParent();
10446 return Legalized;
10447}
10448
// Lower G_SDIVREM / G_UDIVREM by emitting a separate divide and remainder
// with the same operands; result 0 gets the quotient, result 1 the
// remainder.
// (The function signature line is not visible in this view.)
10450 // Split DIVREM into individual instructions.
10451 unsigned Opcode = MI.getOpcode();
10452
10453 MIRBuilder.buildInstr(
10454 Opcode == TargetOpcode::G_SDIVREM ? TargetOpcode::G_SDIV
10455 : TargetOpcode::G_UDIV,
10456 {MI.getOperand(0).getReg()}, {MI.getOperand(2), MI.getOperand(3)});
10457 MIRBuilder.buildInstr(
10458 Opcode == TargetOpcode::G_SDIVREM ? TargetOpcode::G_SREM
10459 : TargetOpcode::G_UREM,
10460 {MI.getOperand(1).getReg()}, {MI.getOperand(2), MI.getOperand(3)});
10461 MI.eraseFromParent();
10462 return Legalized;
10463}
10464
// Lower G_ABS with the classic branch-free shift/add/xor sequence: the
// arithmetic shift produces 0 (non-negative) or all-ones (negative), and
// add+xor with it conditionally negates the value.
// (The function signature line is not visible in this view.)
10467 // Expand %res = G_ABS %a into:
10468 // %v1 = G_ASHR %a, scalar_size-1
10469 // %v2 = G_ADD %a, %v1
10470 // %res = G_XOR %v2, %v1
10471 LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
10472 Register OpReg = MI.getOperand(1).getReg();
10473 auto ShiftAmt =
10474 MIRBuilder.buildConstant(DstTy, DstTy.getScalarSizeInBits() - 1);
10475 auto Shift = MIRBuilder.buildAShr(DstTy, OpReg, ShiftAmt);
10476 auto Add = MIRBuilder.buildAdd(DstTy, OpReg, Shift);
10477 MIRBuilder.buildXor(MI.getOperand(0).getReg(), Add, Shift);
10478 MI.eraseFromParent();
10479 return Legalized;
10480}
10481
// Lower G_ABS as max(a, -a) for targets where G_SMAX is preferable.
// (The function signature line is not visible in this view.)
10484 // Expand %res = G_ABS %a into:
10485 // %v1 = G_CONSTANT 0
10486 // %v2 = G_SUB %v1, %a
10487 // %res = G_SMAX %a, %v2
10488 Register SrcReg = MI.getOperand(1).getReg();
10489 LLT Ty = MRI.getType(SrcReg);
10490 auto Zero = MIRBuilder.buildConstant(Ty, 0);
10491 auto Sub = MIRBuilder.buildSub(Ty, Zero, SrcReg);
10492 MIRBuilder.buildSMax(MI.getOperand(0), SrcReg, Sub);
10493 MI.eraseFromParent();
10494 return Legalized;
10495}
10496
// Lower G_ABS as a compare-and-select (conditional negate):
//   res = (a > 0) ? a : (0 - a)
// (The function signature line is not visible in this view.)
10499 Register SrcReg = MI.getOperand(1).getReg();
10500 Register DestReg = MI.getOperand(0).getReg();
10501 LLT Ty = MRI.getType(SrcReg), IType = LLT::scalar(1);
10502 auto Zero = MIRBuilder.buildConstant(Ty, 0).getReg(0);
10503 auto Sub = MIRBuilder.buildSub(Ty, Zero, SrcReg).getReg(0);
10504 auto ICmp = MIRBuilder.buildICmp(CmpInst::ICMP_SGT, IType, SrcReg, Zero);
10505 MIRBuilder.buildSelect(DestReg, ICmp, SrcReg, Sub);
10506 MI.eraseFromParent();
10507 return Legalized;
10508}
10509
// Lower G_ABDS / G_ABDU (absolute difference) with a compare-and-select:
// compute both subtraction orders and pick the non-negative one.
// (The function signature line is not visible in this view.)
10512 assert((MI.getOpcode() == TargetOpcode::G_ABDS ||
10513 MI.getOpcode() == TargetOpcode::G_ABDU) &&
10514 "Expected G_ABDS or G_ABDU instruction");
10515
10516 auto [DstReg, LHS, RHS] = MI.getFirst3Regs();
10517 LLT Ty = MRI.getType(LHS);
10518
10519 // abds(lhs, rhs) -> select(sgt(lhs,rhs), sub(lhs,rhs), sub(rhs,lhs))
10520 // abdu(lhs, rhs) -> select(ugt(lhs,rhs), sub(lhs,rhs), sub(rhs,lhs))
10521 Register LHSSub = MIRBuilder.buildSub(Ty, LHS, RHS).getReg(0);
10522 Register RHSSub = MIRBuilder.buildSub(Ty, RHS, LHS).getReg(0);
// Predicate (signed vs. unsigned greater-than) matches the opcode; the
// predicate constants themselves are on lines not visible in this view.
10523 CmpInst::Predicate Pred = (MI.getOpcode() == TargetOpcode::G_ABDS)
10526 auto ICmp = MIRBuilder.buildICmp(Pred, LLT::scalar(1), LHS, RHS);
10527 MIRBuilder.buildSelect(DstReg, ICmp, LHSSub, RHSSub);
10528
10529 MI.eraseFromParent();
10530 return Legalized;
10531}
10532
// Lower G_ABDS / G_ABDU as max(lhs, rhs) - min(lhs, rhs), using the
// signed or unsigned min/max opcodes to match the instruction.
// (The function signature line is not visible in this view.)
10535 assert((MI.getOpcode() == TargetOpcode::G_ABDS ||
10536 MI.getOpcode() == TargetOpcode::G_ABDU) &&
10537 "Expected G_ABDS or G_ABDU instruction");
10538
10539 auto [DstReg, LHS, RHS] = MI.getFirst3Regs();
10540 LLT Ty = MRI.getType(LHS);
10541
10542 // abds(lhs, rhs) -→ sub(smax(lhs, rhs), smin(lhs, rhs))
10543 // abdu(lhs, rhs) -→ sub(umax(lhs, rhs), umin(lhs, rhs))
10544 Register MaxReg, MinReg;
10545 if (MI.getOpcode() == TargetOpcode::G_ABDS) {
10546 MaxReg = MIRBuilder.buildSMax(Ty, LHS, RHS).getReg(0);
10547 MinReg = MIRBuilder.buildSMin(Ty, LHS, RHS).getReg(0);
10548 } else {
10549 MaxReg = MIRBuilder.buildUMax(Ty, LHS, RHS).getReg(0);
10550 MinReg = MIRBuilder.buildUMin(Ty, LHS, RHS).getReg(0);
10551 }
10552 MIRBuilder.buildSub(DstReg, MaxReg, MinReg);
10553
10554 MI.eraseFromParent();
10555 return Legalized;
10556}
10557
// Lower a floating-point absolute value by clearing the sign bit: AND the
// bit pattern with signed-max (all bits but the sign). FP-typed sources
// are bitcast to same-width integers first, and the result is bitcast
// back when the destination type is not already the integer type.
// (The function signature line is not visible in this view.)
10559 auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();
10560 LLT TyInt =
10561 DstTy.changeElementType(LLT::integer(DstTy.getScalarSizeInBits()));
10562 Register CastedSrc = SrcReg;
10563
10564 if (!(SrcTy.getScalarType().isAnyScalar() ||
10565 SrcTy.getScalarType().isInteger())) {
10566 auto SrcTyInt =
10567 SrcTy.changeElementType(LLT::integer(SrcTy.getScalarSizeInBits()));
10568 CastedSrc = MIRBuilder.buildBitcast(SrcTyInt, SrcReg).getReg(0);
10569 }
10570
10571 if (MRI.getType(DstReg) != TyInt) {
10572 // Reset sign bit
// (The builder receiver on the next line is on a line not visible in
// this view.)
10573 Register NewDst =
10575 .buildAnd(TyInt, CastedSrc,
10576 MIRBuilder.buildConstant(
10578 DstTy.getScalarSizeInBits())))
10579 .getReg(0);
10580
10581 MIRBuilder.buildBitcast(DstReg, NewDst);
10582 } else
10584 .buildAnd(
10585 DstReg, CastedSrc,
10586 MIRBuilder.buildConstant(
10587 TyInt, APInt::getSignedMaxValue(DstTy.getScalarSizeInBits())))
10588 .getReg(0);
10589
10590 MI.eraseFromParent();
10591 return Legalized;
10592}
10593
// Lower a vector reduction whose source degenerated to a scalar (the IR
// type was <1 x sN>): the reduction is then just a copy.
// (The function signature line is not visible in this view.)
10596 Register SrcReg = MI.getOperand(1).getReg();
10597 LLT SrcTy = MRI.getType(SrcReg);
// NOTE(review): DstTy is computed from SrcReg (not operand 0), so it is
// always equal to SrcTy and the size comparison below can never be true.
// Looks like this should be MRI.getType(MI.getOperand(0).getReg()) —
// confirm against upstream.
10598 LLT DstTy = MRI.getType(SrcReg);
10599
10600 // The source could be a scalar if the IR type was <1 x sN>.
10601 if (SrcTy.isScalar()) {
10602 if (DstTy.getSizeInBits() > SrcTy.getSizeInBits())
10603 return UnableToLegalize; // FIXME: handle extension.
10604 // This can be just a plain copy.
10605 Observer.changingInstr(MI);
10606 MI.setDesc(MIRBuilder.getTII().get(TargetOpcode::COPY));
10607 Observer.changedInstr(MI);
10608 return Legalized;
10609 }
10610 return UnableToLegalize;
10611}
10612
// Lower G_VAARG: load the current argument pointer from the va_list,
// optionally align it up, load the argument value through it, advance the
// pointer by the argument's alloc size, and store it back to the va_list.
// (The function signature line is not visible in this view.)
10614 MachineFunction &MF = *MI.getMF();
10615 const DataLayout &DL = MIRBuilder.getDataLayout();
10616 LLVMContext &Ctx = MF.getFunction().getContext();
10617 Register ListPtr = MI.getOperand(1).getReg();
10618 LLT PtrTy = MRI.getType(ListPtr);
10619
10620 // LstPtr is a pointer to the head of the list. Get the address
10621 // of the head of the list.
10622 Align PtrAlignment = DL.getABITypeAlign(getTypeForLLT(PtrTy, Ctx));
10623 MachineMemOperand *PtrLoadMMO = MF.getMachineMemOperand(
10624 MachinePointerInfo(), MachineMemOperand::MOLoad, PtrTy, PtrAlignment);
10625 auto VAList = MIRBuilder.buildLoad(PtrTy, ListPtr, *PtrLoadMMO).getReg(0);
10626
// If the requested alignment (immediate operand 2) exceeds the stack's
// minimum argument alignment, round VAList up: (p + A-1) & ~(A-1).
10627 const Align A(MI.getOperand(2).getImm());
10628 LLT PtrTyAsScalarTy = LLT::scalar(PtrTy.getSizeInBits());
10629 if (A > TLI.getMinStackArgumentAlignment()) {
10630 Register AlignAmt =
10631 MIRBuilder.buildConstant(PtrTyAsScalarTy, A.value() - 1).getReg(0);
10632 auto AddDst = MIRBuilder.buildPtrAdd(PtrTy, VAList, AlignAmt);
10633 auto AndDst = MIRBuilder.buildMaskLowPtrBits(PtrTy, AddDst, Log2(A));
10634 VAList = AndDst.getReg(0);
10635 }
10636
10637 // Increment the pointer, VAList, to the next vaarg
10638 // The list should be bumped by the size of element in the current head of
10639 // list.
10640 Register Dst = MI.getOperand(0).getReg();
10641 LLT LLTTy = MRI.getType(Dst);
10642 Type *Ty = getTypeForLLT(LLTTy, Ctx);
10643 auto IncAmt =
10644 MIRBuilder.buildConstant(PtrTyAsScalarTy, DL.getTypeAllocSize(Ty));
10645 auto Succ = MIRBuilder.buildPtrAdd(PtrTy, VAList, IncAmt);
10646
10647 // Store the increment VAList to the legalized pointer
10649 MachinePointerInfo(), MachineMemOperand::MOStore, PtrTy, PtrAlignment);
10650 MIRBuilder.buildStore(Succ, ListPtr, *StoreMMO);
10651 // Load the actual argument out of the pointer VAList
10652 Align EltAlignment = DL.getABITypeAlign(Ty);
10653 MachineMemOperand *EltLoadMMO = MF.getMachineMemOperand(
10654 MachinePointerInfo(), MachineMemOperand::MOLoad, LLTTy, EltAlignment);
10655 MIRBuilder.buildLoad(Dst, VAList, *EltLoadMMO);
10656
10657 MI.eraseFromParent();
10658 return Legalized;
10659}
10660
// Lower G_SMULFIX / G_UMULFIX (fixed-point multiply with a scale).
// Scale == 0 degenerates to a plain multiply; otherwise extend both
// operands to double width (sext/zext by signedness), multiply, shift
// right by Scale (ashr/lshr), and truncate back.
// (The function signature line is not visible in this view.)
10662 [[maybe_unused]] unsigned OpCode = MI.getOpcode();
10663 assert((OpCode == TargetOpcode::G_SMULFIX ||
10664 OpCode == TargetOpcode::G_UMULFIX) &&
10665 "Operator must be either G_SMULFIX or G_UMULFIX!");
10666 auto [Dst, LHS, RHS] = MI.getFirst3Regs();
10667 LLT Ty = MRI.getType(Dst);
// Scale = number of fractional bits (immediate operand 3).
10668 unsigned Scale = MI.getOperand(3).getImm();
10669
10670 if (Scale == 0) {
10671 MIRBuilder.buildMul(Dst, LHS, RHS);
10672 MI.eraseFromParent();
10673 return Legalized;
10674 }
10675
10676 // TODO: Port other lowerng paths from SelectionDAG.
10677 LLT WideTy = Ty.changeElementSize(Ty.getScalarSizeInBits() * 2);
10678 auto ShiftAmt = MIRBuilder.buildConstant(WideTy, Scale);
10679 MachineInstrBuilder ExtLHS{}, ExtRHS{}, Shift{};
10680 if (MI.getOpcode() == TargetOpcode::G_SMULFIX) {
10681 ExtLHS = MIRBuilder.buildSExt(WideTy, LHS);
10682 ExtRHS = MIRBuilder.buildSExt(WideTy, RHS);
10683 } else {
10684 ExtLHS = MIRBuilder.buildZExt(WideTy, LHS);
10685 ExtRHS = MIRBuilder.buildZExt(WideTy, RHS);
10686 }
10687
10688 auto Mul = MIRBuilder.buildMul(WideTy, ExtLHS, ExtRHS);
10689 if (MI.getOpcode() == TargetOpcode::G_SMULFIX)
10690 Shift = MIRBuilder.buildAShr(WideTy, Mul, ShiftAmt)
10691 else
10692 Shift = MIRBuilder.buildLShr(WideTy, Mul, ShiftAmt);
10693
10694 MIRBuilder.buildTrunc(Dst, Shift);
10695
10696 MI.eraseFromParent();
10697 return Legalized;
10698}
10699
// Whether memcpy/memset-style lowering should optimize for size for this
// function. (The signature line and the Darwin-check condition line are
// not visible in this view.)
10701 // On Darwin, -Os means optimize for size without hurting performance, so
10702 // only really optimize for size when -Oz (MinSize) is used.
10704 return MF.getFunction().hasMinSize();
10705 return MF.getFunction().hasOptSize();
10706}
10707
10708// Returns a list of types to use for memory op lowering in MemOps. A partial
10709// port of findOptimalMemOpLowering in TargetLowering.
// Greedily picks store/load types: the target's preferred type if it has
// one, otherwise the widest alignment-satisfying integer; then emits as
// many full-width ops as fit, shrinking the type (or issuing an
// overlapping unaligned op when allowed) for the tail. Returns false if
// more than Limit operations would be needed.
10710static bool findGISelOptimalMemOpLowering(std::vector<LLT> &MemOps,
10711 unsigned Limit, const MemOp &Op,
10712 unsigned DstAS, unsigned SrcAS,
10713 const AttributeList &FuncAttributes,
10714 const TargetLowering &TLI) {
10715 if (Op.isMemcpyWithFixedDstAlign() && Op.getSrcAlign() < Op.getDstAlign())
10716 return false;
10717
10718 LLT Ty = TLI.getOptimalMemOpLLT(Op, FuncAttributes);
10719
10720 if (Ty == LLT()) {
10721 // Use the largest scalar type whose alignment constraints are satisfied.
10722 // We only need to check DstAlign here as SrcAlign is always greater or
10723 // equal to DstAlign (or zero).
10724 Ty = LLT::integer(64);
10725 if (Op.isFixedDstAlign())
10726 while (Op.getDstAlign() < Ty.getSizeInBytes() &&
10727 !TLI.allowsMisalignedMemoryAccesses(Ty, DstAS, Op.getDstAlign()))
// NOTE(review): this uses the byte count as the next bit width
// (s64 -> s8 -> s1), a steep shrink — confirm against upstream.
10728 Ty = LLT::integer(Ty.getSizeInBytes());
10729 assert(Ty.getSizeInBits() > 0 && "Could not find valid type");
10730 // FIXME: check for the largest legal type we can load/store to.
10731 }
10732
10733 unsigned NumMemOps = 0;
10734 uint64_t Size = Op.size();
10735 while (Size) {
10736 unsigned TySize = Ty.getSizeInBytes();
10737 while (TySize > Size) {
10738 // For now, only use non-vector load / store's for the left-over pieces.
10739 LLT NewTy = Ty;
10740 // FIXME: check for mem op safety and legality of the types. Not all of
10741 // SDAGisms map cleanly to GISel concepts.
10742 if (NewTy.isVector())
10743 NewTy =
10744 NewTy.getSizeInBits() > 64 ? LLT::integer(64) : LLT::integer(32);
// Drop to the next power-of-two width strictly below the current one.
10745 NewTy = LLT::integer(llvm::bit_floor(NewTy.getSizeInBits() - 1));
10746 unsigned NewTySize = NewTy.getSizeInBytes();
10747 assert(NewTySize > 0 && "Could not find appropriate type");
10748
10749 // If the new LLT cannot cover all of the remaining bits, then consider
10750 // issuing a (or a pair of) unaligned and overlapping load / store.
10751 unsigned Fast;
10752 // Need to get a VT equivalent for allowMisalignedMemoryAccesses().
10753 MVT VT = getMVTForLLT(Ty);
10754 if (NumMemOps && Op.allowOverlap() && NewTySize < Size &&
10756 VT, DstAS, Op.isFixedDstAlign() ? Op.getDstAlign() : Align(1),
10758 Fast)
10759 TySize = Size;
10760 else {
10761 Ty = NewTy;
10762 TySize = NewTySize;
10763 }
10764 }
10765
10766 if (++NumMemOps > Limit)
10767 return false;
10768
10769 MemOps.push_back(Ty);
10770 Size -= TySize;
10771 }
10772
10773 return true;
10774}
10775
10776// Get a vectorized representation of the memset value operand, GISel edition.
// Builds a value of type Ty whose every byte equals the (low 8 bits of
// the) memset value Val: constants are splatted at compile time; variable
// values are replicated by multiplying with 0x0101... and, for vectors,
// splatted with a build-vector.
// (The function signature line is not visible in this view.)
10778 MachineRegisterInfo &MRI = *MIB.getMRI();
10779 unsigned NumBits = Ty.getScalarSizeInBits();
10780 auto ValVRegAndVal = getIConstantVRegValWithLookThrough(Val, MRI);
10781 if (!Ty.isVector() && ValVRegAndVal) {
// Known-constant scalar: splat the byte across the type at build time.
10782 APInt Scalar = ValVRegAndVal->Value.trunc(8);
10783 APInt SplatVal = APInt::getSplat(NumBits, Scalar);
10784 return MIB.buildConstant(Ty, SplatVal).getReg(0);
10785 }
10786
10787 // Extend the byte value to the larger type, and then multiply by a magic
10788 // value 0x010101... in order to replicate it across every byte.
10789 // Unless it's zero, in which case just emit a larger G_CONSTANT 0.
10790 if (ValVRegAndVal && ValVRegAndVal->Value == 0) {
10791 return MIB.buildConstant(Ty, 0).getReg(0);
10792 }
10793
10794 LLT ExtType = Ty.getScalarType();
10795 auto ZExt = MIB.buildZExtOrTrunc(ExtType, Val);
10796 if (NumBits > 8) {
10797 APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
10798 auto MagicMI = MIB.buildConstant(ExtType, Magic);
10799 Val = MIB.buildMul(ExtType, ZExt, MagicMI).getReg(0);
10800 }
10801
10802 // For vector types create a G_BUILD_VECTOR.
10803 if (Ty.isVector())
10804 Val = MIB.buildSplatBuildVector(Ty, Val).getReg(0);
10805
10806 return Val;
10807}
10808
// Lower G_MEMSET of a known length into a sequence of stores.
// Picks store types via findGISelOptimalMemOpLowering (possibly raising
// the alignment of a non-fixed stack object), materializes the splatted
// store value once at the widest type, then emits one store per chosen
// type, reusing a truncated value where the target says truncation is
// free and overlapping the final store when the tail is short.
// (The leading return-type line of the signature is above this view.)
LegalizerHelper::lowerMemset(MachineInstr &MI, Register Dst, Register Val,
10811 uint64_t KnownLen, Align Alignment,
10812 bool IsVolatile) {
10813 auto &MF = *MI.getParent()->getParent();
10814 const auto &TLI = *MF.getSubtarget().getTargetLowering();
10815 auto &DL = MF.getDataLayout();
10816 LLVMContext &C = MF.getFunction().getContext();
10817
10818 assert(KnownLen != 0 && "Have a zero length memset length!");
10819
10820 bool DstAlignCanChange = false;
10821 MachineFrameInfo &MFI = MF.getFrameInfo();
10822 bool OptSize = shouldLowerMemFuncForSize(MF);
10823
// Destination alignment may be raised only for non-fixed stack objects.
10824 MachineInstr *FIDef = getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Dst, MRI);
10825 if (FIDef && !MFI.isFixedObjectIndex(FIDef->getOperand(1).getIndex()))
10826 DstAlignCanChange = true;
10827
10828 unsigned Limit = TLI.getMaxStoresPerMemset(OptSize);
10829 std::vector<LLT> MemOps;
10830
10831 const auto &DstMMO = **MI.memoperands_begin();
10832 MachinePointerInfo DstPtrInfo = DstMMO.getPointerInfo();
10833
10834 auto ValVRegAndVal = getIConstantVRegValWithLookThrough(Val, MRI);
10835 bool IsZeroVal = ValVRegAndVal && ValVRegAndVal->Value == 0;
10836
10837 if (!findGISelOptimalMemOpLowering(MemOps, Limit,
10838 MemOp::Set(KnownLen, DstAlignCanChange,
10839 Alignment,
10840 /*IsZeroMemset=*/IsZeroVal,
10841 /*IsVolatile=*/IsVolatile),
10842 DstPtrInfo.getAddrSpace(), ~0u,
10843 MF.getFunction().getAttributes(), TLI))
10844 return UnableToLegalize;
10845
10846 if (DstAlignCanChange) {
10847 // Get an estimate of the type from the LLT.
10848 Type *IRTy = getTypeForLLT(MemOps[0], C);
10849 Align NewAlign = DL.getABITypeAlign(IRTy);
10850 if (NewAlign > Alignment) {
10851 Alignment = NewAlign;
10852 unsigned FI = FIDef->getOperand(1).getIndex();
10853 // Give the stack frame object a larger alignment if needed.
10854 if (MFI.getObjectAlign(FI) < Alignment)
10855 MFI.setObjectAlignment(FI, Alignment);
10856 }
10857 }
10858
10859 MachineIRBuilder MIB(MI);
10860 // Find the largest store and generate the bit pattern for it.
10861 LLT LargestTy = MemOps[0];
10862 for (unsigned i = 1; i < MemOps.size(); i++)
10863 if (MemOps[i].getSizeInBits() > LargestTy.getSizeInBits())
10864 LargestTy = MemOps[i];
10865
10866 // The memset stored value is always defined as an s8, so in order to make it
10867 // work with larger store types we need to repeat the bit pattern across the
10868 // wider type.
10869 Register MemSetValue = getMemsetValue(Val, LargestTy, MIB);
10870
10871 if (!MemSetValue)
10872 return UnableToLegalize;
10873
10874 // Generate the stores. For each store type in the list, we generate the
10875 // matching store of that type to the destination address.
10876 LLT PtrTy = MRI.getType(Dst);
10877 unsigned DstOff = 0;
10878 unsigned Size = KnownLen;
10879 for (unsigned I = 0; I < MemOps.size(); I++) {
10880 LLT Ty = MemOps[I];
10881 unsigned TySize = Ty.getSizeInBytes();
10882 if (TySize > Size) {
10883 // Issuing an unaligned load / store pair that overlaps with the previous
10884 // pair. Adjust the offset accordingly.
10885 assert(I == MemOps.size() - 1 && I != 0);
10886 DstOff -= TySize - Size;
10887 }
10888
10889 // If this store is smaller than the largest store see whether we can get
10890 // the smaller value for free with a truncate.
10891 Register Value = MemSetValue;
10892 if (Ty.getSizeInBits() < LargestTy.getSizeInBits()) {
10893 MVT VT = getMVTForLLT(Ty);
10894 MVT LargestVT = getMVTForLLT(LargestTy);
10895 if (!LargestTy.isVector() && !Ty.isVector() &&
10896 TLI.isTruncateFree(LargestVT, VT))
10897 Value = MIB.buildTrunc(Ty, MemSetValue).getReg(0);
10898 else
10899 Value = getMemsetValue(Val, Ty, MIB);
10900 if (!Value)
10901 return UnableToLegalize;
10902 }
10903
// Derive each store's MMO from the original, offset into the region.
10904 auto *StoreMMO = MF.getMachineMemOperand(&DstMMO, DstOff, Ty);
10905
10906 Register Ptr = Dst;
10907 if (DstOff != 0) {
10908 auto Offset =
10909 MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), DstOff);
10910 Ptr = MIB.buildObjectPtrOffset(PtrTy, Dst, Offset).getReg(0);
10911 }
10912
10913 MIB.buildStore(Value, Ptr, *StoreMMO);
10914 DstOff += Ty.getSizeInBytes();
10915 Size -= TySize;
10916 }
10917
10918 MI.eraseFromParent();
10919 return Legalized;
10920}
10921
10923LegalizerHelper::lowerMemcpyInline(MachineInstr &MI) {
10924 assert(MI.getOpcode() == TargetOpcode::G_MEMCPY_INLINE);
10925
10926 auto [Dst, Src, Len] = MI.getFirst3Regs();
10927
10928 const auto *MMOIt = MI.memoperands_begin();
10929 const MachineMemOperand *MemOp = *MMOIt;
10930 bool IsVolatile = MemOp->isVolatile();
10931
10932 // See if this is a constant length copy
10933 auto LenVRegAndVal = getIConstantVRegValWithLookThrough(Len, MRI);
10934 // FIXME: support dynamically sized G_MEMCPY_INLINE
10935 assert(LenVRegAndVal &&
10936 "inline memcpy with dynamic size is not yet supported");
10937 uint64_t KnownLen = LenVRegAndVal->Value.getZExtValue();
10938 if (KnownLen == 0) {
10939 MI.eraseFromParent();
10940 return Legalized;
10941 }
10942
10943 const auto &DstMMO = **MI.memoperands_begin();
10944 const auto &SrcMMO = **std::next(MI.memoperands_begin());
10945 Align DstAlign = DstMMO.getBaseAlign();
10946 Align SrcAlign = SrcMMO.getBaseAlign();
10947
10948 return lowerMemcpyInline(MI, Dst, Src, KnownLen, DstAlign, SrcAlign,
10949 IsVolatile);
10950}
10951
10953LegalizerHelper::lowerMemcpyInline(MachineInstr &MI, Register Dst, Register Src,
10954 uint64_t KnownLen, Align DstAlign,
10955 Align SrcAlign, bool IsVolatile) {
10956 assert(MI.getOpcode() == TargetOpcode::G_MEMCPY_INLINE);
10957 return lowerMemcpy(MI, Dst, Src, KnownLen,
10958 std::numeric_limits<uint64_t>::max(), DstAlign, SrcAlign,
10959 IsVolatile);
10960}
10961
10963LegalizerHelper::lowerMemcpy(MachineInstr &MI, Register Dst, Register Src,
10964 uint64_t KnownLen, uint64_t Limit, Align DstAlign,
10965 Align SrcAlign, bool IsVolatile) {
10966 auto &MF = *MI.getParent()->getParent();
10967 const auto &TLI = *MF.getSubtarget().getTargetLowering();
10968 auto &DL = MF.getDataLayout();
10970
10971 assert(KnownLen != 0 && "Have a zero length memcpy length!");
10972
10973 bool DstAlignCanChange = false;
10974 MachineFrameInfo &MFI = MF.getFrameInfo();
10975 Align Alignment = std::min(DstAlign, SrcAlign);
10976
10977 MachineInstr *FIDef = getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Dst, MRI);
10978 if (FIDef && !MFI.isFixedObjectIndex(FIDef->getOperand(1).getIndex()))
10979 DstAlignCanChange = true;
10980
10981 // FIXME: infer better src pointer alignment like SelectionDAG does here.
10982 // FIXME: also use the equivalent of isMemSrcFromConstant and alwaysinlining
10983 // if the memcpy is in a tail call position.
10984
10985 std::vector<LLT> MemOps;
10986
10987 const auto &DstMMO = **MI.memoperands_begin();
10988 const auto &SrcMMO = **std::next(MI.memoperands_begin());
10989 MachinePointerInfo DstPtrInfo = DstMMO.getPointerInfo();
10990 MachinePointerInfo SrcPtrInfo = SrcMMO.getPointerInfo();
10991
10993 MemOps, Limit,
10994 MemOp::Copy(KnownLen, DstAlignCanChange, Alignment, SrcAlign,
10995 IsVolatile),
10996 DstPtrInfo.getAddrSpace(), SrcPtrInfo.getAddrSpace(),
10997 MF.getFunction().getAttributes(), TLI))
10998 return UnableToLegalize;
10999
11000 if (DstAlignCanChange) {
11001 // Get an estimate of the type from the LLT.
11002 Type *IRTy = getTypeForLLT(MemOps[0], C);
11003 Align NewAlign = DL.getABITypeAlign(IRTy);
11004
11005 // Don't promote to an alignment that would require dynamic stack
11006 // realignment.
11008 if (!TRI->hasStackRealignment(MF))
11009 if (MaybeAlign StackAlign = DL.getStackAlignment())
11010 NewAlign = std::min(NewAlign, *StackAlign);
11011
11012 if (NewAlign > Alignment) {
11013 Alignment = NewAlign;
11014 unsigned FI = FIDef->getOperand(1).getIndex();
11015 // Give the stack frame object a larger alignment if needed.
11016 if (MFI.getObjectAlign(FI) < Alignment)
11017 MFI.setObjectAlignment(FI, Alignment);
11018 }
11019 }
11020
11021 LLVM_DEBUG(dbgs() << "Inlining memcpy: " << MI << " into loads & stores\n");
11022
11023 MachineIRBuilder MIB(MI);
11024 // Now we need to emit a pair of load and stores for each of the types we've
11025 // collected. I.e. for each type, generate a load from the source pointer of
11026 // that type width, and then generate a corresponding store to the dest buffer
11027 // of that value loaded. This can result in a sequence of loads and stores
11028 // mixed types, depending on what the target specifies as good types to use.
11029 unsigned CurrOffset = 0;
11030 unsigned Size = KnownLen;
11031 for (auto CopyTy : MemOps) {
11032 // Issuing an unaligned load / store pair that overlaps with the previous
11033 // pair. Adjust the offset accordingly.
11034 if (CopyTy.getSizeInBytes() > Size)
11035 CurrOffset -= CopyTy.getSizeInBytes() - Size;
11036
11037 // Construct MMOs for the accesses.
11038 auto *LoadMMO =
11039 MF.getMachineMemOperand(&SrcMMO, CurrOffset, CopyTy.getSizeInBytes());
11040 auto *StoreMMO =
11041 MF.getMachineMemOperand(&DstMMO, CurrOffset, CopyTy.getSizeInBytes());
11042
11043 // Create the load.
11044 Register LoadPtr = Src;
11046 if (CurrOffset != 0) {
11047 LLT SrcTy = MRI.getType(Src);
11048 Offset =
11049 MIB.buildConstant(LLT::integer(SrcTy.getSizeInBits()), CurrOffset)
11050 .getReg(0);
11051 LoadPtr = MIB.buildObjectPtrOffset(SrcTy, Src, Offset).getReg(0);
11052 }
11053 auto LdVal = MIB.buildLoad(CopyTy, LoadPtr, *LoadMMO);
11054
11055 // Create the store.
11056 Register StorePtr = Dst;
11057 if (CurrOffset != 0) {
11058 LLT DstTy = MRI.getType(Dst);
11059 StorePtr = MIB.buildObjectPtrOffset(DstTy, Dst, Offset).getReg(0);
11060 }
11061 MIB.buildStore(LdVal, StorePtr, *StoreMMO);
11062 CurrOffset += CopyTy.getSizeInBytes();
11063 Size -= CopyTy.getSizeInBytes();
11064 }
11065
11066 MI.eraseFromParent();
11067 return Legalized;
11068}
11069
11071LegalizerHelper::lowerMemmove(MachineInstr &MI, Register Dst, Register Src,
11072 uint64_t KnownLen, Align DstAlign, Align SrcAlign,
11073 bool IsVolatile) {
11074 auto &MF = *MI.getParent()->getParent();
11075 const auto &TLI = *MF.getSubtarget().getTargetLowering();
11076 auto &DL = MF.getDataLayout();
11077 LLVMContext &C = MF.getFunction().getContext();
11078
11079 assert(KnownLen != 0 && "Have a zero length memmove length!");
11080
11081 bool DstAlignCanChange = false;
11082 MachineFrameInfo &MFI = MF.getFrameInfo();
11083 bool OptSize = shouldLowerMemFuncForSize(MF);
11084 Align Alignment = std::min(DstAlign, SrcAlign);
11085
11086 MachineInstr *FIDef = getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Dst, MRI);
11087 if (FIDef && !MFI.isFixedObjectIndex(FIDef->getOperand(1).getIndex()))
11088 DstAlignCanChange = true;
11089
11090 unsigned Limit = TLI.getMaxStoresPerMemmove(OptSize);
11091 std::vector<LLT> MemOps;
11092
11093 const auto &DstMMO = **MI.memoperands_begin();
11094 const auto &SrcMMO = **std::next(MI.memoperands_begin());
11095 MachinePointerInfo DstPtrInfo = DstMMO.getPointerInfo();
11096 MachinePointerInfo SrcPtrInfo = SrcMMO.getPointerInfo();
11097
11098 // FIXME: SelectionDAG always passes false for 'AllowOverlap', apparently due
11099 // to a bug in it's findOptimalMemOpLowering implementation. For now do the
11100 // same thing here.
11102 MemOps, Limit,
11103 MemOp::Copy(KnownLen, DstAlignCanChange, Alignment, SrcAlign,
11104 /*IsVolatile*/ true),
11105 DstPtrInfo.getAddrSpace(), SrcPtrInfo.getAddrSpace(),
11106 MF.getFunction().getAttributes(), TLI))
11107 return UnableToLegalize;
11108
11109 if (DstAlignCanChange) {
11110 // Get an estimate of the type from the LLT.
11111 Type *IRTy = getTypeForLLT(MemOps[0], C);
11112 Align NewAlign = DL.getABITypeAlign(IRTy);
11113
11114 // Don't promote to an alignment that would require dynamic stack
11115 // realignment.
11116 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
11117 if (!TRI->hasStackRealignment(MF))
11118 if (MaybeAlign StackAlign = DL.getStackAlignment())
11119 NewAlign = std::min(NewAlign, *StackAlign);
11120
11121 if (NewAlign > Alignment) {
11122 Alignment = NewAlign;
11123 unsigned FI = FIDef->getOperand(1).getIndex();
11124 // Give the stack frame object a larger alignment if needed.
11125 if (MFI.getObjectAlign(FI) < Alignment)
11126 MFI.setObjectAlignment(FI, Alignment);
11127 }
11128 }
11129
11130 LLVM_DEBUG(dbgs() << "Inlining memmove: " << MI << " into loads & stores\n");
11131
11132 MachineIRBuilder MIB(MI);
11133 // Memmove requires that we perform the loads first before issuing the stores.
11134 // Apart from that, this loop is pretty much doing the same thing as the
11135 // memcpy codegen function.
11136 unsigned CurrOffset = 0;
11137 SmallVector<Register, 16> LoadVals;
11138 for (auto CopyTy : MemOps) {
11139 // Construct MMO for the load.
11140 auto *LoadMMO =
11141 MF.getMachineMemOperand(&SrcMMO, CurrOffset, CopyTy.getSizeInBytes());
11142
11143 // Create the load.
11144 Register LoadPtr = Src;
11145 if (CurrOffset != 0) {
11146 LLT SrcTy = MRI.getType(Src);
11147 auto Offset =
11148 MIB.buildConstant(LLT::scalar(SrcTy.getSizeInBits()), CurrOffset);
11149 LoadPtr = MIB.buildObjectPtrOffset(SrcTy, Src, Offset).getReg(0);
11150 }
11151 LoadVals.push_back(MIB.buildLoad(CopyTy, LoadPtr, *LoadMMO).getReg(0));
11152 CurrOffset += CopyTy.getSizeInBytes();
11153 }
11154
11155 CurrOffset = 0;
11156 for (unsigned I = 0; I < MemOps.size(); ++I) {
11157 LLT CopyTy = MemOps[I];
11158 // Now store the values loaded.
11159 auto *StoreMMO =
11160 MF.getMachineMemOperand(&DstMMO, CurrOffset, CopyTy.getSizeInBytes());
11161
11162 Register StorePtr = Dst;
11163 if (CurrOffset != 0) {
11164 LLT DstTy = MRI.getType(Dst);
11165 auto Offset =
11166 MIB.buildConstant(LLT::scalar(DstTy.getSizeInBits()), CurrOffset);
11167 StorePtr = MIB.buildObjectPtrOffset(DstTy, Dst, Offset).getReg(0);
11168 }
11169 MIB.buildStore(LoadVals[I], StorePtr, *StoreMMO);
11170 CurrOffset += CopyTy.getSizeInBytes();
11171 }
11172 MI.eraseFromParent();
11173 return Legalized;
11174}
11175
11178 const unsigned Opc = MI.getOpcode();
11179 // This combine is fairly complex so it's not written with a separate
11180 // matcher function.
11181 assert((Opc == TargetOpcode::G_MEMCPY || Opc == TargetOpcode::G_MEMMOVE ||
11182 Opc == TargetOpcode::G_MEMSET) &&
11183 "Expected memcpy like instruction");
11184
11185 auto MMOIt = MI.memoperands_begin();
11186 const MachineMemOperand *MemOp = *MMOIt;
11187
11188 Align DstAlign = MemOp->getBaseAlign();
11189 Align SrcAlign;
11190 auto [Dst, Src, Len] = MI.getFirst3Regs();
11191
11192 if (Opc != TargetOpcode::G_MEMSET) {
11193 assert(MMOIt != MI.memoperands_end() && "Expected a second MMO on MI");
11194 MemOp = *(++MMOIt);
11195 SrcAlign = MemOp->getBaseAlign();
11196 }
11197
11198 // See if this is a constant length copy
11199 auto LenVRegAndVal = getIConstantVRegValWithLookThrough(Len, MRI);
11200 if (!LenVRegAndVal)
11201 return UnableToLegalize;
11202 uint64_t KnownLen = LenVRegAndVal->Value.getZExtValue();
11203
11204 if (KnownLen == 0) {
11205 MI.eraseFromParent();
11206 return Legalized;
11207 }
11208
11209 if (MaxLen && KnownLen > MaxLen)
11210 return UnableToLegalize;
11211
11212 bool IsVolatile = MemOp->isVolatile();
11213 if (Opc == TargetOpcode::G_MEMCPY) {
11214 auto &MF = *MI.getParent()->getParent();
11215 const auto &TLI = *MF.getSubtarget().getTargetLowering();
11216 bool OptSize = shouldLowerMemFuncForSize(MF);
11217 uint64_t Limit = TLI.getMaxStoresPerMemcpy(OptSize);
11218 return lowerMemcpy(MI, Dst, Src, KnownLen, Limit, DstAlign, SrcAlign,
11219 IsVolatile);
11220 }
11221 if (Opc == TargetOpcode::G_MEMMOVE)
11222 return lowerMemmove(MI, Dst, Src, KnownLen, DstAlign, SrcAlign, IsVolatile);
11223 if (Opc == TargetOpcode::G_MEMSET)
11224 return lowerMemset(MI, Dst, Src, KnownLen, DstAlign, IsVolatile);
11225 return UnableToLegalize;
11226}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
constexpr LLT S1
constexpr LLT S32
constexpr LLT S64
AMDGPU Register Bank Select
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
#define X(NUM, ENUM, NAME)
Definition ELF.h:853
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file describes how to lower LLVM calls to machine code calls.
#define GISEL_VECREDUCE_CASES_NONSEQ
Definition Utils.h:75
static std::optional< bool > isBigEndian(const SmallDenseMap< int64_t, int64_t, 8 > &MemOffset2Idx, int64_t LowestIdx)
Given a map from byte offsets in memory to indices in a load/store, determine if that map corresponds...
This contains common code to allow clients to notify changes to machine instr.
Provides analysis for querying information about KnownBits during GISel passes.
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define RTLIBCASE_CMP(LibcallPrefix, ICmpPred)
#define RTLIBCASE_INT(LibcallPrefix)
static bool findGISelOptimalMemOpLowering(std::vector< LLT > &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS, unsigned SrcAS, const AttributeList &FuncAttributes, const TargetLowering &TLI)
static RTLIB::Libcall getOutlineAtomicLibcall(MachineInstr &MI)
static Register buildBitFieldInsert(MachineIRBuilder &B, Register TargetReg, Register InsertReg, Register OffsetBits)
Emit code to insert InsertReg into TargetRet at OffsetBits in TargetReg, while preserving other bits ...
static Register getMemsetValue(Register Val, LLT Ty, MachineIRBuilder &MIB)
static RTLIB::Libcall getRTLibDesc(unsigned Opcode, unsigned Size)
static std::pair< RTLIB::Libcall, CmpInst::Predicate > getFCMPLibcallDesc(const CmpInst::Predicate Pred, unsigned Size)
Returns the corresponding libcall for the given Pred and the ICMP predicate that should be generated ...
static void broadcastSrcOp(SmallVectorImpl< SrcOp > &Ops, unsigned N, MachineOperand &Op)
Operand Op is used on N sub-instructions.
static bool isLibCallInTailPosition(const CallLowering::ArgInfo &Result, MachineInstr &MI, const TargetInstrInfo &TII, MachineRegisterInfo &MRI)
True if an instruction is in tail position in its caller.
static Register getBitcastWiderVectorElementOffset(MachineIRBuilder &B, Register Idx, unsigned NewEltSize, unsigned OldEltSize)
Figure out the bit offset into a register when coercing a vector index for the wide element type.
static void makeDstOps(SmallVectorImpl< DstOp > &DstOps, LLT Ty, unsigned NumElts)
Fill DstOps with DstOps that have same number of elements combined as the Ty.
static bool shouldLowerMemFuncForSize(const MachineFunction &MF)
#define LCALL5(A)
static MachineInstrBuilder SwapN(unsigned N, DstOp Dst, MachineIRBuilder &B, MachineInstrBuilder Src, const APInt &Mask)
static LegalizerHelper::LegalizeResult loweri64tof16ITOFP(MachineInstr &MI, Register Dst, LLT DstTy, Register Src, LLT SrcTy, MachineIRBuilder &MIRBuilder)
i64->fp16 itofp can be lowered to i64->f64,f64->f32,f32->f16.
static void emitLoadFromConstantPool(Register DstReg, const Constant *ConstVal, MachineIRBuilder &MIRBuilder)
static void getUnmergePieces(SmallVectorImpl< Register > &Pieces, MachineIRBuilder &B, Register Src, LLT Ty)
static CmpInst::Predicate minMaxToCompare(unsigned Opc)
static RTLIB::Libcall getStateLibraryFunctionFor(MachineInstr &MI, const TargetLowering &TLI)
static std::pair< int, int > getNarrowTypeBreakDown(LLT OrigTy, LLT NarrowTy, LLT &LeftoverTy)
Try to break down OrigTy into NarrowTy sized pieces.
static bool hasSameNumEltsOnAllVectorOperands(GenericMachineInstr &MI, MachineRegisterInfo &MRI, std::initializer_list< unsigned > NonVecOpIndices)
Check that all vector operands have same number of elements.
static Register clampVectorIndex(MachineIRBuilder &B, Register IdxReg, LLT VecTy)
static RTLIB::Libcall getConvRTLibDesc(unsigned Opcode, Type *ToType, Type *FromType)
static void getUnmergeResults(SmallVectorImpl< Register > &Regs, const MachineInstr &MI)
Append the result registers of G_UNMERGE_VALUES MI to Regs.
static bool isNonZeroModBitWidthOrUndef(const MachineRegisterInfo &MRI, Register Reg, unsigned BW)
#define RTLIBCASE(LibcallPrefix)
static Type * getFloatTypeForLLT(LLVMContext &Ctx, LLT Ty)
Interface for Targets to specify which operations they can successfully select and how the others sho...
Tracks DebugLocs between checkpoints and verifies that they are transferred.
Implement a low-level type suitable for MachineInstr level instruction selection.
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Contains matchers for matching SSA Machine Instructions.
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
This file declares the MachineIRBuilder class.
Register Reg
Register const TargetRegisterInfo * TRI
#define R2(n)
Promote Memory to Register
Definition Mem2Reg.cpp:110
#define T
static MCRegister getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
MachineInstr unsigned OpIdx
uint64_t High
R600 Clause Merge
static constexpr MCPhysReg SPReg
const SmallVectorImpl< MachineOperand > & Cond
Remove Loads Into Fake Uses
#define LLVM_DEBUG(...)
Definition Debug.h:119
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
This file describes how to lower LLVM code to machine code.
Value * RHS
Value * LHS
The Input class is used to parse a yaml document into in-memory structs and vectors.
static const fltSemantics & IEEEsingle()
Definition APFloat.h:296
static constexpr roundingMode rmTowardZero
Definition APFloat.h:348
static const fltSemantics & IEEEdouble()
Definition APFloat.h:297
static constexpr roundingMode rmNearestTiesToEven
Definition APFloat.h:344
opStatus
IEEE-754R 7: Default exception handling.
Definition APFloat.h:360
opStatus convertFromAPInt(const APInt &Input, bool IsSigned, roundingMode RM)
Definition APFloat.h:1406
APInt bitcastToAPInt() const
Definition APFloat.h:1430
static APFloat getLargest(const fltSemantics &Sem, bool Negative=false)
Returns the largest finite number in the given semantics.
Definition APFloat.h:1197
static APFloat getInf(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Infinity.
Definition APFloat.h:1157
static APFloat getNaN(const fltSemantics &Sem, bool Negative=false, uint64_t payload=0)
Factory for NaN values.
Definition APFloat.h:1168
Class for arbitrary precision integers.
Definition APInt.h:78
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition APInt.h:235
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
Definition APInt.cpp:1055
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
Definition APInt.h:230
uint64_t getZExtValue() const
Get zero extended value.
Definition APInt.h:1563
unsigned getActiveBits() const
Compute the number of active bits in the value.
Definition APInt.h:1535
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
Definition APInt.cpp:968
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
Definition APInt.h:207
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
Definition APInt.h:1189
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition APInt.h:381
LLVM_ABI APInt urem(const APInt &RHS) const
Unsigned remainder operation.
Definition APInt.cpp:1709
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
Definition APInt.h:210
static APInt getMinValue(unsigned numBits)
Gets minimum unsigned value of APInt for a specific bit width.
Definition APInt.h:217
void negate()
Negate this APInt in place.
Definition APInt.h:1491
static LLVM_ABI APInt getSplat(unsigned NewLen, const APInt &V)
Return a value containing V broadcasted over NewLen bits.
Definition APInt.cpp:652
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
Definition APInt.h:220
LLVM_ABI APInt sext(unsigned width) const
Sign extend to a new width.
Definition APInt.cpp:1028
APInt shl(unsigned shiftAmt) const
Left-shift function.
Definition APInt.h:880
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition APInt.h:307
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
Definition APInt.h:201
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
Definition APInt.h:240
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
Definition APInt.h:858
static APInt getBitsSetWithWrap(unsigned numBits, unsigned loBit, unsigned hiBit)
Wrap version of getBitsSet.
Definition APInt.h:271
Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
iterator end() const
Definition ArrayRef.h:130
size_t size() const
Get the array size.
Definition ArrayRef.h:141
iterator begin() const
Definition ArrayRef.h:129
bool empty() const
Check if the array is empty.
Definition ArrayRef.h:136
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition InstrTypes.h:679
@ ICMP_SLT
signed less than
Definition InstrTypes.h:705
@ ICMP_SLE
signed less or equal
Definition InstrTypes.h:706
@ FCMP_OLT
0 1 0 0 True if ordered and less than
Definition InstrTypes.h:682
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
Definition InstrTypes.h:691
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
Definition InstrTypes.h:680
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
Definition InstrTypes.h:681
@ ICMP_UGE
unsigned greater or equal
Definition InstrTypes.h:700
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_SGT
signed greater than
Definition InstrTypes.h:703
@ FCMP_ULT
1 1 0 0 True if unordered or less than
Definition InstrTypes.h:690
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
Definition InstrTypes.h:684
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
Definition InstrTypes.h:687
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
Definition InstrTypes.h:688
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
Definition InstrTypes.h:683
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
Definition InstrTypes.h:685
@ ICMP_NE
not equal
Definition InstrTypes.h:698
@ ICMP_SGE
signed greater or equal
Definition InstrTypes.h:704
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Definition InstrTypes.h:692
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
Definition InstrTypes.h:689
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition InstrTypes.h:686
bool isSigned() const
Definition InstrTypes.h:930
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition InstrTypes.h:789
const APFloat & getValueAPF() const
Definition Constants.h:463
This is the shared class of boolean and integer constants.
Definition Constants.h:87
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition Constants.h:159
This is an important base class in LLVM.
Definition Constant.h:43
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
bool isBigEndian() const
Definition DataLayout.h:218
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
Definition DenseMap.h:254
LLT getLLTTy(const MachineRegisterInfo &MRI) const
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition TypeSize.h:309
static constexpr ElementCount get(ScalarTy MinVal, bool Scalable)
Definition TypeSize.h:315
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
Definition Function.h:714
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
Definition Function.h:711
AttributeList getAttributes() const
Return the attribute list for this Function.
Definition Function.h:354
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition Function.cpp:358
Represents any generic load, including sign/zero extending variants.
Register getDstReg() const
Get the definition register of the loaded value.
Register getValueReg() const
Get the stored value register.
Abstract class that contains various methods for clients to notify about changes.
virtual void changingInstr(MachineInstr &MI)=0
This instruction is about to be mutated in some way.
virtual void changedInstr(MachineInstr &MI)=0
This instruction was mutated in some way.
Represents a insert subvector.
Represents any type of generic load or store.
Register getPointerReg() const
Get the source register of the pointer value.
MachineMemOperand & getMMO() const
Get the MachineMemOperand on this instruction.
LocationSize getMemSize() const
Returns the size in bytes of the memory access.
bool isAtomic() const
Returns true if the attached MachineMemOperand has the atomic flag set.
Align getAlign() const
Return the minimum known alignment in bytes of the actual memory reference.
Represents a threeway compare.
Represents a G_STORE.
A base class for all GenericMachineInstrs.
Register getReg(unsigned Idx) const
Access the Idx'th operand as a register and return it.
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
Predicate getUnsignedPredicate() const
For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:354
static constexpr LLT float64()
Get a 64-bit IEEE double value.
LLT changeElementCount(ElementCount EC) const
Return a vector or scalar with the same element type and the new element count.
constexpr unsigned getScalarSizeInBits() const
constexpr bool isScalar() const
constexpr LLT changeElementType(LLT NewEltTy) const
If this type is a vector, return a vector with the same number of elements but the new element type.
static constexpr LLT vector(ElementCount EC, unsigned ScalarSizeInBits)
Get a low-level vector of some number of elements and element width.
LLT getScalarType() const
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr bool isValid() const
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
constexpr bool isVector() const
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
constexpr bool isScalable() const
Returns true if the LLT is a scalable vector.
constexpr bool isByteSized() const
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
constexpr bool isPointer() const
constexpr ElementCount getElementCount() const
static constexpr LLT float16()
Get a 16-bit IEEE half value.
constexpr unsigned getAddressSpace() const
static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
constexpr bool isPointerOrPointerVector() const
static LLT integer(unsigned SizeInBits)
static constexpr LLT bfloat16()
constexpr LLT changeVectorElementType(LLT NewEltTy) const
Returns a vector with the same number of elements but the new element type.
constexpr TypeSize getSizeInBytes() const
Returns the total size of the type in bytes, i.e.
LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
LLT changeVectorElementCount(ElementCount EC) const
Return a vector with the same element type and the new element count.
static constexpr LLT float32()
Get a 32-bit IEEE float value.
static LLT floatIEEE(unsigned SizeInBits)
LLT changeElementSize(unsigned NewEltSize) const
If this type is a vector, return a vector with the same number of elements but the new element size.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
LLVM_ABI void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
LLVM_ABI LegalizeResult lowerShlSat(MachineInstr &MI)
LLVM_ABI LegalizeResult narrowScalarCTPOP(MachineInstr &MI, unsigned TypeIdx, LLT Ty)
LLVM_ABI LegalizeResult lowerThreewayCompare(MachineInstr &MI)
LLVM_ABI LegalizeResult lowerFPTRUNC_F64_TO_F16(MachineInstr &MI)
LLVM_ABI LegalizeResult equalizeVectorShuffleLengths(MachineInstr &MI)
Equalize source and destination vector sizes of G_SHUFFLE_VECTOR.
LLVM_ABI LegalizeResult bitcastInsertVectorElt(MachineInstr &MI, unsigned TypeIdx, LLT CastTy)
Perform Bitcast legalize action on G_INSERT_VECTOR_ELT.
LLVM_ABI LegalizeResult lowerSITOFP(MachineInstr &MI)
LLVM_ABI LegalizeResult lowerDynStackAlloc(MachineInstr &MI)
LLVM_ABI LegalizeResult lowerBitCount(MachineInstr &MI)
LLVM_ABI LegalizeResult narrowScalarMul(MachineInstr &MI, LLT Ty)
LLVM_ABI LegalizeResult lowerFMinNumMaxNum(MachineInstr &MI)
LLVM_ABI LegalizeResult lowerU64ToF64BitFloatOps(MachineInstr &MI)
LLVM_ABI LegalizeResult lowerSSUBE(MachineInstr &MI)
LLVM_ABI LegalizeResult lowerIntrinsicRound(MachineInstr &MI)
LLVM_ABI void widenScalarSrc(MachineInstr &MI, LLT WideTy, unsigned OpIdx, unsigned ExtOpcode)
Legalize a single operand OpIdx of the machine instruction MI as a Use by extending the operand's typ...
LLVM_ABI LegalizeResult moreElementsVectorShuffle(MachineInstr &MI, unsigned TypeIdx, LLT MoreTy)
LLVM_ABI LegalizeResult lowerSMULH_UMULH(MachineInstr &MI)
LLVM_ABI LegalizeResult lowerLoad(GAnyLoad &MI)
LLVM_ABI LegalizeResult fewerElementsVectorShuffle(MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy)
LLVM_ABI LegalizeResult lowerAbsToAddXor(MachineInstr &MI)
LLVM_ABI void moreElementsVectorDst(MachineInstr &MI, LLT MoreTy, unsigned OpIdx)
Legalize a single operand OpIdx of the machine instruction MI as a Def by performing it with addition...
LegalizerHelper::LegalizeResult createAtomicLibcall(MachineInstr &MI) const
LLVM_ABI LegalizeResult lowerFConstant(MachineInstr &MI)
LLVM_ABI LegalizeResult narrowScalarCTTZ(MachineInstr &MI, unsigned TypeIdx, LLT Ty)
LLVM_ABI LegalizeResult lowerBitreverse(MachineInstr &MI)
LLVM_ABI LegalizeResult narrowScalarShift(MachineInstr &MI, unsigned TypeIdx, LLT Ty)
LLVM_ABI LegalizeResult lowerExtractInsertVectorElt(MachineInstr &MI)
Lower a vector extract or insert by writing the vector to a stack temporary and reloading the element...
LLVM_ABI LegalizeResult moreElementsVector(MachineInstr &MI, unsigned TypeIdx, LLT MoreTy)
Legalize a vector instruction by increasing the number of vector elements involved and ignoring the a...
LLVM_ABI LegalizeResult lowerFunnelShiftWithInverse(MachineInstr &MI)
LLVM_ABI LegalizeResult lowerAbsToMaxNeg(MachineInstr &MI)
LLVM_ABI LegalizeResult lowerFPTOINT_SAT(MachineInstr &MI)
LLVM_ABI LegalizeResult narrowScalarCTLS(MachineInstr &MI, unsigned TypeIdx, LLT Ty)
LLVM_ABI LegalizeResult lowerEXT(MachineInstr &MI)
LLVM_ABI LegalizeResult lowerStore(GStore &MI)
LLVM_ABI LegalizeResult lowerAbsToCNeg(MachineInstr &MI)
LLVM_ABI LegalizeResult bitcastExtractSubvector(MachineInstr &MI, unsigned TypeIdx, LLT CastTy)
This attempts to bitcast G_EXTRACT_SUBVECTOR to CastTy.
LLVM_ABI LegalizeResult narrowScalarShiftMultiway(MachineInstr &MI, LLT TargetTy)
Multi-way shift legalization: directly split wide shifts into target-sized parts in a single step,...
LLVM_ABI LegalizeResult lowerSADDO_SSUBO(MachineInstr &MI)
LLVM_ABI MachineInstrBuilder createStackTemporary(TypeSize Bytes, Align Alignment, MachinePointerInfo &PtrInfo)
Create a stack temporary based on the size in bytes and the alignment.
LLVM_ABI Register buildConstantShiftPart(unsigned Opcode, unsigned PartIdx, unsigned NumParts, ArrayRef< Register > SrcParts, const ShiftParams &Params, LLT TargetTy, LLT ShiftAmtTy)
Generates a single output part for constant shifts using direct indexing.
LLVM_ABI void narrowScalarSrc(MachineInstr &MI, LLT NarrowTy, unsigned OpIdx)
Legalize a single operand OpIdx of the machine instruction MI as a Use by truncating the operand's ty...
LLVM_ABI LegalizeResult fewerElementsVectorPhi(GenericMachineInstr &MI, unsigned NumElts)
LLVM_ABI LegalizeResult lowerFPTOUI(MachineInstr &MI)
const TargetLowering & getTargetLowering() const
LLVM_ABI LegalizeResult narrowScalar(MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy)
Legalize an instruction by reducing the width of the underlying scalar type.
LLVM_ABI LegalizeResult narrowScalarFPTOI(MachineInstr &MI, unsigned TypeIdx, LLT Ty)
LLVM_ABI LegalizeResult bitcastInsertSubvector(MachineInstr &MI, unsigned TypeIdx, LLT CastTy)
This attempts to bitcast G_INSERT_SUBVECTOR to CastTy.
LLVM_ABI LegalizerHelper(MachineFunction &MF, GISelChangeObserver &Observer, MachineIRBuilder &B, const LibcallLoweringInfo *Libcalls=nullptr)
LLVM_ABI LegalizeResult lowerUnmergeValues(MachineInstr &MI)
LLVM_ABI LegalizeResult bitcast(MachineInstr &MI, unsigned TypeIdx, LLT Ty)
Legalize an instruction by replacing the value type.
LLVM_ABI LegalizeResult scalarizeVectorBooleanStore(GStore &MI)
Given a store of a boolean vector, scalarize it.
LLVM_ABI LegalizeResult lowerBitcast(MachineInstr &MI)
LLVM_ABI LegalizeResult lowerMinMax(MachineInstr &MI)
LLVM_ABI LegalizeResult lowerFunnelShiftAsShifts(MachineInstr &MI)
LLVM_ABI LegalizeResult lowerInsert(MachineInstr &MI)
LLVM_ABI LegalizeResult lowerReadWriteRegister(MachineInstr &MI)
LLVM_ABI LegalizeResult lowerExtract(MachineInstr &MI)
LLVM_ABI LegalizeResult fewerElementsBitcast(MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy)
LLVM_ABI LegalizeResult narrowScalarShiftByConstant(MachineInstr &MI, const APInt &Amt, LLT HalfTy, LLT ShiftAmtTy)
LLVM_ABI LegalizeResult lowerISFPCLASS(MachineInstr &MI)
LLVM_ABI LegalizeResult lowerAbsDiffToSelect(MachineInstr &MI)
LLVM_ABI LegalizeResult lowerAddSubSatToMinMax(MachineInstr &MI)
LLVM_ABI LegalizeResult lowerFPOWI(MachineInstr &MI)
LLVM_ABI LegalizeResult lowerFPExtAndTruncMem(MachineInstr &MI)
LLVM_ABI LegalizeResult lowerFAbs(MachineInstr &MI)
LLVM_ABI LegalizeResult narrowScalarBasic(MachineInstr &MI, unsigned TypeIdx, LLT Ty)
LLVM_ABI LegalizeResult lowerVectorReduction(MachineInstr &MI)
const LegalizerInfo & getLegalizerInfo() const
Expose LegalizerInfo so the clients can re-use.
LLVM_ABI LegalizeResult reduceLoadStoreWidth(GLoadStore &MI, unsigned TypeIdx, LLT NarrowTy)
LLVM_ABI LegalizeResult fewerElementsVectorMultiEltType(GenericMachineInstr &MI, unsigned NumElts, std::initializer_list< unsigned > NonVecOpIndices={})
Handles most opcodes.
LLVM_ABI LegalizeResult narrowScalarSelect(MachineInstr &MI, unsigned TypeIdx, LLT Ty)
LLVM_ABI LegalizeResult narrowScalarShiftByConstantMultiway(MachineInstr &MI, const APInt &Amt, LLT TargetTy, LLT ShiftAmtTy)
Optimized path for constant shift amounts using static indexing.
LLVM_ABI MachineInstrBuilder createStackStoreLoad(const DstOp &Res, const SrcOp &Val)
Create a store of Val to a stack temporary and return a load as the same type as Res.
LLVM_ABI LegalizeResult lowerVAArg(MachineInstr &MI)
LLVM_ABI LegalizeResult lowerFMODF(MachineInstr &MI)
@ Legalized
Instruction has been legalized and the MachineFunction changed.
@ AlreadyLegal
Instruction was already legal and no change was made to the MachineFunction.
@ UnableToLegalize
Some kind of error has occurred and we could not legalize this instruction.
LLVM_ABI LegalizeResult moreElementsVectorPhi(MachineInstr &MI, unsigned TypeIdx, LLT MoreTy)
LLVM_ABI LegalizeResult lowerU64ToF32BitOps(MachineInstr &MI)
LLVM_ABI LegalizeResult lowerFCopySign(MachineInstr &MI)
LLVM_ABI LegalizeResult bitcastConcatVector(MachineInstr &MI, unsigned TypeIdx, LLT CastTy)
LLVM_ABI LegalizeResult lowerRotateWithReverseRotate(MachineInstr &MI)
LLVM_ABI LegalizeResult lowerSADDE(MachineInstr &MI)
LLVM_ABI LegalizeResult lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty)
Legalize an instruction by splitting it into simpler parts, hopefully understood by the target.
LLVM_ABI LegalizeResult lowerFunnelShift(MachineInstr &MI)
LLVM_ABI LegalizeResult lowerFPTRUNC_F32_TO_BF16(MachineInstr &MI)
LLVM_ABI LegalizeResult fewerElementsVector(MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy)
Legalize a vector instruction by splitting into multiple components, each acting on the same scalar t...
GISelChangeObserver & Observer
To keep track of changes made by the LegalizerHelper.
LLVM_ABI LegalizeResult conversionLibcall(MachineInstr &MI, Type *ToType, Type *FromType, LostDebugLocObserver &LocObserver, bool IsSigned=false) const
LLVM_ABI void bitcastDst(MachineInstr &MI, LLT CastTy, unsigned OpIdx)
Legalize a single operand OpIdx of the machine instruction MI as a def by inserting a G_BITCAST from ...
LLVM_ABI LegalizeResult lowerFPTRUNC(MachineInstr &MI)
LLVM_ABI LegalizeResult lowerFMad(MachineInstr &MI)
LLVM_ABI LegalizeResult widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy)
Legalize an instruction by performing the operation on a wider scalar type (for example a 16-bit addi...
LLVM_ABI LegalizeResult lowerAddSubSatToAddoSubo(MachineInstr &MI)
LLVM_ABI LegalizeResult narrowScalarExtract(MachineInstr &MI, unsigned TypeIdx, LLT Ty)
LLVM_ABI LegalizeResult lowerFFloor(MachineInstr &MI)
LLVM_ABI LegalizeResult lowerAbsDiffToMinMax(MachineInstr &MI)
LLVM_ABI LegalizeResult narrowScalarExt(MachineInstr &MI, unsigned TypeIdx, LLT Ty)
LLVM_ABI LegalizeResult fewerElementsVectorSeqReductions(MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy)
LLVM_ABI Register getDynStackAllocTargetPtr(Register SPReg, Register AllocSize, Align Alignment, LLT PtrTy)
LLVM_ABI LegalizeResult lowerFPTOSI(MachineInstr &MI)
LLVM_ABI LegalizeResult lowerUITOFP(MachineInstr &MI)
LLVM_ABI LegalizeResult lowerShuffleVector(MachineInstr &MI)
LLVM_ABI LegalizeResult fewerElementsVectorMerge(MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy)
LLVM_ABI LegalizeResult lowerMergeValues(MachineInstr &MI)
LLVM_ABI LegalizeResult fewerElementsVectorUnmergeValues(MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy)
LLVM_ABI LegalizeResult createMemLibcall(MachineRegisterInfo &MRI, MachineInstr &MI, LostDebugLocObserver &LocObserver) const
Create a libcall to memcpy et al.
LLVM_ABI LegalizeResult lowerVECTOR_COMPRESS(MachineInstr &MI)
LLVM_ABI LegalizeResult lowerMulfix(MachineInstr &MI)
LLVM_ABI void moreElementsVectorSrc(MachineInstr &MI, LLT MoreTy, unsigned OpIdx)
Legalize a single operand OpIdx of the machine instruction MI as a Use by producing a vector with und...
LLVM_ABI LegalizeResult bitcastExtractVectorElt(MachineInstr &MI, unsigned TypeIdx, LLT CastTy)
Perform Bitcast legalize action on G_EXTRACT_VECTOR_ELT.
LLVM_ABI LegalizeResult lowerRotate(MachineInstr &MI)
LLVM_ABI LegalizeResult lowerU64ToF32WithSITOFP(MachineInstr &MI)
LLVM_ABI LegalizeResult createLibcall(const char *Name, const CallLowering::ArgInfo &Result, ArrayRef< CallLowering::ArgInfo > Args, CallingConv::ID CC, LostDebugLocObserver &LocObserver, MachineInstr *MI=nullptr) const
Helper function that creates a libcall to the given Name using the given calling convention CC.
LLVM_ABI LegalizeResult lowerMemCpyFamily(MachineInstr &MI, unsigned MaxLen=0)
LLVM_ABI Register coerceToScalar(Register Val)
Cast the given value to an LLT::scalar with an equivalent size.
LLVM_ABI LegalizeResult bitcastShuffleVector(MachineInstr &MI, unsigned TypeIdx, LLT CastTy)
LLVM_ABI LegalizeResult lowerDIVREM(MachineInstr &MI)
LLVM_ABI LegalizeResult lowerSelect(MachineInstr &MI)
LLVM_ABI LegalizeResult narrowScalarInsert(MachineInstr &MI, unsigned TypeIdx, LLT Ty)
LLVM_ABI LegalizeResult narrowScalarFLDEXP(MachineInstr &MI, unsigned TypeIdx, LLT Ty)
LLVM_ABI Register buildVariableShiftPart(unsigned Opcode, Register MainOperand, Register ShiftAmt, LLT TargetTy, Register CarryOperand=Register())
Generates a shift part with carry for variable shifts.
LLVM_ABI void bitcastSrc(MachineInstr &MI, LLT CastTy, unsigned OpIdx)
Legalize a single operand OpIdx of the machine instruction MI as a use by inserting a G_BITCAST to Ca...
LLVM_ABI void narrowScalarDst(MachineInstr &MI, LLT NarrowTy, unsigned OpIdx, unsigned ExtOpcode)
LLVM_ABI LegalizeResult libcall(MachineInstr &MI, LostDebugLocObserver &LocObserver)
Legalize an instruction by emitting a runtime library call instead.
LLVM_ABI LegalizeResult lowerStackRestore(MachineInstr &MI)
LLVM_ABI LegalizeResult fewerElementsVectorReductions(MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy)
LLVM_ABI LegalizeResult lowerStackSave(MachineInstr &MI)
LLVM_ABI LegalizeResult fewerElementsVectorExtractInsertVectorElt(MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy)
LLVM_ABI LegalizeResult narrowScalarCTLZ(MachineInstr &MI, unsigned TypeIdx, LLT Ty)
MachineIRBuilder & MIRBuilder
Expose MIRBuilder so clients can set their own RecordInsertInstruction functions.
LLVM_ABI LegalizeResult lowerTRUNC(MachineInstr &MI)
LLVM_ABI LegalizeResult lowerBswap(MachineInstr &MI)
LLVM_ABI Register getVectorElementPointer(Register VecPtr, LLT VecTy, Register Index)
Get a pointer to vector element Index located in memory for a vector of type VecTy starting at a base...
LLVM_ABI LegalizeResult narrowScalarAddSub(MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy)
LLVM_ABI Align getStackTemporaryAlignment(LLT Type, Align MinAlign=Align()) const
Return the alignment to use for a stack temporary object with the given type.
LLVM_ABI LegalizeResult lowerConstant(MachineInstr &MI)
LLVM_ABI void widenScalarDst(MachineInstr &MI, LLT WideTy, unsigned OpIdx=0, unsigned TruncOpcode=TargetOpcode::G_TRUNC)
Legalize a single operand OpIdx of the machine instruction MI as a Def by extending the operand's typ...
LLVM_ABI LegalizeResult simpleLibcall(MachineInstr &MI, MachineIRBuilder &MIRBuilder, unsigned Size, Type *OpType, LostDebugLocObserver &LocObserver) const
LLVM_ABI LegalizeResult legalizeInstrStep(MachineInstr &MI, LostDebugLocObserver &LocObserver)
Replace MI by a sequence of legal instructions that can implement the same operation.
LLVM_ABI LegalizeResult lowerFMinimumMaximum(MachineInstr &MI)
Tracks which library functions to use for a particular subtarget.
TypeSize getValue() const
void checkpoint(bool CheckDebugLocs=true)
Call this to indicate that it's a good point to assess whether locations have been lost.
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition MCInstrInfo.h:90
A single uniqued string.
Definition Metadata.h:722
LLVM_ABI StringRef getString() const
Definition Metadata.cpp:632
Machine Value Type.
static LLVM_ABI MVT getVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
LLVM_ABI iterator getFirstTerminatorForward()
Finds the first terminator in a block by scanning forward.
LLVM_ABI iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
unsigned getConstantPoolIndex(const Constant *C, Align Alignment)
getConstantPoolIndex - Create a new entry in the constant pool or return an existing one.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
LLVM_ABI int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
bool isFixedObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a fixed stack object.
void setObjectAlignment(int ObjectIdx, Align Alignment)
setObjectAlignment - Change the alignment of the specified stack object.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
MachineConstantPool * getConstantPool()
getConstantPool - Return the constant pool object for the current function.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Helper class to build MachineInstr.
MachineInstrBuilder buildConstantPool(const DstOp &Res, unsigned Idx)
Build and insert Res = G_CONSTANT_POOL Idx.
MachineInstrBuilder buildMul(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_MUL Op0, Op1.
MachineInstrBuilder buildAnd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1)
Build and insert Res = G_AND Op0, Op1.
const TargetInstrInfo & getTII()
MachineInstrBuilder buildURem(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_UREM Op0, Op1.
MachineInstrBuilder buildLShr(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
MachineInstrBuilder buildZExt(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_ZEXT Op.
MachineInstrBuilder buildConcatVectors(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_CONCAT_VECTORS Op0, ...
MachineInstrBuilder buildSub(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_SUB Op0, Op1.
MachineInstrBuilder buildSplatBuildVector(const DstOp &Res, const SrcOp &Src)
Build and insert Res = G_BUILD_VECTOR with Src replicated to fill the number of elements.
MachineInstrBuilder buildIntToPtr(const DstOp &Dst, const SrcOp &Src)
Build and insert a G_INTTOPTR instruction.
MachineInstrBuilder buildBuildVector(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_BUILD_VECTOR Op0, ...
MachineInstrBuilder buildNeg(const DstOp &Dst, const SrcOp &Src0)
Build and insert integer negation Zero = G_CONSTANT 0 Res = G_SUB Zero, Op0.
MachineInstrBuilder buildMergeLikeInstr(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_MERGE_VALUES Op0, ... or Res = G_BUILD_VECTOR Op0, ... or Res = G_CONCAT_VEC...
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
virtual MachineInstrBuilder buildFConstant(const DstOp &Res, const ConstantFP &Val)
Build and insert Res = G_FCONSTANT Val.
MachineInstrBuilder buildShl(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
MachineInstrBuilder buildUITOFP(const DstOp &Dst, const SrcOp &Src0)
Build and insert Res = G_UITOFP Src0.
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineInstrBuilder buildSITOFP(const DstOp &Dst, const SrcOp &Src0)
Build and insert Res = G_SITOFP Src0.
MachineFunction & getMF()
Getter for the function we currently build.
MachineInstrBuilder buildTrunc(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_TRUNC Op.
MachineInstrBuilder buildBitcast(const DstOp &Dst, const SrcOp &Src)
Build and insert Dst = G_BITCAST Src.
MachineRegisterInfo * getMRI()
Getter for MRI.
MachineInstrBuilder buildFPTrunc(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_FPTRUNC Op.
MachineInstrBuilder buildOr(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_OR Op0, Op1.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
const DataLayout & getDataLayout() const
MachineInstrBuilder buildLoadInstr(unsigned Opcode, const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = <opcode> Addr, MMO.
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addUse(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
const MachineOperand & getOperand(unsigned i) const
LLVM_ABI MachineInstrBundleIterator< MachineInstr > eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
A description of a memory reference used in the backend.
void setType(LLT NewTy)
Reset the tracked memory type.
LLT getMemoryType() const
Return the memory type of the memory reference.
void clearRanges()
Unset the tracked range metadata.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
const MachinePointerInfo & getPointerInfo() const
LocationSize getSizeInBits() const
Return the size in bits of the memory reference.
MachineOperand class - Representation of each machine instruction operand.
static MachineOperand CreateES(const char *SymName, unsigned TargetFlags=0)
const ConstantInt * getCImm() const
LLVM_ABI void setReg(Register Reg)
Change the register this operand corresponds to.
void setCImm(const ConstantInt *CI)
Register getReg() const
getReg - Returns the register number.
const ConstantFP * getFPImm() const
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLT getType(Register Reg) const
Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register.
LLVM_ABI Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Wrapper class representing virtual and physical registers.
Definition Register.h:20
constexpr bool isValid() const
Definition Register.h:112
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition Register.h:79
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition Register.h:83
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
LLT getLLTTy(const MachineRegisterInfo &MRI) const
Represent a constant reference to a string, i.e.
Definition StringRef.h:56
constexpr const char * data() const
Get a pointer to the start of the string (which may not be null terminated).
Definition StringRef.h:138
static LLVM_ABI StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
Definition Type.cpp:483
TargetInstrInfo - Interface to description of machine instruction set.
virtual bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *=nullptr) const
Determine if the target supports unaligned memory accesses.
virtual LLT getOptimalMemOpLLT(const MemOp &Op, const AttributeList &) const
LLT returning variant.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
const Triple & getTargetTriple() const
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetFrameLowering * getFrameLowering() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
virtual const TargetLowering * getTargetLowering() const
bool isOSDarwin() const
Is this a "Darwin" OS (macOS, iOS, tvOS, watchOS, DriverKit, XROS, or bridgeOS).
Definition Triple.h:645
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition TypeSize.h:343
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:313
static LLVM_ABI Type * getFP128Ty(LLVMContext &C)
Definition Type.cpp:295
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Definition Type.cpp:286
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
Definition Type.cpp:317
static LLVM_ABI Type * getDoubleTy(LLVMContext &C)
Definition Type.cpp:291
static LLVM_ABI Type * getX86_FP80Ty(LLVMContext &C)
Definition Type.cpp:294
static LLVM_ABI Type * getFloatTy(LLVMContext &C)
Definition Type.cpp:290
static LLVM_ABI Type * getHalfTy(LLVMContext &C)
Definition Type.cpp:288
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:255
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
constexpr LeafTy divideCoefficientBy(ScalarTy RHS) const
We do not provide the '/' operator here because division for polynomial types does not work in the sa...
Definition TypeSize.h:252
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ Fast
Attempts to make calls as fast as possible (e.g.
Definition CallingConv.h:41
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ FewerElements
The (vector) operation should be implemented by splitting it into sub-vectors where the operation is ...
@ Libcall
The operation should be implemented as a call to some kind of runtime support library.
@ WidenScalar
The operation should be implemented in terms of a wider scalar base-type.
@ Bitcast
Perform the operation on a different, but equivalently sized type.
@ NarrowScalar
The operation should be synthesized from multiple instructions acting on a narrower scalar base-type.
@ MoreElements
The (vector) operation should be implemented by widening the input vector and ignoring the lanes adde...
ConstantMatch< APInt > m_ICst(APInt &Cst)
bool mi_match(Reg R, const MachineRegisterInfo &MRI, Pattern &&P)
LLVM_ABI Libcall getSINTTOFP(EVT OpVT, EVT RetVT)
getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getUINTTOFP(EVT OpVT, EVT RetVT)
getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getFPTOUINT(EVT OpVT, EVT RetVT)
getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getFPTOSINT(EVT OpVT, EVT RetVT)
getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getFPEXT(EVT OpVT, EVT RetVT)
getFPEXT - Return the FPEXT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getFPROUND(EVT OpVT, EVT RetVT)
getFPROUND - Return the FPROUND_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
Invariant opcodes: All instruction sets have these as their low opcodes.
This is an optimization pass for GlobalISel generic memory operations.
IterT next_nodbg(IterT It, IterT End, bool SkipPseudoOp=true)
Increment It, then continue incrementing it while it points to a debug instruction.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:315
@ Offset
Definition DWP.cpp:557
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
Definition STLExtras.h:830
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
LLVM_ABI Type * getTypeForLLT(LLT Ty, LLVMContext &C)
Get the type back from LLT.
Definition Utils.cpp:1984
LLVM_ABI MachineInstr * getOpcodeDef(unsigned Opcode, Register Reg, const MachineRegisterInfo &MRI)
See if Reg is defined by a single def instruction that is Opcode.
Definition Utils.cpp:653
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition STLExtras.h:1668
LLVM_ABI std::optional< APInt > getIConstantVRegVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT, return the corresponding value.
Definition Utils.cpp:294
@ Undef
Value of the register doesn't matter.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI const llvm::fltSemantics & getFltSemanticForLLT(LLT Ty)
Get the appropriate floating point arithmetic semantic based on the bit size of the given scalar LLT.
constexpr int64_t minIntN(int64_t N)
Gets the minimum value for a N-bit signed integer.
Definition MathExtras.h:223
LLVM_ABI MVT getMVTForLLT(LLT Ty)
Get a rough equivalent of an MVT for a given LLT.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2207
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition MathExtras.h:284
LLVM_ABI std::optional< APInt > isConstantOrConstantSplatVector(MachineInstr &MI, const MachineRegisterInfo &MRI)
Determines if MI defines a constant integer or a splat vector of constant integers.
Definition Utils.cpp:1527
LLVM_ABI bool matchUnaryPredicate(const MachineRegisterInfo &MRI, Register Reg, std::function< bool(const Constant *ConstVal)> Match, bool AllowUndefs=false)
Attempt to match a unary predicate against a scalar/splat constant or every element of a constant G_B...
Definition Utils.cpp:1584
detail::concat_range< ValueT, RangeTs... > concat(RangeTs &&...Ranges)
Returns a concatenated range across two or more ranges.
Definition STLExtras.h:1151
uint64_t PowerOf2Ceil(uint64_t A)
Returns the power of two which is greater than or equal to the given value.
Definition MathExtras.h:385
LLVM_ABI LLVM_READNONE LLT getLCMType(LLT OrigTy, LLT TargetTy)
Return the least common multiple type of OrigTy and TargetTy, by changing the number of vector elemen...
Definition Utils.cpp:1151
unsigned M1(unsigned Val)
Definition VE.h:377
constexpr T MinAlign(U A, V B)
A and B are either alignments or offsets.
Definition MathExtras.h:357
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition MathExtras.h:331
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:209
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
constexpr uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
@ Success
The lock was released successfully.
LLVM_ABI EVT getApproximateEVTForLLT(LLT Ty, LLVMContext &Ctx)
LLVM_ABI void extractParts(Register Reg, LLT Ty, int NumParts, SmallVectorImpl< Register > &VRegs, MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
Helper function to split a wide generic register into bitwise blocks with the given Type (which impli...
Definition Utils.cpp:508
To bit_cast(const From &from) noexcept
Definition bit.h:90
@ Mul
Product of integers.
@ FSub
Subtraction of floats.
@ Xor
Bitwise or logical XOR of integers.
@ Sub
Subtraction of integers.
@ Add
Sum of integers.
FunctionAddr VTableAddr Next
Definition InstrProf.h:141
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
OutputIt copy(R &&Range, OutputIt Out)
Definition STLExtras.h:1884
constexpr int64_t maxIntN(int64_t N)
Gets the maximum value for a N-bit signed integer.
Definition MathExtras.h:232
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI std::optional< ValueAndVReg > getIConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs=true)
If VReg is defined by a statically evaluable chain of instructions rooted on a G_CONSTANT returns its...
Definition Utils.cpp:433
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1946
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
Definition Alignment.h:201
Align assumeAligned(uint64_t Value)
Treats the value 0 as a 1, so Align is always at least 1.
Definition Alignment.h:100
unsigned Log2(Align A)
Returns the log2 of the alignment.
Definition Alignment.h:197
LLVM_ABI LLVM_READNONE LLT getGCDType(LLT OrigTy, LLT TargetTy)
Return a type where the total size is the greatest common divisor of OrigTy and TargetTy.
Definition Utils.cpp:1239
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
Definition bit.h:347
@ Custom
The result value requires a custom uniformity check.
Definition Uniformity.h:31
LLVM_ABI void extractVectorParts(Register Reg, unsigned NumElts, SmallVectorImpl< Register > &VRegs, MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
Version which handles irregular sub-vector splits.
Definition Utils.cpp:611
constexpr uint64_t NextPowerOf2(uint64_t A)
Returns the next power of two (in 64-bits) that is strictly greater than A.
Definition MathExtras.h:373
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:876
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
SmallVector< ISD::ArgFlagsTy, 4 > Flags
CallingConv::ID CallConv
Calling convention to be used for the call.
bool isKnownNeverZero() const
Return true if it's known this can never be a zero.
The LegalityQuery object bundles together all the information that's needed to decide whether a given...
Matching combinators.
This class contains a discriminated union of information about pointers in memory operands,...
LLVM_ABI unsigned getAddrSpace() const
Return the LLVM IR address space number that this pointer points into.
static LLVM_ABI MachinePointerInfo getConstantPool(MachineFunction &MF)
Return a MachinePointerInfo record that refers to the constant pool.
MachinePointerInfo getWithOffset(int64_t O) const
static LLVM_ABI MachinePointerInfo getUnknownStack(MachineFunction &MF)
Stack memory without other information.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition Alignment.h:106
static MemOp Set(uint64_t Size, bool DstAlignCanChange, Align DstAlign, bool IsZeroMemset, bool IsVolatile)
static MemOp Copy(uint64_t Size, bool DstAlignCanChange, Align DstAlign, Align SrcAlign, bool IsVolatile, bool MemcpyStrSrc=false)
static StringRef getLibcallImplName(RTLIB::LibcallImpl CallImpl)
Get the libcall routine name for the specified libcall implementation.