LLVM 23.0.0git
WebAssemblyISelLowering.cpp
Go to the documentation of this file.
1//=- WebAssemblyISelLowering.cpp - WebAssembly DAG Lowering Implementation -==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file implements the WebAssemblyTargetLowering class.
11///
12//===----------------------------------------------------------------------===//
13
32#include "llvm/IR/Function.h"
34#include "llvm/IR/Intrinsics.h"
35#include "llvm/IR/IntrinsicsWebAssembly.h"
40using namespace llvm;
41
42#define DEBUG_TYPE "wasm-lower"
43
45 const TargetMachine &TM, const WebAssemblySubtarget &STI)
46 : TargetLowering(TM, STI), Subtarget(&STI) {
47 auto MVTPtr = Subtarget->hasAddr64() ? MVT::i64 : MVT::i32;
48
49 // Set the load count for memcmp expand optimization
52
53 // Booleans always contain 0 or 1.
55 // Except in SIMD vectors
57 // We don't know the microarchitecture here, so just reduce register pressure.
59 // Tell ISel that we have a stack pointer.
61 Subtarget->hasAddr64() ? WebAssembly::SP64 : WebAssembly::SP32);
62 // Set up the register classes.
63 addRegisterClass(MVT::i32, &WebAssembly::I32RegClass);
64 addRegisterClass(MVT::i64, &WebAssembly::I64RegClass);
65 addRegisterClass(MVT::f32, &WebAssembly::F32RegClass);
66 addRegisterClass(MVT::f64, &WebAssembly::F64RegClass);
67 if (Subtarget->hasSIMD128()) {
68 addRegisterClass(MVT::v16i8, &WebAssembly::V128RegClass);
69 addRegisterClass(MVT::v8i16, &WebAssembly::V128RegClass);
70 addRegisterClass(MVT::v4i32, &WebAssembly::V128RegClass);
71 addRegisterClass(MVT::v4f32, &WebAssembly::V128RegClass);
72 addRegisterClass(MVT::v2i64, &WebAssembly::V128RegClass);
73 addRegisterClass(MVT::v2f64, &WebAssembly::V128RegClass);
74 }
75 if (Subtarget->hasFP16()) {
76 addRegisterClass(MVT::v8f16, &WebAssembly::V128RegClass);
77 }
78 if (Subtarget->hasReferenceTypes()) {
79 addRegisterClass(MVT::externref, &WebAssembly::EXTERNREFRegClass);
80 addRegisterClass(MVT::funcref, &WebAssembly::FUNCREFRegClass);
81 if (Subtarget->hasExceptionHandling()) {
82 addRegisterClass(MVT::exnref, &WebAssembly::EXNREFRegClass);
83 }
84 }
85 // Compute derived properties from the register classes.
86 computeRegisterProperties(Subtarget->getRegisterInfo());
87
88 // Transform loads and stores to pointers in address space 1 to loads and
89 // stores to WebAssembly global variables, outside linear memory.
90 for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64}) {
93 }
94 if (Subtarget->hasSIMD128()) {
95 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
96 MVT::v2f64}) {
99 }
100 }
101 if (Subtarget->hasFP16()) {
102 setOperationAction(ISD::LOAD, MVT::v8f16, Custom);
104 }
105 if (Subtarget->hasReferenceTypes()) {
106 // We need custom load and store lowering for both externref, funcref and
107 // Other. The MVT::Other here represents tables of reference types.
108 for (auto T : {MVT::externref, MVT::funcref, MVT::Other}) {
111 }
112 }
113
121
122 // Take the default expansion for va_arg, va_copy, and va_end. There is no
123 // default action for va_start, so we do that custom.
128
129 for (auto T : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64, MVT::v8f16}) {
130 if (!Subtarget->hasFP16() && T == MVT::v8f16) {
131 continue;
132 }
133 // Don't expand the floating-point types to constant pools.
135 // Expand floating-point comparisons.
136 for (auto CC : {ISD::SETO, ISD::SETUO, ISD::SETUEQ, ISD::SETONE,
139 // Expand floating-point library function operators.
142 // Expand vector FREM, but use a libcall rather than an expansion for scalar
143 if (MVT(T).isVector())
145 else
147 // Note supported floating-point library function operators that otherwise
148 // default to expand.
152 // Support minimum and maximum, which otherwise default to expand.
155 // When experimental v8f16 support is enabled these instructions don't need
156 // to be expanded.
157 if (T != MVT::v8f16) {
160 }
161 if (Subtarget->hasFP16() && T == MVT::f32) {
163 setTruncStoreAction(T, MVT::f16, Legal);
164 } else {
166 setTruncStoreAction(T, MVT::f16, Expand);
167 }
168 }
169
170 // Expand unavailable integer operations.
171 for (auto Op :
175 for (auto T : {MVT::i32, MVT::i64})
177 if (Subtarget->hasSIMD128())
178 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
180 }
181
182 if (Subtarget->hasWideArithmetic()) {
188 }
189
190 if (Subtarget->hasNontrappingFPToInt())
192 for (auto T : {MVT::i32, MVT::i64})
194
195 if (Subtarget->hasRelaxedSIMD()) {
198 {MVT::v4f32, MVT::v2f64}, Custom);
199 }
200 // SIMD-specific configuration
201 if (Subtarget->hasSIMD128()) {
202
204
205 // Combine wide-vector muls, with extend inputs, to extmul_half.
208
209 // Combine vector mask reductions into alltrue/anytrue
211
212 // Convert vector to integer bitcasts to bitmask
214
215 // Hoist bitcasts out of shuffles
217
218 // Combine extends of extract_subvectors into widening ops
220
221 // Combine int_to_fp or fp_extend of extract_vectors and vice versa into
222 // conversions ops
225
226 // Combine fp_to_{s,u}int_sat or fp_round of concat_vectors or vice versa
227 // into conversion ops
231
233
234 // Support saturating add/sub for i8x16 and i16x8
236 for (auto T : {MVT::v16i8, MVT::v8i16})
238
239 // Support integer abs
240 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
242
243 // Custom lower BUILD_VECTORs to minimize number of replace_lanes
244 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
245 MVT::v2f64})
247
248 if (Subtarget->hasFP16()) {
251 }
252
253 // We have custom shuffle lowering to expose the shuffle mask
254 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
255 MVT::v2f64})
257
258 if (Subtarget->hasFP16())
260
261 // Support splatting
262 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
263 MVT::v2f64})
265
266 setOperationAction(ISD::AVGCEILU, {MVT::v8i16, MVT::v16i8}, Legal);
267
268 // Custom lowering since wasm shifts must have a scalar shift amount
269 for (auto Op : {ISD::SHL, ISD::SRA, ISD::SRL})
270 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
272
273 // Custom lower lane accesses to expand out variable indices
275 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
276 MVT::v2f64})
278
279 // There is no i8x16.mul instruction
280 setOperationAction(ISD::MUL, MVT::v16i8, Expand);
281
282 // Expand integer operations supported for scalars but not SIMD
283 for (auto Op :
285 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
287
288 // But we do have integer min and max operations
289 for (auto Op : {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
290 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
292
293 // And we have popcnt for i8x16. It can be used to expand ctlz/cttz.
294 setOperationAction(ISD::CTPOP, MVT::v16i8, Legal);
295 setOperationAction(ISD::CTLZ, MVT::v16i8, Expand);
296 setOperationAction(ISD::CTTZ, MVT::v16i8, Expand);
297
298 // Custom lower bit counting operations for other types to scalarize them.
299 for (auto Op : {ISD::CTLZ, ISD::CTTZ, ISD::CTPOP})
300 for (auto T : {MVT::v8i16, MVT::v4i32, MVT::v2i64})
302
303 // Expand float operations supported for scalars but not SIMD
306 for (auto T : {MVT::v4f32, MVT::v2f64})
308
309 // Unsigned comparison operations are unavailable for i64x2 vectors.
311 setCondCodeAction(CC, MVT::v2i64, Custom);
312
313 // 64x2 conversions are not in the spec
314 for (auto Op :
316 for (auto T : {MVT::v2i64, MVT::v2f64})
318
319 // But saturating fp_to_int conversions are
321 setOperationAction(Op, MVT::v4i32, Custom);
322 if (Subtarget->hasFP16()) {
323 setOperationAction(Op, MVT::v8i16, Custom);
324 }
325 }
326
327 // Support vector extending
332 }
333
334 if (Subtarget->hasFP16()) {
335 setOperationAction(ISD::FMA, MVT::v8f16, Legal);
336 }
337
338 if (Subtarget->hasRelaxedSIMD()) {
341 }
342
343 // Partial MLA reductions.
345 setPartialReduceMLAAction(Op, MVT::v4i32, MVT::v16i8, Legal);
346 setPartialReduceMLAAction(Op, MVT::v4i32, MVT::v8i16, Legal);
347 }
348 }
349
350 // As a special case, these operators use the type to mean the type to
351 // sign-extend from.
353 if (!Subtarget->hasSignExt()) {
354 // Sign extends are legal only when extending a vector extract
355 auto Action = Subtarget->hasSIMD128() ? Custom : Expand;
356 for (auto T : {MVT::i8, MVT::i16, MVT::i32})
358 }
361
362 // Dynamic stack allocation: use the default expansion.
366
370
371 // Expand these forms; we pattern-match the forms that we can handle in isel.
372 for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64})
373 for (auto Op : {ISD::BR_CC, ISD::SELECT_CC})
375
376 if (Subtarget->hasReferenceTypes())
377 for (auto Op : {ISD::BR_CC, ISD::SELECT_CC})
378 for (auto T : {MVT::externref, MVT::funcref})
380
381 // There is no vector conditional select instruction
382 for (auto T :
383 {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64, MVT::v2f64})
385
386 // We have custom switch handling.
388
389 // WebAssembly doesn't have:
390 // - Floating-point extending loads.
391 // - Floating-point truncating stores.
392 // - i1 extending loads.
393 // - truncating SIMD stores and most extending loads
394 setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
395 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
396 for (auto T : MVT::integer_valuetypes())
397 for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
398 setLoadExtAction(Ext, T, MVT::i1, Promote);
399 if (Subtarget->hasSIMD128()) {
400 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v4f32,
401 MVT::v2f64}) {
402 for (auto MemT : MVT::fixedlen_vector_valuetypes()) {
403 if (MVT(T) != MemT) {
405 for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
406 setLoadExtAction(Ext, T, MemT, Expand);
407 }
408 }
409 }
410 // But some vector extending loads are legal
411 for (auto Ext : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}) {
412 setLoadExtAction(Ext, MVT::v8i16, MVT::v8i8, Legal);
413 setLoadExtAction(Ext, MVT::v4i32, MVT::v4i16, Legal);
414 setLoadExtAction(Ext, MVT::v2i64, MVT::v2i32, Legal);
415 }
416 setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f32, Legal);
417 }
418
419 // Don't do anything clever with build_pairs
421
422 // Trap lowers to wasm unreachable
423 setOperationAction(ISD::TRAP, MVT::Other, Legal);
425
426 // Exception handling intrinsics
430
432
433 // Always convert switches to br_tables unless there is only one case, which
434 // is equivalent to a simple branch. This reduces code size for wasm, and we
435 // defer possible jump table optimizations to the VM.
437}
438
447
456
// Select how AtomicExpand should legalize an atomic read-modify-write
// instruction: operations recognized by the (elided) case labels break out
// of the switch, everything else hits `default`.
// NOTE(review): the extraction behind this view dropped original lines 457
// (the qualified return type), 462-468 (the case labels naming the
// operations wasm handles natively) and 472 (the fallback return), so the
// exact operation set and expansion kinds are not visible here.
458WebAssemblyTargetLowering::shouldExpandAtomicRMWInIR(
459 const AtomicRMWInst *AI) const {
460 // We have wasm instructions for these
461 switch (AI->getOperation()) {
469 default:
470 break;
471 }
473}
474
// Decide whether a vector binary operation feeding an extract should instead
// be performed on the extracted scalars (mirrors X86TargetLowering).
// NOTE(review): original lines 481 and 486 — the conditions guarding the two
// early returns (presumably a target-opcode check and an operation-legality
// query on VecVT) — were dropped by the extraction; only the return
// statements remain visible.
475bool WebAssemblyTargetLowering::shouldScalarizeBinop(SDValue VecOp) const {
476 // Implementation copied from X86TargetLowering.
477 unsigned Opc = VecOp.getOpcode();
478
479 // Assume target opcodes can't be scalarized.
480 // TODO - do we have any exceptions?
482 return false;
483
484 // If the vector op is not supported, try to convert to scalar.
485 EVT VecVT = VecOp.getValueType();
487 return true;
488
489 // If the vector op is supported, but the scalar op is not, the transform may
490 // not be worthwhile.
491 EVT ScalarVT = VecVT.getScalarType();
492 return isOperationLegalOrCustomOrPromote(Opc, ScalarVT);
493}
494
// Factory hook used by SelectionDAGISel: forwards construction of the
// target's FastISel instance to WebAssembly::createFastISel, passing the
// function-lowering, library-function and libcall-lowering info through
// unchanged.
495FastISel *WebAssemblyTargetLowering::createFastISel(
496 FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo,
497 const LibcallLoweringInfo *LibcallLowering) const {
498 return WebAssembly::createFastISel(FuncInfo, LibInfo, LibcallLowering);
499}
500
// Pick the MVT used for scalar shift amounts: the shifted value's width
// rounded up to a power of two (minimum 8 bits), clamped back down to 32
// bits for wide shifts, which are lowered to compiler-rt libcalls that take
// an i32 count.
// NOTE(review): original lines 511, 515 and 516 — the first half of each
// assert and the declaration of `Result` — were dropped by the extraction;
// only the assert message strings and the final return remain visible.
501MVT WebAssemblyTargetLowering::getScalarShiftAmountTy(const DataLayout & /*DL*/,
502 EVT VT) const {
503 unsigned BitWidth = NextPowerOf2(VT.getSizeInBits() - 1);
504 if (BitWidth > 1 && BitWidth < 8)
505 BitWidth = 8;
506
507 if (BitWidth > 64) {
508 // The shift will be lowered to a libcall, and compiler-rt libcalls expect
509 // the count to be an i32.
510 BitWidth = 32;
512 "32-bit shift counts ought to be enough for anyone");
513 }
514
517 "Unable to represent scalar shift amount type");
518 return Result;
519}
520
521// Lower an fp-to-int conversion operator from the LLVM opcode, which has an
522// undefined result on invalid/overflow, to the WebAssembly opcode, which
523// traps on invalid/overflow.
// The pseudo is replaced by a CFG diamond: the current block tests whether
// the input is in range; FalseMBB performs the real (trapping) truncation;
// TrueMBB materializes a substitute constant (0 for unsigned, INT_MIN for
// signed); DoneMBB joins the two with a PHI.
// NOTE(review): the extraction dropped original lines 524-525 (the static
// function signature opening), 529 (presumably the MachineRegisterInfo
// reference `MRI`), 553 (the insertion iterator `It`) and 560 (likely the
// successor-probability transfer after the splice).
526 const TargetInstrInfo &TII,
527 bool IsUnsigned, bool Int64,
528 bool Float64, unsigned LoweredOpcode) {
530
531 Register OutReg = MI.getOperand(0).getReg();
532 Register InReg = MI.getOperand(1).getReg();
533
534 unsigned Abs = Float64 ? WebAssembly::ABS_F64 : WebAssembly::ABS_F32;
535 unsigned FConst = Float64 ? WebAssembly::CONST_F64 : WebAssembly::CONST_F32;
536 unsigned LT = Float64 ? WebAssembly::LT_F64 : WebAssembly::LT_F32;
537 unsigned GE = Float64 ? WebAssembly::GE_F64 : WebAssembly::GE_F32;
538 unsigned IConst = Int64 ? WebAssembly::CONST_I64 : WebAssembly::CONST_I32;
539 unsigned Eqz = WebAssembly::EQZ_I32;
540 unsigned And = WebAssembly::AND_I32;
541 int64_t Limit = Int64 ? INT64_MIN : INT32_MIN;
542 int64_t Substitute = IsUnsigned ? 0 : Limit;
// Exclusive upper bound on the in-range magnitude: -(double)Limit is 2^31
// (or 2^63); doubled for unsigned, whose range is twice as large.
543 double CmpVal = IsUnsigned ? -(double)Limit * 2.0 : -(double)Limit;
544 auto &Context = BB->getParent()->getFunction().getContext();
545 Type *Ty = Float64 ? Type::getDoubleTy(Context) : Type::getFloatTy(Context);
546
547 const BasicBlock *LLVMBB = BB->getBasicBlock();
548 MachineFunction *F = BB->getParent();
549 MachineBasicBlock *TrueMBB = F->CreateMachineBasicBlock(LLVMBB);
550 MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVMBB);
551 MachineBasicBlock *DoneMBB = F->CreateMachineBasicBlock(LLVMBB);
552
554 F->insert(It, FalseMBB);
555 F->insert(It, TrueMBB);
556 F->insert(It, DoneMBB);
557
558 // Transfer the remainder of BB and its successor edges to DoneMBB.
559 DoneMBB->splice(DoneMBB->begin(), BB, std::next(MI.getIterator()), BB->end());
561
562 BB->addSuccessor(TrueMBB);
563 BB->addSuccessor(FalseMBB);
564 TrueMBB->addSuccessor(DoneMBB);
565 FalseMBB->addSuccessor(DoneMBB);
566
567 unsigned Tmp0, Tmp1, CmpReg, EqzReg, FalseReg, TrueReg;
568 Tmp0 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
569 Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
570 CmpReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
571 EqzReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
572 FalseReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
573 TrueReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
574
// The original pseudo is deleted up front; everything below rebuilds the
// range check and conversion in its place.
575 MI.eraseFromParent();
576 // For signed numbers, we can do a single comparison to determine whether
577 // fabs(x) is within range.
578 if (IsUnsigned) {
579 Tmp0 = InReg;
580 } else {
581 BuildMI(BB, DL, TII.get(Abs), Tmp0).addReg(InReg);
582 }
583 BuildMI(BB, DL, TII.get(FConst), Tmp1)
584 .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, CmpVal)));
585 BuildMI(BB, DL, TII.get(LT), CmpReg).addReg(Tmp0).addReg(Tmp1);
586
587 // For unsigned numbers, we have to do a separate comparison with zero.
588 if (IsUnsigned) {
589 Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
590 Register SecondCmpReg =
591 MRI.createVirtualRegister(&WebAssembly::I32RegClass);
592 Register AndReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
593 BuildMI(BB, DL, TII.get(FConst), Tmp1)
594 .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, 0.0)));
595 BuildMI(BB, DL, TII.get(GE), SecondCmpReg).addReg(Tmp0).addReg(Tmp1);
596 BuildMI(BB, DL, TII.get(And), AndReg).addReg(CmpReg).addReg(SecondCmpReg);
597 CmpReg = AndReg;
598 }
599
600 BuildMI(BB, DL, TII.get(Eqz), EqzReg).addReg(CmpReg);
601
602 // Create the CFG diamond to select between doing the conversion or using
603 // the substitute value.
604 BuildMI(BB, DL, TII.get(WebAssembly::BR_IF)).addMBB(TrueMBB).addReg(EqzReg);
605 BuildMI(FalseMBB, DL, TII.get(LoweredOpcode), FalseReg).addReg(InReg);
606 BuildMI(FalseMBB, DL, TII.get(WebAssembly::BR)).addMBB(DoneMBB);
607 BuildMI(TrueMBB, DL, TII.get(IConst), TrueReg).addImm(Substitute);
608 BuildMI(*DoneMBB, DoneMBB->begin(), DL, TII.get(TargetOpcode::PHI), OutReg)
609 .addReg(FalseReg)
610 .addMBB(FalseMBB)
611 .addReg(TrueReg)
612 .addMBB(TrueMBB);
613
614 return DoneMBB;
615}
616
617// Lower a `MEMCPY` instruction into a CFG triangle around a `MEMORY_COPY`
618// instruction to handle the zero-length case.
// wasm's memory.copy traps on an out-of-bounds zero-length copy, whereas
// LLVM's memcpy intrinsic is defined to be a no-op, hence the runtime guard.
// NOTE(review): the extraction dropped original lines 619-620 (the static
// function signature opening), 622 (presumably the MachineRegisterInfo
// reference `MRI`), 671 (the insertion iterator `It`) and 677 (likely the
// successor-probability transfer after the splice).
621 const TargetInstrInfo &TII, bool Int64) {
623
624 MachineOperand DstMem = MI.getOperand(0);
625 MachineOperand SrcMem = MI.getOperand(1);
626 MachineOperand Dst = MI.getOperand(2);
627 MachineOperand Src = MI.getOperand(3);
628 MachineOperand Len = MI.getOperand(4);
629
630 // If the length is a constant, we don't actually need the check.
631 if (MachineInstr *Def = MRI.getVRegDef(Len.getReg())) {
632 if (Def->getOpcode() == WebAssembly::CONST_I32 ||
633 Def->getOpcode() == WebAssembly::CONST_I64) {
634 if (Def->getOperand(1).getImm() == 0) {
635 // A zero-length memcpy is a no-op.
636 MI.eraseFromParent();
637 return BB;
638 }
639 // A non-zero-length memcpy doesn't need a zero check.
640 unsigned MemoryCopy =
641 Int64 ? WebAssembly::MEMORY_COPY_A64 : WebAssembly::MEMORY_COPY_A32;
642 BuildMI(*BB, MI, DL, TII.get(MemoryCopy))
643 .add(DstMem)
644 .add(SrcMem)
645 .add(Dst)
646 .add(Src)
647 .add(Len);
648 MI.eraseFromParent();
649 return BB;
650 }
651 }
652
653 // We're going to add an extra use to `Len` to test if it's zero; that
654 // use shouldn't be a kill, even if the original use is.
655 MachineOperand NoKillLen = Len;
656 NoKillLen.setIsKill(false);
657
658 // Decide on which `MachineInstr` opcode we're going to use.
659 unsigned Eqz = Int64 ? WebAssembly::EQZ_I64 : WebAssembly::EQZ_I32;
660 unsigned MemoryCopy =
661 Int64 ? WebAssembly::MEMORY_COPY_A64 : WebAssembly::MEMORY_COPY_A32;
662
663 // Create two new basic blocks; one for the new `memory.copy` that we can
664 // branch over, and one for the rest of the instructions after the original
665 // `memory.copy`.
666 const BasicBlock *LLVMBB = BB->getBasicBlock();
667 MachineFunction *F = BB->getParent();
668 MachineBasicBlock *TrueMBB = F->CreateMachineBasicBlock(LLVMBB);
669 MachineBasicBlock *DoneMBB = F->CreateMachineBasicBlock(LLVMBB);
670
672 F->insert(It, TrueMBB);
673 F->insert(It, DoneMBB);
674
675 // Transfer the remainder of BB and its successor edges to DoneMBB.
676 DoneMBB->splice(DoneMBB->begin(), BB, std::next(MI.getIterator()), BB->end());
678
679 // Connect the CFG edges.
680 BB->addSuccessor(TrueMBB);
681 BB->addSuccessor(DoneMBB);
682 TrueMBB->addSuccessor(DoneMBB);
683
684 // Create a virtual register for the `Eqz` result.
685 unsigned EqzReg;
686 EqzReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
687
688 // Erase the original `memory.copy`.
689 MI.eraseFromParent();
690
691 // Test if `Len` is zero.
692 BuildMI(BB, DL, TII.get(Eqz), EqzReg).add(NoKillLen);
693
694 // Insert a new `memory.copy`.
695 BuildMI(TrueMBB, DL, TII.get(MemoryCopy))
696 .add(DstMem)
697 .add(SrcMem)
698 .add(Dst)
699 .add(Src)
700 .add(Len);
701
702 // Create the CFG triangle.
703 BuildMI(BB, DL, TII.get(WebAssembly::BR_IF)).addMBB(DoneMBB).addReg(EqzReg);
704 BuildMI(TrueMBB, DL, TII.get(WebAssembly::BR)).addMBB(DoneMBB);
705
706 return DoneMBB;
707}
708
709// Lower a `MEMSET` instruction into a CFG triangle around a `MEMORY_FILL`
710// instruction to handle the zero-length case.
// Mirrors LowerMemcpy above: memory.fill can trap on an out-of-bounds
// zero-length fill, while LLVM's memset intrinsic must be a no-op then.
// NOTE(review): the extraction dropped original lines 711-712 (the static
// function signature opening), 714 (presumably the MachineRegisterInfo
// reference `MRI`), 761 (the insertion iterator `It`) and 767 (likely the
// successor-probability transfer after the splice).
713 const TargetInstrInfo &TII, bool Int64) {
715
716 MachineOperand Mem = MI.getOperand(0);
717 MachineOperand Dst = MI.getOperand(1);
718 MachineOperand Val = MI.getOperand(2);
719 MachineOperand Len = MI.getOperand(3);
720
721 // If the length is a constant, we don't actually need the check.
722 if (MachineInstr *Def = MRI.getVRegDef(Len.getReg())) {
723 if (Def->getOpcode() == WebAssembly::CONST_I32 ||
724 Def->getOpcode() == WebAssembly::CONST_I64) {
725 if (Def->getOperand(1).getImm() == 0) {
726 // A zero-length memset is a no-op.
727 MI.eraseFromParent();
728 return BB;
729 }
730 // A non-zero-length memset doesn't need a zero check.
731 unsigned MemoryFill =
732 Int64 ? WebAssembly::MEMORY_FILL_A64 : WebAssembly::MEMORY_FILL_A32;
733 BuildMI(*BB, MI, DL, TII.get(MemoryFill))
734 .add(Mem)
735 .add(Dst)
736 .add(Val)
737 .add(Len);
738 MI.eraseFromParent();
739 return BB;
740 }
741 }
742
743 // We're going to add an extra use to `Len` to test if it's zero; that
744 // use shouldn't be a kill, even if the original use is.
745 MachineOperand NoKillLen = Len;
746 NoKillLen.setIsKill(false);
747
748 // Decide on which `MachineInstr` opcode we're going to use.
749 unsigned Eqz = Int64 ? WebAssembly::EQZ_I64 : WebAssembly::EQZ_I32;
750 unsigned MemoryFill =
751 Int64 ? WebAssembly::MEMORY_FILL_A64 : WebAssembly::MEMORY_FILL_A32;
752
753 // Create two new basic blocks; one for the new `memory.fill` that we can
754 // branch over, and one for the rest of the instructions after the original
755 // `memory.fill`.
756 const BasicBlock *LLVMBB = BB->getBasicBlock();
757 MachineFunction *F = BB->getParent();
758 MachineBasicBlock *TrueMBB = F->CreateMachineBasicBlock(LLVMBB);
759 MachineBasicBlock *DoneMBB = F->CreateMachineBasicBlock(LLVMBB);
760
762 F->insert(It, TrueMBB);
763 F->insert(It, DoneMBB);
764
765 // Transfer the remainder of BB and its successor edges to DoneMBB.
766 DoneMBB->splice(DoneMBB->begin(), BB, std::next(MI.getIterator()), BB->end());
768
769 // Connect the CFG edges.
770 BB->addSuccessor(TrueMBB);
771 BB->addSuccessor(DoneMBB);
772 TrueMBB->addSuccessor(DoneMBB);
773
774 // Create a virtual register for the `Eqz` result.
775 unsigned EqzReg;
776 EqzReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
777
778 // Erase the original `memory.fill`.
779 MI.eraseFromParent();
780
781 // Test if `Len` is zero.
782 BuildMI(BB, DL, TII.get(Eqz), EqzReg).add(NoKillLen);
783
784 // Insert a new `memory.fill`.
785 BuildMI(TrueMBB, DL, TII.get(MemoryFill)).add(Mem).add(Dst).add(Val).add(Len);
786
787 // Create the CFG triangle.
788 BuildMI(BB, DL, TII.get(WebAssembly::BR_IF)).addMBB(DoneMBB).addReg(EqzReg);
789 BuildMI(TrueMBB, DL, TII.get(WebAssembly::BR)).addMBB(DoneMBB);
790
791 return DoneMBB;
792}
793
// Fuse a CALL_PARAMS / CALL_RESULTS (or RET_CALL_RESULTS) pseudo pair into a
// single real CALL / CALL_INDIRECT / RET_CALL / RET_CALL_INDIRECT machine
// instruction, handling the funcref call_indirect-through-table protocol.
// NOTE(review): the extraction dropped original line 795 (the parameter list
// — presumably `MachineInstr &CallResults, DebugLoc DL, MachineBasicBlock
// *BB, ...` given the uses below), lines 863 and 865 (the two calls that
// select/create the funcref vs. generic table symbol for `Table`), and line
// 895 (the funcref table symbol lookup in the cleanup sequence).
794static MachineBasicBlock *
796 const WebAssemblySubtarget *Subtarget,
797 const TargetInstrInfo &TII) {
798 MachineInstr &CallParams = *CallResults.getPrevNode();
799 assert(CallParams.getOpcode() == WebAssembly::CALL_PARAMS);
800 assert(CallResults.getOpcode() == WebAssembly::CALL_RESULTS ||
801 CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS);
802
803 bool IsIndirect =
804 CallParams.getOperand(0).isReg() || CallParams.getOperand(0).isFI();
805 bool IsRetCall = CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS;
806
807 bool IsFuncrefCall = false;
808 if (IsIndirect && CallParams.getOperand(0).isReg()) {
809 Register Reg = CallParams.getOperand(0).getReg();
810 const MachineFunction *MF = BB->getParent();
811 const MachineRegisterInfo &MRI = MF->getRegInfo();
812 const TargetRegisterClass *TRC = MRI.getRegClass(Reg);
813 IsFuncrefCall = (TRC == &WebAssembly::FUNCREFRegClass);
814 assert(!IsFuncrefCall || Subtarget->hasReferenceTypes());
815 }
816
// Pick the real opcode from the (indirect, tail-call) combination.
817 unsigned CallOp;
818 if (IsIndirect && IsRetCall) {
819 CallOp = WebAssembly::RET_CALL_INDIRECT;
820 } else if (IsIndirect) {
821 CallOp = WebAssembly::CALL_INDIRECT;
822 } else if (IsRetCall) {
823 CallOp = WebAssembly::RET_CALL;
824 } else {
825 CallOp = WebAssembly::CALL;
826 }
827
828 MachineFunction &MF = *BB->getParent();
829 const MCInstrDesc &MCID = TII.get(CallOp);
830 MachineInstrBuilder MIB(MF, MF.CreateMachineInstr(MCID, DL));
831
832 // Move the function pointer to the end of the arguments for indirect calls
833 if (IsIndirect) {
834 auto FnPtr = CallParams.getOperand(0);
835 CallParams.removeOperand(0);
836
837 // For funcrefs, call_indirect is done through __funcref_call_table and the
838 // funcref is always installed in slot 0 of the table, therefore instead of
839 // having the function pointer added at the end of the params list, a zero
840 // (the index in
841 // __funcref_call_table is added).
842 if (IsFuncrefCall) {
843 Register RegZero =
844 MF.getRegInfo().createVirtualRegister(&WebAssembly::I32RegClass);
845 MachineInstrBuilder MIBC0 =
846 BuildMI(MF, DL, TII.get(WebAssembly::CONST_I32), RegZero).addImm(0);
847
848 BB->insert(CallResults.getIterator(), MIBC0);
849 MachineInstrBuilder(MF, CallParams).addReg(RegZero);
850 } else
851 CallParams.addOperand(FnPtr);
852 }
853
// Result registers come from the CALL_RESULTS pseudo's defs.
854 for (auto Def : CallResults.defs())
855 MIB.add(Def);
856
857 if (IsIndirect) {
858 // Placeholder for the type index.
859 // This gets replaced with the correct value in WebAssemblyMCInstLower.cpp
860 MIB.addImm(0);
861 // The table into which this call_indirect indexes.
862 MCSymbolWasm *Table = IsFuncrefCall
864 MF.getContext(), Subtarget)
866 MF.getContext(), Subtarget);
867 if (Subtarget->hasCallIndirectOverlong()) {
868 MIB.addSym(Table);
869 } else {
870 // For the MVP there is at most one table whose number is 0, but we can't
871 // write a table symbol or issue relocations. Instead we just ensure the
872 // table is live and write a zero.
873 Table->setNoStrip();
874 MIB.addImm(0);
875 }
876 }
877
878 for (auto Use : CallParams.uses())
879 MIB.add(Use);
880
// The fused call replaces the pseudo pair in place.
881 BB->insert(CallResults.getIterator(), MIB);
882 CallParams.eraseFromParent();
883 CallResults.eraseFromParent();
884
885 // If this is a funcref call, to avoid hidden GC roots, we need to clear the
886 // table slot with ref.null upon call_indirect return.
887 //
888 // This generates the following code, which comes right after a call_indirect
889 // of a funcref:
890 //
891 // i32.const 0
892 // ref.null func
893 // table.set __funcref_call_table
894 if (IsIndirect && IsFuncrefCall) {
896 MF.getContext(), Subtarget);
897 Register RegZero =
898 MF.getRegInfo().createVirtualRegister(&WebAssembly::I32RegClass);
899 MachineInstr *Const0 =
900 BuildMI(MF, DL, TII.get(WebAssembly::CONST_I32), RegZero).addImm(0);
901 BB->insertAfter(MIB.getInstr()->getIterator(), Const0);
902
903 Register RegFuncref =
904 MF.getRegInfo().createVirtualRegister(&WebAssembly::FUNCREFRegClass);
905 MachineInstr *RefNull =
906 BuildMI(MF, DL, TII.get(WebAssembly::REF_NULL_FUNCREF), RegFuncref);
907 BB->insertAfter(Const0->getIterator(), RefNull);
908
909 MachineInstr *TableSet =
910 BuildMI(MF, DL, TII.get(WebAssembly::TABLE_SET_FUNCREF))
911 .addSym(Table)
912 .addReg(RegZero)
913 .addReg(RegFuncref);
914 BB->insertAfter(RefNull->getIterator(), TableSet);
915 }
916
917 return BB;
918}
919
920MachineBasicBlock *WebAssemblyTargetLowering::EmitInstrWithCustomInserter(
921 MachineInstr &MI, MachineBasicBlock *BB) const {
922 const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
923 DebugLoc DL = MI.getDebugLoc();
924
925 switch (MI.getOpcode()) {
926 default:
927 llvm_unreachable("Unexpected instr type to insert");
928 case WebAssembly::FP_TO_SINT_I32_F32:
929 return LowerFPToInt(MI, DL, BB, TII, false, false, false,
930 WebAssembly::I32_TRUNC_S_F32);
931 case WebAssembly::FP_TO_UINT_I32_F32:
932 return LowerFPToInt(MI, DL, BB, TII, true, false, false,
933 WebAssembly::I32_TRUNC_U_F32);
934 case WebAssembly::FP_TO_SINT_I64_F32:
935 return LowerFPToInt(MI, DL, BB, TII, false, true, false,
936 WebAssembly::I64_TRUNC_S_F32);
937 case WebAssembly::FP_TO_UINT_I64_F32:
938 return LowerFPToInt(MI, DL, BB, TII, true, true, false,
939 WebAssembly::I64_TRUNC_U_F32);
940 case WebAssembly::FP_TO_SINT_I32_F64:
941 return LowerFPToInt(MI, DL, BB, TII, false, false, true,
942 WebAssembly::I32_TRUNC_S_F64);
943 case WebAssembly::FP_TO_UINT_I32_F64:
944 return LowerFPToInt(MI, DL, BB, TII, true, false, true,
945 WebAssembly::I32_TRUNC_U_F64);
946 case WebAssembly::FP_TO_SINT_I64_F64:
947 return LowerFPToInt(MI, DL, BB, TII, false, true, true,
948 WebAssembly::I64_TRUNC_S_F64);
949 case WebAssembly::FP_TO_UINT_I64_F64:
950 return LowerFPToInt(MI, DL, BB, TII, true, true, true,
951 WebAssembly::I64_TRUNC_U_F64);
952 case WebAssembly::MEMCPY_A32:
953 return LowerMemcpy(MI, DL, BB, TII, false);
954 case WebAssembly::MEMCPY_A64:
955 return LowerMemcpy(MI, DL, BB, TII, true);
956 case WebAssembly::MEMSET_A32:
957 return LowerMemset(MI, DL, BB, TII, false);
958 case WebAssembly::MEMSET_A64:
959 return LowerMemset(MI, DL, BB, TII, true);
960 case WebAssembly::CALL_RESULTS:
961 case WebAssembly::RET_CALL_RESULTS:
962 return LowerCallResults(MI, DL, BB, Subtarget, TII);
963 }
964}
965
966std::pair<unsigned, const TargetRegisterClass *>
967WebAssemblyTargetLowering::getRegForInlineAsmConstraint(
968 const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
969 // First, see if this is a constraint that directly corresponds to a
970 // WebAssembly register class.
971 if (Constraint.size() == 1) {
972 switch (Constraint[0]) {
973 case 'r':
974 assert(VT != MVT::iPTR && "Pointer MVT not expected here");
975 if (Subtarget->hasSIMD128() && VT.isVector()) {
976 if (VT.getSizeInBits() == 128)
977 return std::make_pair(0U, &WebAssembly::V128RegClass);
978 }
979 if (VT.isInteger() && !VT.isVector()) {
980 if (VT.getSizeInBits() <= 32)
981 return std::make_pair(0U, &WebAssembly::I32RegClass);
982 if (VT.getSizeInBits() <= 64)
983 return std::make_pair(0U, &WebAssembly::I64RegClass);
984 }
985 if (VT.isFloatingPoint() && !VT.isVector()) {
986 switch (VT.getSizeInBits()) {
987 case 32:
988 return std::make_pair(0U, &WebAssembly::F32RegClass);
989 case 64:
990 return std::make_pair(0U, &WebAssembly::F64RegClass);
991 default:
992 break;
993 }
994 }
995 break;
996 default:
997 break;
998 }
999 }
1000
1001 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
1002}
1003
1004bool WebAssemblyTargetLowering::isCheapToSpeculateCttz(Type *Ty) const {
1005 // Assume ctz is a relatively cheap operation.
1006 return true;
1007}
1008
1009bool WebAssemblyTargetLowering::isCheapToSpeculateCtlz(Type *Ty) const {
1010 // Assume clz is a relatively cheap operation.
1011 return true;
1012}
1013
1014bool WebAssemblyTargetLowering::isLegalAddressingMode(const DataLayout &DL,
1015 const AddrMode &AM,
1016 Type *Ty, unsigned AS,
1017 Instruction *I) const {
1018 // WebAssembly offsets are added as unsigned without wrapping. The
1019 // isLegalAddressingMode gives us no way to determine if wrapping could be
1020 // happening, so we approximate this by accepting only non-negative offsets.
1021 if (AM.BaseOffs < 0)
1022 return false;
1023
1024 // WebAssembly has no scale register operands.
1025 if (AM.Scale != 0)
1026 return false;
1027
1028 // Everything else is legal.
1029 return true;
1030}
1031
1032bool WebAssemblyTargetLowering::allowsMisalignedMemoryAccesses(
1033 EVT /*VT*/, unsigned /*AddrSpace*/, Align /*Align*/,
1034 MachineMemOperand::Flags /*Flags*/, unsigned *Fast) const {
1035 // WebAssembly supports unaligned accesses, though it should be declared
1036 // with the p2align attribute on loads and stores which do so, and there
1037 // may be a performance impact. We tell LLVM they're "fast" because
1038 // for the kinds of things that LLVM uses this for (merging adjacent stores
1039 // of constants, etc.), WebAssembly implementations will either want the
1040 // unaligned access or they'll split anyway.
1041 if (Fast)
1042 *Fast = 1;
1043 return true;
1044}
1045
1046bool WebAssemblyTargetLowering::isIntDivCheap(EVT VT,
1047 AttributeList Attr) const {
1048 // The current thinking is that wasm engines will perform this optimization,
1049 // so we can save on code size.
1050 return true;
1051}
1052
1053bool WebAssemblyTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
1054 EVT ExtT = ExtVal.getValueType();
1055 SDValue N0 = peekThroughFreeze(ExtVal->getOperand(0));
1056 auto *Load = dyn_cast<LoadSDNode>(N0);
1057 if (!Load)
1058 return false;
1059 EVT MemT = Load->getValueType(0);
1060 return (ExtT == MVT::v8i16 && MemT == MVT::v8i8) ||
1061 (ExtT == MVT::v4i32 && MemT == MVT::v4i16) ||
1062 (ExtT == MVT::v2i64 && MemT == MVT::v2i32);
1063}
1064
bool WebAssemblyTargetLowering::isOffsetFoldingLegal(
    const GlobalAddressSDNode *GA) const {
  // Wasm doesn't support function addresses with offsets, so folding a
  // constant offset into a function symbol would be unrepresentable.
  const GlobalValue *GV = GA->getGlobal();
  // NOTE(review): the return statement was lost in this extraction;
  // presumably it rejects folding when GV is a Function — confirm against
  // the original source.
}
1071
EVT WebAssemblyTargetLowering::getSetCCResultType(const DataLayout &DL,
                                                  LLVMContext &C,
                                                  EVT VT) const {
  // Vector compares produce a vector mask; without the FP16 feature an f16
  // vector setcc gets an i1-element result so legalization can handle it.
  if (VT.isVector()) {
    if (VT.getVectorElementType() == MVT::f16 && !Subtarget->hasFP16())
      return VT.changeElementType(C, MVT::i1);

    // NOTE(review): the return statement for the general vector case was
    // lost in this extraction.
  }

  // So far, all branch instructions in Wasm take an I32 condition.
  // The default TargetLowering::getSetCCResultType returns the pointer size,
  // which would be useful to reduce instruction counts when testing
  // against 64-bit pointers/values if at some point Wasm supports that.
  return EVT::getIntegerVT(C, 32);
}
1088
// Describe the memory touched by WebAssembly memory intrinsics so the
// backend can attach MachineMemOperands to them. Unknown intrinsics fall
// through the default case and report nothing.
//
// NOTE(review): this extraction lost the first signature line (the CallBase
// and Infos parameters), the local IntrinsicInfo declaration, and the
// per-case `Info.opc = ...` / `Info.flags = ...` assignments — confirm
// against the original source.
void WebAssemblyTargetLowering::getTgtMemIntrinsic(
    MachineFunction &MF, unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::wasm_memory_atomic_notify:
    // Notify touches an i32 at a 4-byte-aligned address.
    Info.memVT = MVT::i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(4);
    // atomic.notify instruction does not really load the memory specified with
    // this argument, but MachineMemOperand should either be load or store, so
    // we set this to a load.
    // FIXME Volatile isn't really correct, but currently all LLVM atomic
    // instructions are treated as volatiles in the backend, so we should be
    // consistent. The same applies for wasm_atomic_wait intrinsics too.
    Infos.push_back(Info);
    return;
  case Intrinsic::wasm_memory_atomic_wait32:
    // 32-bit atomic wait: reads an i32 at a 4-byte-aligned address.
    Info.memVT = MVT::i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(4);
    Infos.push_back(Info);
    return;
  case Intrinsic::wasm_memory_atomic_wait64:
    // 64-bit atomic wait: reads an i64 at an 8-byte-aligned address.
    Info.memVT = MVT::i64;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(8);
    Infos.push_back(Info);
    return;
  case Intrinsic::wasm_loadf16_f32:
    // Loads a 2-byte f16 value (widened to f32 by the instruction).
    Info.memVT = MVT::f16;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(2);
    Infos.push_back(Info);
    return;
  case Intrinsic::wasm_storef16_f32:
    // Stores a 2-byte f16 value; the pointer is the second operand here.
    Info.memVT = MVT::f16;
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = 0;
    Info.align = Align(2);
    Infos.push_back(Info);
    return;
  default:
    return;
  }
}
1149
// Report target-specific known-zero bits for WebAssembly nodes so generic
// DAG combines can take advantage of them.
void WebAssemblyTargetLowering::computeKnownBitsForTargetNode(
    const SDValue Op, KnownBits &Known, const APInt &DemandedElts,
    const SelectionDAG &DAG, unsigned Depth) const {
  switch (Op.getOpcode()) {
  default:
    break;
  // NOTE(review): an intrinsic case label (presumably
  // `case ISD::INTRINSIC_WO_CHAIN: {`) was lost in this extraction.
    unsigned IntNo = Op.getConstantOperandVal(0);
    switch (IntNo) {
    default:
      break;
    case Intrinsic::wasm_bitmask: {
      // bitmask produces one output bit per input lane, so every bit above
      // the lane count is known to be zero.
      unsigned BitWidth = Known.getBitWidth();
      EVT VT = Op.getOperand(1).getSimpleValueType();
      unsigned PossibleBits = VT.getVectorNumElements();
      APInt ZeroMask = APInt::getHighBitsSet(BitWidth, BitWidth - PossibleBits);
      Known.Zero |= ZeroMask;
      break;
    }
    }
    break;
  }
  case WebAssemblyISD::EXTEND_LOW_U:
  case WebAssemblyISD::EXTEND_HIGH_U: {
    // We know the high half, of each destination vector element, will be zero.
    SDValue SrcOp = Op.getOperand(0);
    EVT VT = SrcOp.getSimpleValueType();
    unsigned BitWidth = Known.getBitWidth();
    if (VT == MVT::v8i8 || VT == MVT::v16i8) {
      assert(BitWidth >= 8 && "Unexpected width!");
      // NOTE(review): the `Mask` declaration was lost in this extraction
      // (likely an APInt covering the bits above bit 8).
      Known.Zero |= Mask;
    } else if (VT == MVT::v4i16 || VT == MVT::v8i16) {
      assert(BitWidth >= 16 && "Unexpected width!");
      // NOTE(review): the `Mask` declaration was lost in this extraction
      // (likely an APInt covering the bits above bit 16).
      Known.Zero |= Mask;
    } else if (VT == MVT::v2i32 || VT == MVT::v4i32) {
      assert(BitWidth >= 32 && "Unexpected width!");
      // NOTE(review): the `Mask` declaration was lost in this extraction
      // (likely an APInt covering the bits above bit 32).
      Known.Zero |= Mask;
    }
    break;
  }
  // For 128-bit addition if the upper bits are all zero then it's known that
  // the upper bits of the result will have all bits guaranteed zero except the
  // first.
  case WebAssemblyISD::I64_ADD128:
    if (Op.getResNo() == 1) {
      SDValue LHS_HI = Op.getOperand(1);
      SDValue RHS_HI = Op.getOperand(3);
      if (isNullConstant(LHS_HI) && isNullConstant(RHS_HI))
        Known.Zero.setBitsFrom(1);
    }
    break;
  }
}
1206
// NOTE(review): the return-type line of this definition (presumably
// TargetLoweringBase::LegalizeTypeAction) and the trailing default return
// were lost in this extraction — confirm against the original source.
WebAssemblyTargetLowering::getPreferredVectorAction(MVT VT) const {
  // Prefer widening fixed vectors whose element type matches a legal 128-bit
  // lane type rather than scalarizing or splitting them.
  if (VT.isFixedLengthVector()) {
    MVT EltVT = VT.getVectorElementType();
    // We have legal vector types with these lane types, so widening the
    // vector would let us use some of the lanes directly without having to
    // extend or truncate values.
    if (EltVT == MVT::i8 || EltVT == MVT::i16 || EltVT == MVT::i32 ||
        EltVT == MVT::i64 || EltVT == MVT::f32 || EltVT == MVT::f64)
      return TypeWidenVector;
  }

  // NOTE(review): the fallback return statement was lost in this extraction.
}
1221
1222bool WebAssemblyTargetLowering::isFMAFasterThanFMulAndFAdd(
1223 const MachineFunction &MF, EVT VT) const {
1224 if (!Subtarget->hasFP16() || !VT.isVector())
1225 return false;
1226
1227 EVT ScalarVT = VT.getScalarType();
1228 if (!ScalarVT.isSimple())
1229 return false;
1230
1231 return ScalarVT.getSimpleVT().SimpleTy == MVT::f16;
1232}
1233
1234bool WebAssemblyTargetLowering::shouldSimplifyDemandedVectorElts(
1235 SDValue Op, const TargetLoweringOpt &TLO) const {
1236 // ISel process runs DAGCombiner after legalization; this step is called
1237 // SelectionDAG optimization phase. This post-legalization combining process
1238 // runs DAGCombiner on each node, and if there was a change to be made,
1239 // re-runs legalization again on it and its user nodes to make sure
1240 // everythiing is in a legalized state.
1241 //
1242 // The legalization calls lowering routines, and we do our custom lowering for
1243 // build_vectors (LowerBUILD_VECTOR), which converts undef vector elements
1244 // into zeros. But there is a set of routines in DAGCombiner that turns unused
1245 // (= not demanded) nodes into undef, among which SimplifyDemandedVectorElts
1246 // turns unused vector elements into undefs. But this routine does not work
1247 // with our custom LowerBUILD_VECTOR, which turns undefs into zeros. This
1248 // combination can result in a infinite loop, in which undefs are converted to
1249 // zeros in legalization and back to undefs in combining.
1250 //
1251 // So after DAG is legalized, we prevent SimplifyDemandedVectorElts from
1252 // running for build_vectors.
1253 if (Op.getOpcode() == ISD::BUILD_VECTOR && TLO.LegalOps && TLO.LegalTys)
1254 return false;
1255 return true;
1256}
1257
1258//===----------------------------------------------------------------------===//
1259// WebAssembly Lowering private implementation.
1260//===----------------------------------------------------------------------===//
1261
1262//===----------------------------------------------------------------------===//
1263// Lowering Code
1264//===----------------------------------------------------------------------===//
1265
// Emit a DiagnosticInfoUnsupported for the current function; lowering
// continues after the diagnostic (this reports, it does not abort).
// NOTE(review): the local `MachineFunction` lookup (something like
// `MachineFunction &MF = DAG.getMachineFunction();`) was lost in this
// extraction — `MF` below is otherwise undeclared.
static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg) {
  DAG.getContext()->diagnose(
      DiagnosticInfoUnsupported(MF.getFunction(), Msg, DL.getDebugLoc()));
}
1271
// Test whether the given calling convention is supported.
// NOTE(review): the function signature line (presumably
// `static bool callingConvSupported(CallingConv::ID CallConv) {`) was lost
// in this extraction.
  // We currently support the language-independent target-independent
  // conventions. We don't yet have a way to annotate calls with properties like
  // "cold", and we don't have any call-clobbered registers, so these are mostly
  // all handled the same.
  return CallConv == CallingConv::C || CallConv == CallingConv::Fast ||
         CallConv == CallingConv::Cold ||
         CallConv == CallingConv::PreserveMost ||
         CallConv == CallingConv::PreserveAll ||
         CallConv == CallingConv::CXX_FAST_TLS ||
         // NOTE(review): one disjunct was elided here by the extraction —
         // given the WASM_EmscriptenInvoke handling in LowerCall below, it is
         // presumably `CallConv == CallingConv::WASM_EmscriptenInvoke ||`.
         CallConv == CallingConv::Swift || CallConv == CallingConv::SwiftTail;
}
1286
// Lower an outgoing call into WebAssemblyISD::CALL / RET_CALL, handling
// tail-call eligibility, byval copies, swiftcc ABI padding arguments, the
// vararg out-buffer, and funcref callees (table.set + call_indirect).
//
// NOTE(review): several single lines of this function (local SmallVector
// declarations and continuation lines of multi-line call expressions) were
// lost in this extraction; each gap is flagged inline below.
SDValue
WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                     SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc DL = CLI.DL;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  MachineFunction &MF = DAG.getMachineFunction();
  auto Layout = MF.getDataLayout();

  CallingConv::ID CallConv = CLI.CallConv;
  // Diagnose unsupported conventions / patchpoints up front; `fail` emits a
  // diagnostic but lowering continues best-effort.
  if (!callingConvSupported(CallConv))
    fail(DL, DAG,
         "WebAssembly doesn't support language-specific or target-specific "
         "calling conventions yet");
  if (CLI.IsPatchPoint)
    fail(DL, DAG, "WebAssembly doesn't support patch point yet");

  if (CLI.IsTailCall) {
    // Demote an infeasible tail call to a regular call; only `musttail`
    // calls get a hard diagnostic.
    auto NoTail = [&](const char *Msg) {
      if (CLI.CB && CLI.CB->isMustTailCall())
        fail(DL, DAG, Msg);
      CLI.IsTailCall = false;
    };

    if (!Subtarget->hasTailCall())
      NoTail("WebAssembly 'tail-call' feature not enabled");

    // Varargs calls cannot be tail calls because the buffer is on the stack
    if (CLI.IsVarArg)
      NoTail("WebAssembly does not support varargs tail calls");

    // Do not tail call unless caller and callee return types match
    const Function &F = MF.getFunction();
    const TargetMachine &TM = getTargetMachine();
    Type *RetTy = F.getReturnType();
    SmallVector<MVT, 4> CallerRetTys;
    SmallVector<MVT, 4> CalleeRetTys;
    computeLegalValueVTs(F, TM, RetTy, CallerRetTys);
    computeLegalValueVTs(F, TM, CLI.RetTy, CalleeRetTys);
    bool TypesMatch = CallerRetTys.size() == CalleeRetTys.size() &&
                      std::equal(CallerRetTys.begin(), CallerRetTys.end(),
                                 CalleeRetTys.begin());
    if (!TypesMatch)
      NoTail("WebAssembly tail call requires caller and callee return types to "
             "match");

    // If pointers to local stack values are passed, we cannot tail call
    if (CLI.CB) {
      for (auto &Arg : CLI.CB->args()) {
        Value *Val = Arg.get();
        // Trace the value back through pointer operations
        while (true) {
          Value *Src = Val->stripPointerCastsAndAliases();
          if (auto *GEP = dyn_cast<GetElementPtrInst>(Src))
            Src = GEP->getPointerOperand();
          if (Val == Src)
            break;
          Val = Src;
        }
        if (isa<AllocaInst>(Val)) {
          NoTail(
              "WebAssembly does not support tail calling with stack arguments");
          break;
        }
      }
    }
  }

  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;

  // The generic code may have added an sret argument. If we're lowering an
  // invoke function, the ABI requires that the function pointer be the first
  // argument, so we may have to swap the arguments.
  if (CallConv == CallingConv::WASM_EmscriptenInvoke && Outs.size() >= 2 &&
      Outs[0].Flags.isSRet()) {
    std::swap(Outs[0], Outs[1]);
    std::swap(OutVals[0], OutVals[1]);
  }

  bool HasSwiftSelfArg = false;
  bool HasSwiftErrorArg = false;
  bool HasSwiftAsyncArg = false;
  unsigned NumFixedArgs = 0;
  for (unsigned I = 0; I < Outs.size(); ++I) {
    const ISD::OutputArg &Out = Outs[I];
    SDValue &OutVal = OutVals[I];
    HasSwiftSelfArg |= Out.Flags.isSwiftSelf();
    HasSwiftErrorArg |= Out.Flags.isSwiftError();
    HasSwiftAsyncArg |= Out.Flags.isSwiftAsync();
    if (Out.Flags.isNest())
      fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
    if (Out.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
    if (Out.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
    // NOTE(review): the isInConsecutiveRegsLast() guard for the next fail()
    // was lost in this extraction.
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
    if (Out.Flags.isByVal() && Out.Flags.getByValSize() != 0) {
      // Copy byval arguments into a fresh stack object and pass its address
      // instead of the original pointer.
      auto &MFI = MF.getFrameInfo();
      int FI = MFI.CreateStackObject(Out.Flags.getByValSize(),
                                     // NOTE(review): the alignment argument
                                     // line was lost in this extraction.
                                     /*isSS=*/false);
      SDValue SizeNode =
          DAG.getConstant(Out.Flags.getByValSize(), DL, MVT::i32);
      SDValue FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
      Chain = DAG.getMemcpy(Chain, DL, FINode, OutVal, SizeNode,
                            // NOTE(review): the alignment argument line was
                            // lost in this extraction.
                            /*isVolatile*/ false, /*AlwaysInline=*/false,
                            /*CI=*/nullptr, std::nullopt, MachinePointerInfo(),
                            MachinePointerInfo());
      OutVal = FINode;
    }
    // Count the number of fixed args *after* legalization.
    NumFixedArgs += !Out.Flags.isVarArg();
  }

  bool IsVarArg = CLI.IsVarArg;
  auto PtrVT = getPointerTy(Layout);

  // For swiftcc and swifttailcc, emit additional swiftself, swifterror, and
  // (for swifttailcc) swiftasync arguments if there aren't. These additional
  // arguments are also added for callee signature. They are necessary to match
  // callee and caller signature for indirect call.
  if (CallConv == CallingConv::Swift || CallConv == CallingConv::SwiftTail) {
    Type *PtrTy = PointerType::getUnqual(*DAG.getContext());
    if (!HasSwiftSelfArg) {
      NumFixedArgs++;
      ISD::ArgFlagsTy Flags;
      Flags.setSwiftSelf();
      ISD::OutputArg Arg(Flags, PtrVT, EVT(PtrVT), PtrTy, 0, 0);
      CLI.Outs.push_back(Arg);
      SDValue ArgVal = DAG.getUNDEF(PtrVT);
      CLI.OutVals.push_back(ArgVal);
    }
    if (!HasSwiftErrorArg) {
      NumFixedArgs++;
      ISD::ArgFlagsTy Flags;
      Flags.setSwiftError();
      ISD::OutputArg Arg(Flags, PtrVT, EVT(PtrVT), PtrTy, 0, 0);
      CLI.Outs.push_back(Arg);
      SDValue ArgVal = DAG.getUNDEF(PtrVT);
      CLI.OutVals.push_back(ArgVal);
    }
    if (CallConv == CallingConv::SwiftTail && !HasSwiftAsyncArg) {
      NumFixedArgs++;
      ISD::ArgFlagsTy Flags;
      Flags.setSwiftAsync();
      ISD::OutputArg Arg(Flags, PtrVT, EVT(PtrVT), PtrTy, 0, 0);
      CLI.Outs.push_back(Arg);
      SDValue ArgVal = DAG.getUNDEF(PtrVT);
      CLI.OutVals.push_back(ArgVal);
    }
  }

  // Analyze operands of the call, assigning locations to each operand.
  // NOTE(review): the `ArgLocs` SmallVector declaration was lost in this
  // extraction.
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());

  if (IsVarArg) {
    // Outgoing non-fixed arguments are placed in a buffer. First
    // compute their offsets and the total amount of buffer space needed.
    for (unsigned I = NumFixedArgs; I < Outs.size(); ++I) {
      const ISD::OutputArg &Out = Outs[I];
      SDValue &Arg = OutVals[I];
      EVT VT = Arg.getValueType();
      assert(VT != MVT::iPTR && "Legalized args should be concrete");
      Type *Ty = VT.getTypeForEVT(*DAG.getContext());
      Align Alignment =
          std::max(Out.Flags.getNonZeroOrigAlign(), Layout.getABITypeAlign(Ty));
      unsigned Offset =
          CCInfo.AllocateStack(Layout.getTypeAllocSize(Ty), Alignment);
      CCInfo.addLoc(CCValAssign::getMem(ArgLocs.size(), VT.getSimpleVT(),
                                        Offset, VT.getSimpleVT(),
      // NOTE(review): the closing arguments of addLoc were lost in this
      // extraction.
    }
  }

  unsigned NumBytes = CCInfo.getAlignedCallFrameSize();

  SDValue FINode;
  if (IsVarArg && NumBytes) {
    // For non-fixed arguments, next emit stores to store the argument values
    // to the stack buffer at the offsets computed above.
    MaybeAlign StackAlign = Layout.getStackAlignment();
    assert(StackAlign && "data layout string is missing stack alignment");
    int FI = MF.getFrameInfo().CreateStackObject(NumBytes, *StackAlign,
                                                 /*isSS=*/false);
    unsigned ValNo = 0;
    // NOTE(review): the `Chains` SmallVector declaration was lost in this
    // extraction.
    for (SDValue Arg : drop_begin(OutVals, NumFixedArgs)) {
      assert(ArgLocs[ValNo].getValNo() == ValNo &&
             "ArgLocs should remain in order and only hold varargs args");
      unsigned Offset = ArgLocs[ValNo++].getLocMemOffset();
      FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
      SDValue Add = DAG.getNode(ISD::ADD, DL, PtrVT, FINode,
                                DAG.getConstant(Offset, DL, PtrVT));
      Chains.push_back(
          DAG.getStore(Chain, DL, Arg, Add,
      // NOTE(review): the MachinePointerInfo argument / closing parentheses
      // were lost in this extraction.
    }
    if (!Chains.empty())
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
  } else if (IsVarArg) {
    FINode = DAG.getIntPtrConstant(0, DL);
  }

  if (Callee->getOpcode() == ISD::GlobalAddress) {
    // If the callee is a GlobalAddress node (quite common, every direct call
    // is) turn it into a TargetGlobalAddress node so that LowerGlobalAddress
    // doesn't add MO_GOT, which is not needed for direct calls.
    GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Callee);
    // NOTE(review): the head of the getTargetGlobalAddress call was lost in
    // this extraction.
                                          GA->getOffset());
    Callee = DAG.getNode(WebAssemblyISD::Wrapper, DL,
                         getPointerTy(DAG.getDataLayout()), Callee);
  }

  // Compute the operands for the CALLn node.
  // NOTE(review): the `Ops` SmallVector declaration was lost in this
  // extraction.
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add all fixed arguments. Note that for non-varargs calls, NumFixedArgs
  // isn't reliable.
  Ops.append(OutVals.begin(),
             IsVarArg ? OutVals.begin() + NumFixedArgs : OutVals.end());
  // Add a pointer to the vararg buffer.
  if (IsVarArg)
    Ops.push_back(FINode);

  SmallVector<EVT, 8> InTys;
  for (const auto &In : Ins) {
    assert(!In.Flags.isByVal() && "byval is not valid for return values");
    assert(!In.Flags.isNest() && "nest is not valid for return values");
    if (In.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca return values");
    if (In.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs return values");
    if (In.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG,
           "WebAssembly hasn't implemented cons regs last return values");
    // Ignore In.getNonZeroOrigAlign() because all our arguments are passed in
    // registers.
    InTys.push_back(In.VT);
  }

  // Lastly, if this is a call to a funcref we need to add an instruction
  // table.set to the chain and transform the call.
  // NOTE(review): the head of this condition (a funcref-type test on the
  // called operand) was lost in this extraction.
      CLI.CB->getCalledOperand()->getType())) {
    // In the absence of function references proposal where a funcref call is
    // lowered to call_ref, using reference types we generate a table.set to set
    // the funcref to a special table used solely for this purpose, followed by
    // a call_indirect. Here we just generate the table set, and return the
    // SDValue of the table.set so that LowerCall can finalize the lowering by
    // generating the call_indirect.
    SDValue Chain = Ops[0];

    // NOTE(review): the head of the funcref call-table symbol creation was
    // lost in this extraction.
        MF.getContext(), Subtarget);
    SDValue Sym = DAG.getMCSymbol(Table, PtrVT);
    SDValue TableSlot = DAG.getConstant(0, DL, MVT::i32);
    SDValue TableSetOps[] = {Chain, Sym, TableSlot, Callee};
    SDValue TableSet = DAG.getMemIntrinsicNode(
        WebAssemblyISD::TABLE_SET, DL, DAG.getVTList(MVT::Other), TableSetOps,
        MVT::funcref,
        // Machine Mem Operand args
        MachinePointerInfo(
        // NOTE(review): the funcref address-space argument line was lost in
        // this extraction.
            CLI.CB->getCalledOperand()->getPointerAlignment(DAG.getDataLayout()),
        // NOTE(review): the memory-operand flags line was lost in this
        // extraction.

    Ops[0] = TableSet; // The new chain is the TableSet itself
  }

  if (CLI.IsTailCall) {
    // ret_calls do not return values to the current frame
    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
    return DAG.getNode(WebAssemblyISD::RET_CALL, DL, NodeTys, Ops);
  }

  InTys.push_back(MVT::Other);
  SDVTList InTyList = DAG.getVTList(InTys);
  SDValue Res = DAG.getNode(WebAssemblyISD::CALL, DL, InTyList, Ops);

  // The CALL node's results are the declared return values followed by the
  // output chain.
  for (size_t I = 0; I < Ins.size(); ++I)
    InVals.push_back(Res.getValue(I));

  // Return the chain
  return Res.getValue(Ins.size());
}
1582
1583bool WebAssemblyTargetLowering::CanLowerReturn(
1584 CallingConv::ID /*CallConv*/, MachineFunction & /*MF*/, bool /*IsVarArg*/,
1585 const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext & /*Context*/,
1586 const Type *RetTy) const {
1587 // WebAssembly can only handle returning tuples with multivalue enabled
1588 return WebAssembly::canLowerReturn(Outs.size(), Subtarget);
1589}
1590
// Lower an IR `ret` into a WebAssemblyISD::RETURN node carrying the
// already-legalized return values.
SDValue WebAssemblyTargetLowering::LowerReturn(
    SDValue Chain, CallingConv::ID CallConv, bool /*IsVarArg*/,
    // NOTE(review): the `Outs` parameter line was lost in this extraction.
    const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
    SelectionDAG &DAG) const {
  assert(WebAssembly::canLowerReturn(Outs.size(), Subtarget) &&
         "MVP WebAssembly can only return up to one value");
  if (!callingConvSupported(CallConv))
    fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");

  // RETURN's operands are the chain followed by every return value.
  SmallVector<SDValue, 4> RetOps(1, Chain);
  RetOps.append(OutVals.begin(), OutVals.end());
  Chain = DAG.getNode(WebAssemblyISD::RETURN, DL, MVT::Other, RetOps);

  // Diagnose argument flags that are invalid or unimplemented on returns.
  for (const ISD::OutputArg &Out : Outs) {
    assert(!Out.Flags.isByVal() && "byval is not valid for return values");
    assert(!Out.Flags.isNest() && "nest is not valid for return values");
    assert(!Out.Flags.isVarArg() && "non-fixed return value is not valid");
    if (Out.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca results");
    if (Out.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs results");
    // NOTE(review): the isInConsecutiveRegsLast() guard for the next fail()
    // was lost in this extraction.
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last results");
  }

  return Chain;
}
1620
// Lower the incoming arguments of a function into WebAssemblyISD::ARGUMENT
// nodes and record the function's parameter/result types in
// WebAssemblyFunctionInfo.
SDValue WebAssemblyTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  if (!callingConvSupported(CallConv))
    fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");

  MachineFunction &MF = DAG.getMachineFunction();
  auto *MFI = MF.getInfo<WebAssemblyFunctionInfo>();

  // Set up the incoming ARGUMENTS value, which serves to represent the liveness
  // of the incoming values before they're represented by virtual registers.
  MF.getRegInfo().addLiveIn(WebAssembly::ARGUMENTS);

  bool HasSwiftErrorArg = false;
  bool HasSwiftSelfArg = false;
  bool HasSwiftAsyncArg = false;
  for (const ISD::InputArg &In : Ins) {
    HasSwiftSelfArg |= In.Flags.isSwiftSelf();
    HasSwiftErrorArg |= In.Flags.isSwiftError();
    HasSwiftAsyncArg |= In.Flags.isSwiftAsync();
    if (In.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
    if (In.Flags.isNest())
      fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
    if (In.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
    if (In.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
    // Ignore In.getNonZeroOrigAlign() because all our arguments are passed in
    // registers.
    InVals.push_back(In.Used ? DAG.getNode(WebAssemblyISD::ARGUMENT, DL, In.VT,
                                           DAG.getTargetConstant(InVals.size(),
                                                                 DL, MVT::i32))
                             : DAG.getUNDEF(In.VT));

    // Record the number and types of arguments.
    MFI->addParam(In.VT);
  }

  // For swiftcc and swifttailcc, emit additional swiftself, swifterror, and
  // (for swifttailcc) swiftasync arguments if there aren't. These additional
  // arguments are also added for callee signature. They are necessary to match
  // callee and caller signature for indirect call.
  auto PtrVT = getPointerTy(MF.getDataLayout());
  if (CallConv == CallingConv::Swift || CallConv == CallingConv::SwiftTail) {
    if (!HasSwiftSelfArg) {
      MFI->addParam(PtrVT);
    }
    if (!HasSwiftErrorArg) {
      MFI->addParam(PtrVT);
    }
    if (CallConv == CallingConv::SwiftTail && !HasSwiftAsyncArg) {
      MFI->addParam(PtrVT);
    }
  }
  // Varargs are copied into a buffer allocated by the caller, and a pointer to
  // the buffer is passed as an argument.
  if (IsVarArg) {
    MVT PtrVT = getPointerTy(MF.getDataLayout());
    Register VarargVreg =
    // NOTE(review): the createVirtualRegister(...) initializer line was lost
    // in this extraction.
    MFI->setVarargBufferVreg(VarargVreg);
    Chain = DAG.getCopyToReg(
        Chain, DL, VarargVreg,
        DAG.getNode(WebAssemblyISD::ARGUMENT, DL, PtrVT,
                    DAG.getTargetConstant(Ins.size(), DL, MVT::i32)));
    MFI->addParam(PtrVT);
  }

  // Record the number and types of arguments and results.
  SmallVector<MVT, 4> Params;
  // NOTE(review): the `Results` vector declaration and the head of the
  // computeSignatureVTs(...) call were lost in this extraction.
      MF.getFunction(), DAG.getTarget(), Params, Results);
  for (MVT VT : Results)
    MFI->addResult(VT);
  // TODO: Use signatures in WebAssemblyMachineFunctionInfo too and unify
  // the param logic here with ComputeSignatureVTs
  assert(MFI->getParams().size() == Params.size() &&
         std::equal(MFI->getParams().begin(), MFI->getParams().end(),
                    Params.begin()));

  return Chain;
}
1706
// Replace the results of nodes whose result type is illegal; leaving Results
// empty signals that the node should not be custom lowered after all.
void WebAssemblyTargetLowering::ReplaceNodeResults(
    // NOTE(review): the parameter line (presumably `SDNode *N,
    // SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {`) and
    // several case labels below were lost in this extraction.
  switch (N->getOpcode()) {
  // NOTE(review): a SIGN_EXTEND_INREG case label was elided here.
    // Do not add any results, signifying that N should not be custom lowered
    // after all. This happens because simd128 turns on custom lowering for
    // SIGN_EXTEND_INREG, but for non-vector sign extends the result might be an
    // illegal type.
    break;
  // NOTE(review): the *_EXTEND_VECTOR_INREG case labels were elided here.
    // Do not add any results, signifying that N should not be custom lowered.
    // EXTEND_VECTOR_INREG is implemented for some vectors, but not all.
    break;
  case ISD::FP_ROUND: {
    // A v4f16 result is widened: demote the v4f32 source into the low half of
    // a v8f16 via DEMOTE_ZERO.
    EVT VT = N->getValueType(0);
    SDValue Src = N->getOperand(0);
    if (VT == MVT::v4f16 && Src.getValueType() == MVT::v4f32) {
      Results.push_back(
          DAG.getNode(WebAssemblyISD::DEMOTE_ZERO, SDLoc(N), MVT::v8f16, Src));
    }
    break;
  }
  case ISD::ADD:
  case ISD::SUB:
    // i128 add/sub are replaced with a wide-arithmetic 128-bit operation.
    Results.push_back(Replace128Op(N, DAG));
    break;
  default:
    // NOTE(review): the head of the llvm_unreachable( call was elided here.
        "ReplaceNodeResults not implemented for this op for WebAssembly!");
  }
}
1740
1741//===----------------------------------------------------------------------===//
1742// Custom lowering hooks.
1743//===----------------------------------------------------------------------===//
1744
// Dispatch custom lowering for operations marked Custom in the constructor.
// NOTE(review): several `case ISD::...:` labels were lost in this
// extraction; each orphaned `return Lower...` line below belonged to one of
// them (e.g. GlobalTLSAddress, ExternalSymbol, the vector-element access,
// intrinsic, sign/zero-extend-in-reg, vector shuffle, and fp-to-int-sat
// cases).
SDValue WebAssemblyTargetLowering::LowerOperation(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDLoc DL(Op);
  switch (Op.getOpcode()) {
  default:
    llvm_unreachable("unimplemented operation lowering");
    return SDValue();
  case ISD::FrameIndex:
    return LowerFrameIndex(Op, DAG);
  case ISD::GlobalAddress:
    return LowerGlobalAddress(Op, DAG);
    return LowerGlobalTLSAddress(Op, DAG);
    return LowerExternalSymbol(Op, DAG);
  case ISD::JumpTable:
    return LowerJumpTable(Op, DAG);
  case ISD::BR_JT:
    return LowerBR_JT(Op, DAG);
  case ISD::VASTART:
    return LowerVASTART(Op, DAG);
  case ISD::BlockAddress:
  case ISD::BRIND:
    fail(DL, DAG, "WebAssembly hasn't implemented computed gotos");
    return SDValue();
  case ISD::RETURNADDR:
    return LowerRETURNADDR(Op, DAG);
  case ISD::FRAMEADDR:
    return LowerFRAMEADDR(Op, DAG);
  case ISD::CopyToReg:
    return LowerCopyToReg(Op, DAG);
    return LowerAccessVectorElement(Op, DAG);
    return LowerIntrinsic(Op, DAG);
    return LowerSIGN_EXTEND_INREG(Op, DAG);
    return LowerEXTEND_VECTOR_INREG(Op, DAG);
  case ISD::BUILD_VECTOR:
    return LowerBUILD_VECTOR(Op, DAG);
    return LowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::SETCC:
    return LowerSETCC(Op, DAG);
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
    return LowerShift(Op, DAG);
    return LowerFP_TO_INT_SAT(Op, DAG);
  case ISD::FMINNUM:
  case ISD::FMINIMUMNUM:
    return LowerFMIN(Op, DAG);
  case ISD::FMAXNUM:
  case ISD::FMAXIMUMNUM:
    return LowerFMAX(Op, DAG);
  case ISD::LOAD:
    return LowerLoad(Op, DAG);
  case ISD::STORE:
    return LowerStore(Op, DAG);
  case ISD::CTPOP:
  case ISD::CTLZ:
  case ISD::CTTZ:
    // No SIMD popcount/clz/ctz instructions; expand elementwise.
    return DAG.UnrollVectorOp(Op.getNode());
  case ISD::CLEAR_CACHE:
    report_fatal_error("llvm.clear_cache is not supported on wasm");
  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI:
    return LowerMUL_LOHI(Op, DAG);
  case ISD::UADDO:
    return LowerUADDO(Op, DAG);
  }
}
1825

  // NOTE(review): the opening of this helper (its signature and primary
  // check — given its use by LowerStore/LowerLoad below, presumably a
  // wasm-global predicate) was lost in this extraction; only its
  // fall-through return remains visible.
  return false;
}
1832
// Map a frame-index operand to its wasm local number, when the stack object
// is lowered to a wasm local rather than real stack memory.
// NOTE(review): the declaration of `FI` (presumably a dyn_cast of Op to
// FrameIndexSDNode) and the final return statement were lost in this
// extraction.
static std::optional<unsigned> IsWebAssemblyLocal(SDValue Op,
                                                  SelectionDAG &DAG) {
  if (!FI)
    return std::nullopt;

  auto &MF = DAG.getMachineFunction();
}
1842
// Rewrite stores whose base is a wasm global or local into GLOBAL_SET /
// LOCAL_SET nodes; ordinary memory stores fall through unchanged.
SDValue WebAssemblyTargetLowering::LowerStore(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDLoc DL(Op);
  StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
  const SDValue &Value = SN->getValue();
  const SDValue &Base = SN->getBasePtr();
  const SDValue &Offset = SN->getOffset();

  // NOTE(review): the guard opening this branch (presumably an
  // IsWebAssemblyGlobal(Base) check) was lost in this extraction.
    if (!Offset->isUndef())
      report_fatal_error("unexpected offset when storing to webassembly global",
                         false);

    SDVTList Tys = DAG.getVTList(MVT::Other);
    SDValue Ops[] = {SN->getChain(), Value, Base};
    return DAG.getMemIntrinsicNode(WebAssemblyISD::GLOBAL_SET, DL, Tys, Ops,
                                   SN->getMemoryVT(), SN->getMemOperand());
  }

  if (std::optional<unsigned> Local = IsWebAssemblyLocal(Base, DAG)) {
    if (!Offset->isUndef())
      report_fatal_error("unexpected offset when storing to webassembly local",
                         false);

    SDValue Idx = DAG.getTargetConstant(*Local, Base, MVT::i32);
    SDVTList Tys = DAG.getVTList(MVT::Other); // The chain.
    SDValue Ops[] = {SN->getChain(), Idx, Value};
    return DAG.getNode(WebAssemblyISD::LOCAL_SET, DL, Tys, Ops);
  }

  // NOTE(review): a wasm_var address-space check and the head of a
  // report_fatal_error( call were lost in this extraction.
      "Encountered an unlowerable store to the wasm_var address space",
      false);

  return Op;
}
1880
// Rewrite loads whose base is a wasm global or local into GLOBAL_GET /
// LOCAL_GET nodes; ordinary memory loads fall through unchanged.
SDValue WebAssemblyTargetLowering::LowerLoad(SDValue Op,
                                             SelectionDAG &DAG) const {
  SDLoc DL(Op);
  LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
  const SDValue &Base = LN->getBasePtr();
  const SDValue &Offset = LN->getOffset();

  // NOTE(review): the guard opening this branch (presumably an
  // IsWebAssemblyGlobal(Base) check) and the head of the first
  // report_fatal_error( call were lost in this extraction.
    if (!Offset->isUndef())
        "unexpected offset when loading from webassembly global", false);

    SDVTList Tys = DAG.getVTList(LN->getValueType(0), MVT::Other);
    SDValue Ops[] = {LN->getChain(), Base};
    return DAG.getMemIntrinsicNode(WebAssemblyISD::GLOBAL_GET, DL, Tys, Ops,
                                   LN->getMemoryVT(), LN->getMemOperand());
  }

  if (std::optional<unsigned> Local = IsWebAssemblyLocal(Base, DAG)) {
    // NOTE(review): the head of the report_fatal_error( call was lost in
    // this extraction.
    if (!Offset->isUndef())
        "unexpected offset when loading from webassembly local", false);

    SDValue Idx = DAG.getTargetConstant(*Local, Base, MVT::i32);
    EVT LocalVT = LN->getValueType(0);
    return DAG.getNode(WebAssemblyISD::LOCAL_GET, DL, {LocalVT, MVT::Other},
                       {LN->getChain(), Idx});
  }

  // NOTE(review): a wasm_var address-space check and the head of a
  // report_fatal_error( call were lost in this extraction.
      "Encountered an unlowerable load from the wasm_var address space",
      false);

  return Op;
}
1917
1918SDValue WebAssemblyTargetLowering::LowerMUL_LOHI(SDValue Op,
1919 SelectionDAG &DAG) const {
1920 assert(Subtarget->hasWideArithmetic());
1921 assert(Op.getValueType() == MVT::i64);
1922 SDLoc DL(Op);
1923 unsigned Opcode;
1924 switch (Op.getOpcode()) {
1925 case ISD::UMUL_LOHI:
1926 Opcode = WebAssemblyISD::I64_MUL_WIDE_U;
1927 break;
1928 case ISD::SMUL_LOHI:
1929 Opcode = WebAssemblyISD::I64_MUL_WIDE_S;
1930 break;
1931 default:
1932 llvm_unreachable("unexpected opcode");
1933 }
1934 SDValue LHS = Op.getOperand(0);
1935 SDValue RHS = Op.getOperand(1);
1936 SDValue Lo =
1937 DAG.getNode(Opcode, DL, DAG.getVTList(MVT::i64, MVT::i64), LHS, RHS);
1938 SDValue Hi(Lo.getNode(), 1);
1939 SDValue Ops[] = {Lo, Hi};
1940 return DAG.getMergeValues(Ops, DL);
1941}
1942
1943// Lowers `UADDO` intrinsics to an `i64.add128` instruction when it's enabled.
1944//
1945// This enables generating a single wasm instruction for this operation where
1946// the upper half of both operands are constant zeros. The upper half of the
1947// result is then whether the overflow happened.
1948SDValue WebAssemblyTargetLowering::LowerUADDO(SDValue Op,
1949 SelectionDAG &DAG) const {
1950 assert(Subtarget->hasWideArithmetic());
1951 assert(Op.getValueType() == MVT::i64);
1952 assert(Op.getOpcode() == ISD::UADDO);
1953 SDLoc DL(Op);
1954 SDValue LHS = Op.getOperand(0);
1955 SDValue RHS = Op.getOperand(1);
1956 SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
1957 SDValue Result =
1958 DAG.getNode(WebAssemblyISD::I64_ADD128, DL,
1959 DAG.getVTList(MVT::i64, MVT::i64), LHS, Zero, RHS, Zero);
1960 SDValue CarryI64(Result.getNode(), 1);
1961 SDValue CarryI32 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, CarryI64);
1962 SDValue Ops[] = {Result, CarryI32};
1963 return DAG.getMergeValues(Ops, DL);
1964}
1965
1966SDValue WebAssemblyTargetLowering::Replace128Op(SDNode *N,
1967 SelectionDAG &DAG) const {
1968 assert(Subtarget->hasWideArithmetic());
1969 assert(N->getValueType(0) == MVT::i128);
1970 SDLoc DL(N);
1971 unsigned Opcode;
1972 switch (N->getOpcode()) {
1973 case ISD::ADD:
1974 Opcode = WebAssemblyISD::I64_ADD128;
1975 break;
1976 case ISD::SUB:
1977 Opcode = WebAssemblyISD::I64_SUB128;
1978 break;
1979 default:
1980 llvm_unreachable("unexpected opcode");
1981 }
1982 SDValue LHS = N->getOperand(0);
1983 SDValue RHS = N->getOperand(1);
1984
1985 SDValue C0 = DAG.getConstant(0, DL, MVT::i64);
1986 SDValue C1 = DAG.getConstant(1, DL, MVT::i64);
1987 SDValue LHS_0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, LHS, C0);
1988 SDValue LHS_1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, LHS, C1);
1989 SDValue RHS_0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, RHS, C0);
1990 SDValue RHS_1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, RHS, C1);
1991 SDValue Result_LO = DAG.getNode(Opcode, DL, DAG.getVTList(MVT::i64, MVT::i64),
1992 LHS_0, LHS_1, RHS_0, RHS_1);
1993 SDValue Result_HI(Result_LO.getNode(), 1);
1994 return DAG.getNode(ISD::BUILD_PAIR, DL, N->getVTList(), Result_LO, Result_HI);
1995}
1996
1997SDValue WebAssemblyTargetLowering::LowerCopyToReg(SDValue Op,
1998 SelectionDAG &DAG) const {
1999 SDValue Src = Op.getOperand(2);
2000 if (isa<FrameIndexSDNode>(Src.getNode())) {
2001 // CopyToReg nodes don't support FrameIndex operands. Other targets select
2002 // the FI to some LEA-like instruction, but since we don't have that, we
2003 // need to insert some kind of instruction that can take an FI operand and
2004 // produces a value usable by CopyToReg (i.e. in a vreg). So insert a dummy
2005 // local.copy between Op and its FI operand.
2006 SDValue Chain = Op.getOperand(0);
2007 SDLoc DL(Op);
2008 Register Reg = cast<RegisterSDNode>(Op.getOperand(1))->getReg();
2009 EVT VT = Src.getValueType();
2010 SDValue Copy(DAG.getMachineNode(VT == MVT::i32 ? WebAssembly::COPY_I32
2011 : WebAssembly::COPY_I64,
2012 DL, VT, Src),
2013 0);
2014 return Op.getNode()->getNumValues() == 1
2015 ? DAG.getCopyToReg(Chain, DL, Reg, Copy)
2016 : DAG.getCopyToReg(Chain, DL, Reg, Copy,
2017 Op.getNumOperands() == 4 ? Op.getOperand(3)
2018 : SDValue());
2019 }
2020 return SDValue();
2021}
2022
2023SDValue WebAssemblyTargetLowering::LowerFrameIndex(SDValue Op,
2024 SelectionDAG &DAG) const {
2025 int FI = cast<FrameIndexSDNode>(Op)->getIndex();
2026 return DAG.getTargetFrameIndex(FI, Op.getValueType());
2027}
2028
2029SDValue WebAssemblyTargetLowering::LowerRETURNADDR(SDValue Op,
2030 SelectionDAG &DAG) const {
2031 SDLoc DL(Op);
2032
2033 if (!Subtarget->getTargetTriple().isOSEmscripten()) {
2034 fail(DL, DAG,
2035 "Non-Emscripten WebAssembly hasn't implemented "
2036 "__builtin_return_address");
2037 return SDValue();
2038 }
2039
2040 unsigned Depth = Op.getConstantOperandVal(0);
2041 MakeLibCallOptions CallOptions;
2042 return makeLibCall(DAG, RTLIB::RETURN_ADDRESS, Op.getValueType(),
2043 {DAG.getConstant(Depth, DL, MVT::i32)}, CallOptions, DL)
2044 .first;
2045}
2046
2047SDValue WebAssemblyTargetLowering::LowerFRAMEADDR(SDValue Op,
2048 SelectionDAG &DAG) const {
2049 // Non-zero depths are not supported by WebAssembly currently. Use the
2050 // legalizer's default expansion, which is to return 0 (what this function is
2051 // documented to do).
2052 if (Op.getConstantOperandVal(0) > 0)
2053 return SDValue();
2054
2056 EVT VT = Op.getValueType();
2057 Register FP =
2058 Subtarget->getRegisterInfo()->getFrameRegister(DAG.getMachineFunction());
2059 return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), FP, VT);
2060}
2061
2062SDValue
2063WebAssemblyTargetLowering::LowerGlobalTLSAddress(SDValue Op,
2064 SelectionDAG &DAG) const {
2065 SDLoc DL(Op);
2066 const auto *GA = cast<GlobalAddressSDNode>(Op);
2067
2068 MachineFunction &MF = DAG.getMachineFunction();
2069 if (!MF.getSubtarget<WebAssemblySubtarget>().hasBulkMemory())
2070 report_fatal_error("cannot use thread-local storage without bulk memory",
2071 false);
2072
2073 const GlobalValue *GV = GA->getGlobal();
2074
2075 // Currently only Emscripten supports dynamic linking with threads. Therefore,
2076 // on other targets, if we have thread-local storage, only the local-exec
2077 // model is possible.
2078 auto model = Subtarget->getTargetTriple().isOSEmscripten()
2079 ? GV->getThreadLocalMode()
2081
2082 // Unsupported TLS modes
2085
2086 if (model == GlobalValue::LocalExecTLSModel ||
2089 getTargetMachine().shouldAssumeDSOLocal(GV))) {
2090 // For DSO-local TLS variables we use offset from __tls_base
2091
2092 MVT PtrVT = getPointerTy(DAG.getDataLayout());
2093 auto GlobalGet = PtrVT == MVT::i64 ? WebAssembly::GLOBAL_GET_I64
2094 : WebAssembly::GLOBAL_GET_I32;
2095 const char *BaseName = MF.createExternalSymbolName("__tls_base");
2096
2098 DAG.getMachineNode(GlobalGet, DL, PtrVT,
2099 DAG.getTargetExternalSymbol(BaseName, PtrVT)),
2100 0);
2101
2102 SDValue TLSOffset = DAG.getTargetGlobalAddress(
2103 GV, DL, PtrVT, GA->getOffset(), WebAssemblyII::MO_TLS_BASE_REL);
2104 SDValue SymOffset =
2105 DAG.getNode(WebAssemblyISD::WrapperREL, DL, PtrVT, TLSOffset);
2106
2107 return DAG.getNode(ISD::ADD, DL, PtrVT, BaseAddr, SymOffset);
2108 }
2109
2111
2112 EVT VT = Op.getValueType();
2113 return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
2114 DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT,
2115 GA->getOffset(),
2117}
2118
2119SDValue WebAssemblyTargetLowering::LowerGlobalAddress(SDValue Op,
2120 SelectionDAG &DAG) const {
2121 SDLoc DL(Op);
2122 const auto *GA = cast<GlobalAddressSDNode>(Op);
2123 EVT VT = Op.getValueType();
2124 assert(GA->getTargetFlags() == 0 &&
2125 "Unexpected target flags on generic GlobalAddressSDNode");
2127 fail(DL, DAG, "Invalid address space for WebAssembly target");
2128
2129 unsigned OperandFlags = 0;
2130 const GlobalValue *GV = GA->getGlobal();
2131 // Since WebAssembly tables cannot yet be shared accross modules, we don't
2132 // need special treatment for tables in PIC mode.
2133 if (isPositionIndependent() &&
2135 if (getTargetMachine().shouldAssumeDSOLocal(GV)) {
2136 MachineFunction &MF = DAG.getMachineFunction();
2137 MVT PtrVT = getPointerTy(MF.getDataLayout());
2138 const char *BaseName;
2139 if (GV->getValueType()->isFunctionTy()) {
2140 BaseName = MF.createExternalSymbolName("__table_base");
2142 } else {
2143 BaseName = MF.createExternalSymbolName("__memory_base");
2145 }
2147 DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT,
2148 DAG.getTargetExternalSymbol(BaseName, PtrVT));
2149
2150 SDValue SymAddr = DAG.getNode(
2151 WebAssemblyISD::WrapperREL, DL, VT,
2152 DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT, GA->getOffset(),
2153 OperandFlags));
2154
2155 return DAG.getNode(ISD::ADD, DL, VT, BaseAddr, SymAddr);
2156 }
2158 }
2159
2160 return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
2161 DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT,
2162 GA->getOffset(), OperandFlags));
2163}
2164
2165SDValue
2166WebAssemblyTargetLowering::LowerExternalSymbol(SDValue Op,
2167 SelectionDAG &DAG) const {
2168 SDLoc DL(Op);
2169 const auto *ES = cast<ExternalSymbolSDNode>(Op);
2170 EVT VT = Op.getValueType();
2171 assert(ES->getTargetFlags() == 0 &&
2172 "Unexpected target flags on generic ExternalSymbolSDNode");
2173 return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
2174 DAG.getTargetExternalSymbol(ES->getSymbol(), VT));
2175}
2176
2177SDValue WebAssemblyTargetLowering::LowerJumpTable(SDValue Op,
2178 SelectionDAG &DAG) const {
2179 // There's no need for a Wrapper node because we always incorporate a jump
2180 // table operand into a BR_TABLE instruction, rather than ever
2181 // materializing it in a register.
2182 const JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
2183 return DAG.getTargetJumpTable(JT->getIndex(), Op.getValueType(),
2184 JT->getTargetFlags());
2185}
2186
2187SDValue WebAssemblyTargetLowering::LowerBR_JT(SDValue Op,
2188 SelectionDAG &DAG) const {
2189 SDLoc DL(Op);
2190 SDValue Chain = Op.getOperand(0);
2191 const auto *JT = cast<JumpTableSDNode>(Op.getOperand(1));
2192 SDValue Index = Op.getOperand(2);
2193 assert(JT->getTargetFlags() == 0 && "WebAssembly doesn't set target flags");
2194
2196 Ops.push_back(Chain);
2197 Ops.push_back(Index);
2198
2199 MachineJumpTableInfo *MJTI = DAG.getMachineFunction().getJumpTableInfo();
2200 const auto &MBBs = MJTI->getJumpTables()[JT->getIndex()].MBBs;
2201
2202 // Add an operand for each case.
2203 for (auto *MBB : MBBs)
2204 Ops.push_back(DAG.getBasicBlock(MBB));
2205
2206 // Add the first MBB as a dummy default target for now. This will be replaced
2207 // with the proper default target (and the preceding range check eliminated)
2208 // if possible by WebAssemblyFixBrTableDefaults.
2209 Ops.push_back(DAG.getBasicBlock(*MBBs.begin()));
2210 return DAG.getNode(WebAssemblyISD::BR_TABLE, DL, MVT::Other, Ops);
2211}
2212
2213SDValue WebAssemblyTargetLowering::LowerVASTART(SDValue Op,
2214 SelectionDAG &DAG) const {
2215 SDLoc DL(Op);
2216 EVT PtrVT = getPointerTy(DAG.getMachineFunction().getDataLayout());
2217
2218 auto *MFI = DAG.getMachineFunction().getInfo<WebAssemblyFunctionInfo>();
2219 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2220
2221 SDValue ArgN = DAG.getCopyFromReg(DAG.getEntryNode(), DL,
2222 MFI->getVarargBufferVreg(), PtrVT);
2223 return DAG.getStore(Op.getOperand(0), DL, ArgN, Op.getOperand(1),
2224 MachinePointerInfo(SV));
2225}
2226
2227SDValue WebAssemblyTargetLowering::LowerIntrinsic(SDValue Op,
2228 SelectionDAG &DAG) const {
2229 MachineFunction &MF = DAG.getMachineFunction();
2230 unsigned IntNo;
2231 switch (Op.getOpcode()) {
2234 IntNo = Op.getConstantOperandVal(1);
2235 break;
2237 IntNo = Op.getConstantOperandVal(0);
2238 break;
2239 default:
2240 llvm_unreachable("Invalid intrinsic");
2241 }
2242 SDLoc DL(Op);
2243
2244 switch (IntNo) {
2245 default:
2246 return SDValue(); // Don't custom lower most intrinsics.
2247
2248 case Intrinsic::wasm_lsda: {
2249 auto PtrVT = getPointerTy(MF.getDataLayout());
2250 const char *SymName = MF.createExternalSymbolName(
2251 "GCC_except_table" + std::to_string(MF.getFunctionNumber()));
2252 if (isPositionIndependent()) {
2254 SymName, PtrVT, WebAssemblyII::MO_MEMORY_BASE_REL);
2255 const char *BaseName = MF.createExternalSymbolName("__memory_base");
2257 DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT,
2258 DAG.getTargetExternalSymbol(BaseName, PtrVT));
2259 SDValue SymAddr =
2260 DAG.getNode(WebAssemblyISD::WrapperREL, DL, PtrVT, Node);
2261 return DAG.getNode(ISD::ADD, DL, PtrVT, BaseAddr, SymAddr);
2262 }
2263 SDValue Node = DAG.getTargetExternalSymbol(SymName, PtrVT);
2264 return DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT, Node);
2265 }
2266
2267 case Intrinsic::wasm_shuffle: {
2268 // Drop in-chain and replace undefs, but otherwise pass through unchanged
2269 SDValue Ops[18];
2270 size_t OpIdx = 0;
2271 Ops[OpIdx++] = Op.getOperand(1);
2272 Ops[OpIdx++] = Op.getOperand(2);
2273 while (OpIdx < 18) {
2274 const SDValue &MaskIdx = Op.getOperand(OpIdx + 1);
2275 if (MaskIdx.isUndef() || MaskIdx.getNode()->getAsZExtVal() >= 32) {
2276 bool isTarget = MaskIdx.getNode()->getOpcode() == ISD::TargetConstant;
2277 Ops[OpIdx++] = DAG.getConstant(0, DL, MVT::i32, isTarget);
2278 } else {
2279 Ops[OpIdx++] = MaskIdx;
2280 }
2281 }
2282 return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
2283 }
2284
2285 case Intrinsic::thread_pointer: {
2286 MVT PtrVT = getPointerTy(DAG.getDataLayout());
2287 auto GlobalGet = PtrVT == MVT::i64 ? WebAssembly::GLOBAL_GET_I64
2288 : WebAssembly::GLOBAL_GET_I32;
2289 const char *TlsBase = MF.createExternalSymbolName("__tls_base");
2290 return SDValue(
2291 DAG.getMachineNode(GlobalGet, DL, PtrVT,
2292 DAG.getTargetExternalSymbol(TlsBase, PtrVT)),
2293 0);
2294 }
2295 }
2296}
2297
2298SDValue
2299WebAssemblyTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
2300 SelectionDAG &DAG) const {
2301 SDLoc DL(Op);
2302 // If sign extension operations are disabled, allow sext_inreg only if operand
2303 // is a vector extract of an i8 or i16 lane. SIMD does not depend on sign
2304 // extension operations, but allowing sext_inreg in this context lets us have
2305 // simple patterns to select extract_lane_s instructions. Expanding sext_inreg
2306 // everywhere would be simpler in this file, but would necessitate large and
2307 // brittle patterns to undo the expansion and select extract_lane_s
2308 // instructions.
2309 assert(!Subtarget->hasSignExt() && Subtarget->hasSIMD128());
2310 if (Op.getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT)
2311 return SDValue();
2312
2313 const SDValue &Extract = Op.getOperand(0);
2314 MVT VecT = Extract.getOperand(0).getSimpleValueType();
2315 if (VecT.getVectorElementType().getSizeInBits() > 32)
2316 return SDValue();
2317 MVT ExtractedLaneT =
2318 cast<VTSDNode>(Op.getOperand(1).getNode())->getVT().getSimpleVT();
2319 MVT ExtractedVecT =
2320 MVT::getVectorVT(ExtractedLaneT, 128 / ExtractedLaneT.getSizeInBits());
2321 if (ExtractedVecT == VecT)
2322 return Op;
2323
2324 // Bitcast vector to appropriate type to ensure ISel pattern coverage
2325 const SDNode *Index = Extract.getOperand(1).getNode();
2326 if (!isa<ConstantSDNode>(Index))
2327 return SDValue();
2328 unsigned IndexVal = Index->getAsZExtVal();
2329 unsigned Scale =
2330 ExtractedVecT.getVectorNumElements() / VecT.getVectorNumElements();
2331 assert(Scale > 1);
2332 SDValue NewIndex =
2333 DAG.getConstant(IndexVal * Scale, DL, Index->getValueType(0));
2334 SDValue NewExtract = DAG.getNode(
2336 DAG.getBitcast(ExtractedVecT, Extract.getOperand(0)), NewIndex);
2337 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, Op.getValueType(), NewExtract,
2338 Op.getOperand(1));
2339}
2340
2341static SDValue GetExtendHigh(SDValue Op, unsigned UserOpc, EVT VT,
2342 SelectionDAG &DAG) {
2343 SDValue Source = peekThroughBitcasts(Op);
2344 if (Source.getOpcode() != ISD::VECTOR_SHUFFLE)
2345 return SDValue();
2346
2347 assert((UserOpc == WebAssemblyISD::EXTEND_LOW_U ||
2348 UserOpc == WebAssemblyISD::EXTEND_LOW_S) &&
2349 "expected extend_low");
2350 auto *Shuffle = cast<ShuffleVectorSDNode>(Source.getNode());
2351
2352 ArrayRef<int> Mask = Shuffle->getMask();
2353 // Look for a shuffle which moves from the high half to the low half.
2354 size_t FirstIdx = Mask.size() / 2;
2355 for (size_t i = 0; i < Mask.size() / 2; ++i) {
2356 if (Mask[i] != static_cast<int>(FirstIdx + i)) {
2357 return SDValue();
2358 }
2359 }
2360
2361 SDLoc DL(Op);
2362 unsigned Opc = UserOpc == WebAssemblyISD::EXTEND_LOW_S
2363 ? WebAssemblyISD::EXTEND_HIGH_S
2364 : WebAssemblyISD::EXTEND_HIGH_U;
2365 SDValue ShuffleSrc = Shuffle->getOperand(0);
2366 if (Op.getOpcode() == ISD::BITCAST)
2367 ShuffleSrc = DAG.getBitcast(Op.getValueType(), ShuffleSrc);
2368
2369 return DAG.getNode(Opc, DL, VT, ShuffleSrc);
2370}
2371
2372SDValue
2373WebAssemblyTargetLowering::LowerEXTEND_VECTOR_INREG(SDValue Op,
2374 SelectionDAG &DAG) const {
2375 SDLoc DL(Op);
2376 EVT VT = Op.getValueType();
2377 SDValue Src = Op.getOperand(0);
2378 EVT SrcVT = Src.getValueType();
2379
2380 if (SrcVT.getVectorElementType() == MVT::i1 ||
2381 SrcVT.getVectorElementType() == MVT::i64)
2382 return SDValue();
2383
2384 assert(VT.getScalarSizeInBits() % SrcVT.getScalarSizeInBits() == 0 &&
2385 "Unexpected extension factor.");
2386 unsigned Scale = VT.getScalarSizeInBits() / SrcVT.getScalarSizeInBits();
2387
2388 if (Scale != 2 && Scale != 4 && Scale != 8)
2389 return SDValue();
2390
2391 unsigned Ext;
2392 switch (Op.getOpcode()) {
2393 default:
2394 llvm_unreachable("unexpected opcode");
2397 Ext = WebAssemblyISD::EXTEND_LOW_U;
2398 break;
2400 Ext = WebAssemblyISD::EXTEND_LOW_S;
2401 break;
2402 }
2403
2404 if (Scale == 2) {
2405 // See if we can use EXTEND_HIGH.
2406 if (auto ExtendHigh = GetExtendHigh(Op.getOperand(0), Ext, VT, DAG))
2407 return ExtendHigh;
2408 }
2409
2410 SDValue Ret = Src;
2411 while (Scale != 1) {
2412 Ret = DAG.getNode(Ext, DL,
2413 Ret.getValueType()
2416 Ret);
2417 Scale /= 2;
2418 }
2419 assert(Ret.getValueType() == VT);
2420 return Ret;
2421}
2422
2424 SDLoc DL(Op);
2425 if (Op.getValueType() != MVT::v2f64 && Op.getValueType() != MVT::v4f32)
2426 return SDValue();
2427
2428 auto GetConvertedLane = [](SDValue Op, unsigned &Opcode, SDValue &SrcVec,
2429 unsigned &Index) -> bool {
2430 switch (Op.getOpcode()) {
2431 case ISD::SINT_TO_FP:
2432 Opcode = WebAssemblyISD::CONVERT_LOW_S;
2433 break;
2434 case ISD::UINT_TO_FP:
2435 Opcode = WebAssemblyISD::CONVERT_LOW_U;
2436 break;
2437 case ISD::FP_EXTEND:
2438 case ISD::FP16_TO_FP:
2439 Opcode = WebAssemblyISD::PROMOTE_LOW;
2440 break;
2441 default:
2442 return false;
2443 }
2444
2445 auto ExtractVector = Op.getOperand(0);
2446 if (ExtractVector.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
2447 return false;
2448
2449 if (!isa<ConstantSDNode>(ExtractVector.getOperand(1).getNode()))
2450 return false;
2451
2452 SrcVec = ExtractVector.getOperand(0);
2453 Index = ExtractVector.getConstantOperandVal(1);
2454 return true;
2455 };
2456
2457 unsigned NumLanes = Op.getValueType() == MVT::v2f64 ? 2 : 4;
2458 unsigned FirstOpcode = 0, SecondOpcode = 0, ThirdOpcode = 0, FourthOpcode = 0;
2459 unsigned FirstIndex = 0, SecondIndex = 0, ThirdIndex = 0, FourthIndex = 0;
2460 SDValue FirstSrcVec, SecondSrcVec, ThirdSrcVec, FourthSrcVec;
2461
2462 if (!GetConvertedLane(Op.getOperand(0), FirstOpcode, FirstSrcVec,
2463 FirstIndex) ||
2464 !GetConvertedLane(Op.getOperand(1), SecondOpcode, SecondSrcVec,
2465 SecondIndex))
2466 return SDValue();
2467
2468 // If we're converting to v4f32, check the third and fourth lanes, too.
2469 if (NumLanes == 4 && (!GetConvertedLane(Op.getOperand(2), ThirdOpcode,
2470 ThirdSrcVec, ThirdIndex) ||
2471 !GetConvertedLane(Op.getOperand(3), FourthOpcode,
2472 FourthSrcVec, FourthIndex)))
2473 return SDValue();
2474
2475 if (FirstOpcode != SecondOpcode)
2476 return SDValue();
2477
2478 // TODO Add an optimization similar to the v2f64 below for shuffling the
2479 // vectors when the lanes are in the wrong order or come from different src
2480 // vectors.
2481 if (NumLanes == 4 &&
2482 (FirstOpcode != ThirdOpcode || FirstOpcode != FourthOpcode ||
2483 FirstSrcVec != SecondSrcVec || FirstSrcVec != ThirdSrcVec ||
2484 FirstSrcVec != FourthSrcVec || FirstIndex != 0 || SecondIndex != 1 ||
2485 ThirdIndex != 2 || FourthIndex != 3))
2486 return SDValue();
2487
2488 MVT ExpectedSrcVT;
2489 switch (FirstOpcode) {
2490 case WebAssemblyISD::CONVERT_LOW_S:
2491 case WebAssemblyISD::CONVERT_LOW_U:
2492 ExpectedSrcVT = MVT::v4i32;
2493 break;
2494 case WebAssemblyISD::PROMOTE_LOW:
2495 ExpectedSrcVT = NumLanes == 2 ? MVT::v4f32 : MVT::v8i16;
2496 break;
2497 }
2498 if (FirstSrcVec.getValueType() != ExpectedSrcVT)
2499 return SDValue();
2500
2501 auto Src = FirstSrcVec;
2502 if (NumLanes == 2 &&
2503 (FirstIndex != 0 || SecondIndex != 1 || FirstSrcVec != SecondSrcVec)) {
2504 // Shuffle the source vector so that the converted lanes are the low lanes.
2505 Src = DAG.getVectorShuffle(ExpectedSrcVT, DL, FirstSrcVec, SecondSrcVec,
2506 {static_cast<int>(FirstIndex),
2507 static_cast<int>(SecondIndex) + 4, -1, -1});
2508 }
2509 return DAG.getNode(FirstOpcode, DL, NumLanes == 2 ? MVT::v2f64 : MVT::v4f32,
2510 Src);
2511}
2512
2513SDValue WebAssemblyTargetLowering::LowerBUILD_VECTOR(SDValue Op,
2514 SelectionDAG &DAG) const {
2515 MVT VT = Op.getSimpleValueType();
2516 if (VT == MVT::v8f16) {
2517 // BUILD_VECTOR can't handle FP16 operands since Wasm doesn't have a scaler
2518 // FP16 type, so cast them to I16s.
2519 MVT IVT = VT.changeVectorElementType(MVT::i16);
2521 for (unsigned I = 0, E = Op.getNumOperands(); I < E; ++I)
2522 NewOps.push_back(DAG.getBitcast(MVT::i16, Op.getOperand(I)));
2523 SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, SDLoc(), IVT, NewOps);
2524 return DAG.getBitcast(VT, Res);
2525 }
2526
2527 if (auto ConvertLow = LowerConvertLow(Op, DAG))
2528 return ConvertLow;
2529
2530 SDLoc DL(Op);
2531 const EVT VecT = Op.getValueType();
2532 const EVT LaneT = Op.getOperand(0).getValueType();
2533 const size_t Lanes = Op.getNumOperands();
2534 bool CanSwizzle = VecT == MVT::v16i8;
2535
2536 // BUILD_VECTORs are lowered to the instruction that initializes the highest
2537 // possible number of lanes at once followed by a sequence of replace_lane
2538 // instructions to individually initialize any remaining lanes.
2539
2540 // TODO: Tune this. For example, lanewise swizzling is very expensive, so
2541 // swizzled lanes should be given greater weight.
2542
2543 // TODO: Investigate looping rather than always extracting/replacing specific
2544 // lanes to fill gaps.
2545
2546 auto IsConstant = [](const SDValue &V) {
2547 return V.getOpcode() == ISD::Constant || V.getOpcode() == ISD::ConstantFP;
2548 };
2549
2550 // Returns the source vector and index vector pair if they exist. Checks for:
2551 // (extract_vector_elt
2552 // $src,
2553 // (sign_extend_inreg (extract_vector_elt $indices, $i))
2554 // )
2555 auto GetSwizzleSrcs = [](size_t I, const SDValue &Lane) {
2556 auto Bail = std::make_pair(SDValue(), SDValue());
2557 if (Lane->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
2558 return Bail;
2559 const SDValue &SwizzleSrc = Lane->getOperand(0);
2560 const SDValue &IndexExt = Lane->getOperand(1);
2561 if (IndexExt->getOpcode() != ISD::SIGN_EXTEND_INREG)
2562 return Bail;
2563 const SDValue &Index = IndexExt->getOperand(0);
2564 if (Index->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
2565 return Bail;
2566 const SDValue &SwizzleIndices = Index->getOperand(0);
2567 if (SwizzleSrc.getValueType() != MVT::v16i8 ||
2568 SwizzleIndices.getValueType() != MVT::v16i8 ||
2569 Index->getOperand(1)->getOpcode() != ISD::Constant ||
2570 Index->getConstantOperandVal(1) != I)
2571 return Bail;
2572 return std::make_pair(SwizzleSrc, SwizzleIndices);
2573 };
2574
2575 // If the lane is extracted from another vector at a constant index, return
2576 // that vector. The source vector must not have more lanes than the dest
2577 // because the shufflevector indices are in terms of the destination lanes and
2578 // would not be able to address the smaller individual source lanes.
2579 auto GetShuffleSrc = [&](const SDValue &Lane) {
2580 if (Lane->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
2581 return SDValue();
2582 if (!isa<ConstantSDNode>(Lane->getOperand(1).getNode()))
2583 return SDValue();
2584 if (Lane->getOperand(0).getValueType().getVectorNumElements() >
2585 VecT.getVectorNumElements())
2586 return SDValue();
2587 return Lane->getOperand(0);
2588 };
2589
2590 using ValueEntry = std::pair<SDValue, size_t>;
2591 SmallVector<ValueEntry, 16> SplatValueCounts;
2592
2593 using SwizzleEntry = std::pair<std::pair<SDValue, SDValue>, size_t>;
2594 SmallVector<SwizzleEntry, 16> SwizzleCounts;
2595
2596 using ShuffleEntry = std::pair<SDValue, size_t>;
2597 SmallVector<ShuffleEntry, 16> ShuffleCounts;
2598
2599 auto AddCount = [](auto &Counts, const auto &Val) {
2600 auto CountIt =
2601 llvm::find_if(Counts, [&Val](auto E) { return E.first == Val; });
2602 if (CountIt == Counts.end()) {
2603 Counts.emplace_back(Val, 1);
2604 } else {
2605 CountIt->second++;
2606 }
2607 };
2608
2609 auto GetMostCommon = [](auto &Counts) {
2610 auto CommonIt = llvm::max_element(Counts, llvm::less_second());
2611 assert(CommonIt != Counts.end() && "Unexpected all-undef build_vector");
2612 return *CommonIt;
2613 };
2614
2615 size_t NumConstantLanes = 0;
2616
2617 // Count eligible lanes for each type of vector creation op
2618 for (size_t I = 0; I < Lanes; ++I) {
2619 const SDValue &Lane = Op->getOperand(I);
2620 if (Lane.isUndef())
2621 continue;
2622
2623 AddCount(SplatValueCounts, Lane);
2624
2625 if (IsConstant(Lane))
2626 NumConstantLanes++;
2627 if (auto ShuffleSrc = GetShuffleSrc(Lane))
2628 AddCount(ShuffleCounts, ShuffleSrc);
2629 if (CanSwizzle) {
2630 auto SwizzleSrcs = GetSwizzleSrcs(I, Lane);
2631 if (SwizzleSrcs.first)
2632 AddCount(SwizzleCounts, SwizzleSrcs);
2633 }
2634 }
2635
2636 SDValue SplatValue;
2637 size_t NumSplatLanes;
2638 std::tie(SplatValue, NumSplatLanes) = GetMostCommon(SplatValueCounts);
2639
2640 SDValue SwizzleSrc;
2641 SDValue SwizzleIndices;
2642 size_t NumSwizzleLanes = 0;
2643 if (SwizzleCounts.size())
2644 std::forward_as_tuple(std::tie(SwizzleSrc, SwizzleIndices),
2645 NumSwizzleLanes) = GetMostCommon(SwizzleCounts);
2646
2647 // Shuffles can draw from up to two vectors, so find the two most common
2648 // sources.
2649 SDValue ShuffleSrc1, ShuffleSrc2;
2650 size_t NumShuffleLanes = 0;
2651 if (ShuffleCounts.size()) {
2652 std::tie(ShuffleSrc1, NumShuffleLanes) = GetMostCommon(ShuffleCounts);
2653 llvm::erase_if(ShuffleCounts,
2654 [&](const auto &Pair) { return Pair.first == ShuffleSrc1; });
2655 }
2656 if (ShuffleCounts.size()) {
2657 size_t AdditionalShuffleLanes;
2658 std::tie(ShuffleSrc2, AdditionalShuffleLanes) =
2659 GetMostCommon(ShuffleCounts);
2660 NumShuffleLanes += AdditionalShuffleLanes;
2661 }
2662
2663 // Predicate returning true if the lane is properly initialized by the
2664 // original instruction
2665 std::function<bool(size_t, const SDValue &)> IsLaneConstructed;
2667 // Prefer swizzles over shuffles over vector consts over splats
2668 if (NumSwizzleLanes >= NumShuffleLanes &&
2669 NumSwizzleLanes >= NumConstantLanes && NumSwizzleLanes >= NumSplatLanes) {
2670 Result = DAG.getNode(WebAssemblyISD::SWIZZLE, DL, VecT, SwizzleSrc,
2671 SwizzleIndices);
2672 auto Swizzled = std::make_pair(SwizzleSrc, SwizzleIndices);
2673 IsLaneConstructed = [&, Swizzled](size_t I, const SDValue &Lane) {
2674 return Swizzled == GetSwizzleSrcs(I, Lane);
2675 };
2676 } else if (NumShuffleLanes >= NumConstantLanes &&
2677 NumShuffleLanes >= NumSplatLanes) {
2678 size_t DestLaneSize = VecT.getVectorElementType().getFixedSizeInBits() / 8;
2679 size_t DestLaneCount = VecT.getVectorNumElements();
2680 size_t Scale1 = 1;
2681 size_t Scale2 = 1;
2682 SDValue Src1 = ShuffleSrc1;
2683 SDValue Src2 = ShuffleSrc2 ? ShuffleSrc2 : DAG.getUNDEF(VecT);
2684 if (Src1.getValueType() != VecT) {
2685 size_t LaneSize =
2687 assert(LaneSize > DestLaneSize);
2688 Scale1 = LaneSize / DestLaneSize;
2689 Src1 = DAG.getBitcast(VecT, Src1);
2690 }
2691 if (Src2.getValueType() != VecT) {
2692 size_t LaneSize =
2694 assert(LaneSize > DestLaneSize);
2695 Scale2 = LaneSize / DestLaneSize;
2696 Src2 = DAG.getBitcast(VecT, Src2);
2697 }
2698
2699 int Mask[16];
2700 assert(DestLaneCount <= 16);
2701 for (size_t I = 0; I < DestLaneCount; ++I) {
2702 const SDValue &Lane = Op->getOperand(I);
2703 SDValue Src = GetShuffleSrc(Lane);
2704 if (Src == ShuffleSrc1) {
2705 Mask[I] = Lane->getConstantOperandVal(1) * Scale1;
2706 } else if (Src && Src == ShuffleSrc2) {
2707 Mask[I] = DestLaneCount + Lane->getConstantOperandVal(1) * Scale2;
2708 } else {
2709 Mask[I] = -1;
2710 }
2711 }
2712 ArrayRef<int> MaskRef(Mask, DestLaneCount);
2713 Result = DAG.getVectorShuffle(VecT, DL, Src1, Src2, MaskRef);
2714 IsLaneConstructed = [&](size_t, const SDValue &Lane) {
2715 auto Src = GetShuffleSrc(Lane);
2716 return Src == ShuffleSrc1 || (Src && Src == ShuffleSrc2);
2717 };
2718 } else if (NumConstantLanes >= NumSplatLanes) {
2719 SmallVector<SDValue, 16> ConstLanes;
2720 for (const SDValue &Lane : Op->op_values()) {
2721 if (IsConstant(Lane)) {
2722 // Values may need to be fixed so that they will sign extend to be
2723 // within the expected range during ISel. Check whether the value is in
2724 // bounds based on the lane bit width and if it is out of bounds, lop
2725 // off the extra bits.
2726 uint64_t LaneBits = 128 / Lanes;
2727 if (auto *Const = dyn_cast<ConstantSDNode>(Lane.getNode())) {
2728 ConstLanes.push_back(DAG.getConstant(
2729 Const->getAPIntValue().trunc(LaneBits).getZExtValue(),
2730 SDLoc(Lane), LaneT));
2731 } else {
2732 ConstLanes.push_back(Lane);
2733 }
2734 } else if (LaneT.isFloatingPoint()) {
2735 ConstLanes.push_back(DAG.getConstantFP(0, DL, LaneT));
2736 } else {
2737 ConstLanes.push_back(DAG.getConstant(0, DL, LaneT));
2738 }
2739 }
2740 Result = DAG.getBuildVector(VecT, DL, ConstLanes);
2741 IsLaneConstructed = [&IsConstant](size_t _, const SDValue &Lane) {
2742 return IsConstant(Lane);
2743 };
2744 } else {
2745 size_t DestLaneSize = VecT.getVectorElementType().getFixedSizeInBits();
2746 if (NumSplatLanes == 1 && Op->getOperand(0) == SplatValue &&
2747 (DestLaneSize == 32 || DestLaneSize == 64)) {
2748 // Could be selected to load_zero.
2749 Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecT, SplatValue);
2750 } else {
2751 // Use a splat (which might be selected as a load splat)
2752 Result = DAG.getSplatBuildVector(VecT, DL, SplatValue);
2753 }
2754 IsLaneConstructed = [&SplatValue](size_t _, const SDValue &Lane) {
2755 return Lane == SplatValue;
2756 };
2757 }
2758
2759 assert(Result);
2760 assert(IsLaneConstructed);
2761
2762 // Add replace_lane instructions for any unhandled values
2763 for (size_t I = 0; I < Lanes; ++I) {
2764 const SDValue &Lane = Op->getOperand(I);
2765 if (!Lane.isUndef() && !IsLaneConstructed(I, Lane))
2766 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VecT, Result, Lane,
2767 DAG.getConstant(I, DL, MVT::i32));
2768 }
2769
2770 return Result;
2771}
2772
2773SDValue
2774WebAssemblyTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
2775 SelectionDAG &DAG) const {
2776 SDLoc DL(Op);
2777 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op.getNode())->getMask();
2778 MVT VecType = Op.getOperand(0).getSimpleValueType();
2779 assert(VecType.is128BitVector() && "Unexpected shuffle vector type");
2780 size_t LaneBytes = VecType.getVectorElementType().getSizeInBits() / 8;
2781
2782 // Space for two vector args and sixteen mask indices
2783 SDValue Ops[18];
2784 size_t OpIdx = 0;
2785 Ops[OpIdx++] = Op.getOperand(0);
2786 Ops[OpIdx++] = Op.getOperand(1);
2787
2788 // Expand mask indices to byte indices and materialize them as operands
2789 for (int M : Mask) {
2790 for (size_t J = 0; J < LaneBytes; ++J) {
2791 // Lower undefs (represented by -1 in mask) to {0..J}, which use a
2792 // whole lane of vector input, to allow further reduction at VM. E.g.
2793 // match an 8x16 byte shuffle to an equivalent cheaper 32x4 shuffle.
2794 uint64_t ByteIndex = M == -1 ? J : (uint64_t)M * LaneBytes + J;
2795 Ops[OpIdx++] = DAG.getConstant(ByteIndex, DL, MVT::i32);
2796 }
2797 }
2798
2799 return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
2800}
2801
2802SDValue WebAssemblyTargetLowering::LowerSETCC(SDValue Op,
2803 SelectionDAG &DAG) const {
2804 SDLoc DL(Op);
2805 // The legalizer does not know how to expand the unsupported comparison modes
2806 // of i64x2 vectors, so we manually unroll them here.
2807 assert(Op->getOperand(0)->getSimpleValueType(0) == MVT::v2i64);
2809 DAG.ExtractVectorElements(Op->getOperand(0), LHS);
2810 DAG.ExtractVectorElements(Op->getOperand(1), RHS);
2811 const SDValue &CC = Op->getOperand(2);
2812 auto MakeLane = [&](unsigned I) {
2813 return DAG.getNode(ISD::SELECT_CC, DL, MVT::i64, LHS[I], RHS[I],
2814 DAG.getConstant(uint64_t(-1), DL, MVT::i64),
2815 DAG.getConstant(uint64_t(0), DL, MVT::i64), CC);
2816 };
2817 return DAG.getBuildVector(Op->getValueType(0), DL,
2818 {MakeLane(0), MakeLane(1)});
2819}
2820
2821SDValue
2822WebAssemblyTargetLowering::LowerAccessVectorElement(SDValue Op,
2823 SelectionDAG &DAG) const {
2824 // Allow constant lane indices, expand variable lane indices
2825 SDNode *IdxNode = Op.getOperand(Op.getNumOperands() - 1).getNode();
2826 if (isa<ConstantSDNode>(IdxNode)) {
2827 // Ensure the index type is i32 to match the tablegen patterns
2828 uint64_t Idx = IdxNode->getAsZExtVal();
2829 SmallVector<SDValue, 3> Ops(Op.getNode()->ops());
2830 Ops[Op.getNumOperands() - 1] =
2831 DAG.getConstant(Idx, SDLoc(IdxNode), MVT::i32);
2832 return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(), Ops);
2833 }
2834 // Perform default expansion
2835 return SDValue();
2836}
2837
  // Unroll a vector shift into per-lane scalar shifts.
  EVT LaneT = Op.getSimpleValueType().getVectorElementType();
  // 32-bit and 64-bit unrolled shifts will have proper semantics
  if (LaneT.bitsGE(MVT::i32))
    return DAG.UnrollVectorOp(Op.getNode());
  // Otherwise mask the shift value to get proper semantics from 32-bit shift
  SDLoc DL(Op);
  size_t NumLanes = Op.getSimpleValueType().getVectorNumElements();
  // Mask keeps only the bits that are meaningful for a LaneT-wide shift, so
  // shifting in i32 behaves like a shift in the narrower lane type.
  SDValue Mask = DAG.getConstant(LaneT.getSizeInBits() - 1, DL, MVT::i32);
  unsigned ShiftOpcode = Op.getOpcode();
  // Pull both the values being shifted and the per-lane shift amounts out as
  // i32 scalars.
  SmallVector<SDValue, 16> ShiftedElements;
  DAG.ExtractVectorElements(Op.getOperand(0), ShiftedElements, 0, 0, MVT::i32);
  SmallVector<SDValue, 16> ShiftElements;
  DAG.ExtractVectorElements(Op.getOperand(1), ShiftElements, 0, 0, MVT::i32);
  SmallVector<SDValue, 16> UnrolledOps;
  for (size_t i = 0; i < NumLanes; ++i) {
    SDValue MaskedShiftValue =
        DAG.getNode(ISD::AND, DL, MVT::i32, ShiftElements[i], Mask);
    SDValue ShiftedValue = ShiftedElements[i];
    // Arithmetic right shifts need the lane value sign-extended within the
    // i32 so the shifted-in bits are copies of the lane's sign bit.
    if (ShiftOpcode == ISD::SRA)
      ShiftedValue = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32,
                                 ShiftedValue, DAG.getValueType(LaneT));
    UnrolledOps.push_back(
        DAG.getNode(ShiftOpcode, DL, MVT::i32, ShiftedValue, MaskedShiftValue));
  }
  return DAG.getBuildVector(Op.getValueType(), DL, UnrolledOps);
}
2865
2866SDValue WebAssemblyTargetLowering::LowerShift(SDValue Op,
2867 SelectionDAG &DAG) const {
2868 SDLoc DL(Op);
2869 // Only manually lower vector shifts
2870 assert(Op.getSimpleValueType().isVector());
2871
2872 uint64_t LaneBits = Op.getValueType().getScalarSizeInBits();
2873 auto ShiftVal = Op.getOperand(1);
2874
2875 // Try to skip bitmask operation since it is implied inside shift instruction
2876 auto SkipImpliedMask = [](SDValue MaskOp, uint64_t MaskBits) {
2877 if (MaskOp.getOpcode() != ISD::AND)
2878 return MaskOp;
2879 SDValue LHS = MaskOp.getOperand(0);
2880 SDValue RHS = MaskOp.getOperand(1);
2881 if (MaskOp.getValueType().isVector()) {
2882 APInt MaskVal;
2883 if (!ISD::isConstantSplatVector(RHS.getNode(), MaskVal))
2884 std::swap(LHS, RHS);
2885
2886 if (ISD::isConstantSplatVector(RHS.getNode(), MaskVal) &&
2887 MaskVal == MaskBits)
2888 MaskOp = LHS;
2889 } else {
2890 if (!isa<ConstantSDNode>(RHS.getNode()))
2891 std::swap(LHS, RHS);
2892
2893 auto ConstantRHS = dyn_cast<ConstantSDNode>(RHS.getNode());
2894 if (ConstantRHS && ConstantRHS->getAPIntValue() == MaskBits)
2895 MaskOp = LHS;
2896 }
2897
2898 return MaskOp;
2899 };
2900
2901 // Skip vector and operation
2902 ShiftVal = SkipImpliedMask(ShiftVal, LaneBits - 1);
2903 ShiftVal = DAG.getSplatValue(ShiftVal);
2904 if (!ShiftVal)
2905 return unrollVectorShift(Op, DAG);
2906
2907 // Skip scalar and operation
2908 ShiftVal = SkipImpliedMask(ShiftVal, LaneBits - 1);
2909 // Use anyext because none of the high bits can affect the shift
2910 ShiftVal = DAG.getAnyExtOrTrunc(ShiftVal, DL, MVT::i32);
2911
2912 unsigned Opcode;
2913 switch (Op.getOpcode()) {
2914 case ISD::SHL:
2915 Opcode = WebAssemblyISD::VEC_SHL;
2916 break;
2917 case ISD::SRA:
2918 Opcode = WebAssemblyISD::VEC_SHR_S;
2919 break;
2920 case ISD::SRL:
2921 Opcode = WebAssemblyISD::VEC_SHR_U;
2922 break;
2923 default:
2924 llvm_unreachable("unexpected opcode");
2925 }
2926
2927 return DAG.getNode(Opcode, DL, Op.getValueType(), Op.getOperand(0), ShiftVal);
2928}
2929
2930SDValue WebAssemblyTargetLowering::LowerFP_TO_INT_SAT(SDValue Op,
2931 SelectionDAG &DAG) const {
2932 EVT ResT = Op.getValueType();
2933 EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
2934
2935 if ((ResT == MVT::i32 || ResT == MVT::i64) &&
2936 (SatVT == MVT::i32 || SatVT == MVT::i64))
2937 return Op;
2938
2939 if (ResT == MVT::v4i32 && SatVT == MVT::i32)
2940 return Op;
2941
2942 if (ResT == MVT::v8i16 && SatVT == MVT::i16)
2943 return Op;
2944
2945 return SDValue();
2946}
2947
  // True when the node may ignore NaN inputs and the -0.0/+0.0 distinction:
  // either the node carries the nnan/nsz fast-math flags, or the DAG can
  // prove the relevant facts about the operands. Note the signed-zero leg
  // only needs ONE operand proven never logical zero (||), while the NaN leg
  // needs both (&&).
  return (Op->getFlags().hasNoNaNs() ||
          (DAG.isKnownNeverNaN(Op->getOperand(0)) &&
           DAG.isKnownNeverNaN(Op->getOperand(1)))) &&
         (Op->getFlags().hasNoSignedZeros() ||
          DAG.isKnownNeverLogicalZero(Op->getOperand(0)) ||
          DAG.isKnownNeverLogicalZero(Op->getOperand(1)));
}
2956
2957SDValue WebAssemblyTargetLowering::LowerFMIN(SDValue Op,
2958 SelectionDAG &DAG) const {
2959 if (Subtarget->hasRelaxedSIMD() && HasNoSignedZerosOrNaNs(Op, DAG)) {
2960 return DAG.getNode(WebAssemblyISD::RELAXED_FMIN, SDLoc(Op),
2961 Op.getValueType(), Op.getOperand(0), Op.getOperand(1));
2962 }
2963 return SDValue();
2964}
2965
2966SDValue WebAssemblyTargetLowering::LowerFMAX(SDValue Op,
2967 SelectionDAG &DAG) const {
2968 if (Subtarget->hasRelaxedSIMD() && HasNoSignedZerosOrNaNs(Op, DAG)) {
2969 return DAG.getNode(WebAssemblyISD::RELAXED_FMAX, SDLoc(Op),
2970 Op.getValueType(), Op.getOperand(0), Op.getOperand(1));
2971 }
2972 return SDValue();
2973}
2974
2975//===----------------------------------------------------------------------===//
2976// Custom DAG combine hooks
2977//===----------------------------------------------------------------------===//
// DAG combine for VECTOR_SHUFFLE: hoist a lane-count-preserving bitcast out
// of a unary shuffle so other combines can see through it.
static SDValue
  auto &DAG = DCI.DAG;
  auto Shuffle = cast<ShuffleVectorSDNode>(N);

  // Hoist vector bitcasts that don't change the number of lanes out of unary
  // shuffles, where they are less likely to get in the way of other combines.
  // (shuffle (vNxT1 (bitcast (vNxT0 x))), undef, mask) ->
  // (vNxT1 (bitcast (vNxT0 (shuffle x, undef, mask))))
  SDValue Bitcast = N->getOperand(0);
  if (Bitcast.getOpcode() != ISD::BITCAST)
    return SDValue();
  // Only unary shuffles (second input undef) are handled.
  if (!N->getOperand(1).isUndef())
    return SDValue();
  SDValue CastOp = Bitcast.getOperand(0);
  EVT SrcType = CastOp.getValueType();
  EVT DstType = Bitcast.getValueType();
  // The shuffle mask is only meaningful across the bitcast when the lane
  // counts match on both sides.
  if (!SrcType.is128BitVector() ||
      SrcType.getVectorNumElements() != DstType.getVectorNumElements())
    return SDValue();
  SDValue NewShuffle = DAG.getVectorShuffle(
      SrcType, SDLoc(N), CastOp, DAG.getUNDEF(SrcType), Shuffle->getMask());
  return DAG.getBitcast(DstType, NewShuffle);
}
3002
/// Convert ({u,s}itofp vec) --> ({u,s}itofp ({s,z}ext vec)) so it doesn't get
/// split up into scalar instructions during legalization, and the vector
/// extending instructions are selected in performVectorExtendCombine below.
static SDValue
  auto &DAG = DCI.DAG;
  assert(N->getOpcode() == ISD::UINT_TO_FP ||
         N->getOpcode() == ISD::SINT_TO_FP);

  EVT InVT = N->getOperand(0)->getValueType(0);
  EVT ResVT = N->getValueType(0);
  MVT ExtVT;
  // Pick the i32-lane vector type the conversion expects, keyed on the
  // result type; bail out for any other type combination.
  if (ResVT == MVT::v4f32 && (InVT == MVT::v4i16 || InVT == MVT::v4i8))
    ExtVT = MVT::v4i32;
  else if (ResVT == MVT::v2f64 && (InVT == MVT::v2i16 || InVT == MVT::v2i8))
    ExtVT = MVT::v2i32;
  else
    return SDValue();

  // Zero-extend for the unsigned conversion, sign-extend for the signed one,
  // then redo the conversion on the widened input.
  unsigned Op =
  SDValue Conv = DAG.getNode(Op, SDLoc(N), ExtVT, N->getOperand(0));
  return DAG.getNode(N->getOpcode(), SDLoc(N), ResVT, Conv);
}
3028
// DAG combine: turn a vector uitofp into sitofp when the input is known
// non-negative (nneg flag or proven-zero sign bit).
static SDValue
  auto &DAG = DCI.DAG;

  SDNodeFlags Flags = N->getFlags();
  SDValue Op0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // Optimize uitofp to sitofp when the sign bit is known to be zero.
  // Depending on the target (runtime) backend, this might be performance
  // neutral (e.g. AArch64) or a significant improvement (e.g. x86_64).
  if (VT.isVector() && (Flags.hasNonNeg() || DAG.SignBitIsZero(Op0))) {
    return DAG.getNode(ISD::SINT_TO_FP, SDLoc(N), VT, Op0);
  }

  return SDValue();
}
3047
// DAG combine for SIGN_EXTEND/ZERO_EXTEND of vectors: select Wasm
// extend_low/extend_high nodes for half-vector extracts, and build a tree of
// them for the wide v16i8 -> v16i32 case.
static SDValue
  auto &DAG = DCI.DAG;
  assert(N->getOpcode() == ISD::SIGN_EXTEND ||
         N->getOpcode() == ISD::ZERO_EXTEND);

  EVT ResVT = N->getValueType(0);
  bool IsSext = N->getOpcode() == ISD::SIGN_EXTEND;
  SDLoc DL(N);

  if (ResVT == MVT::v16i32 && N->getOperand(0)->getValueType(0) == MVT::v16i8) {
    // Use a tree of extend low/high to split and extend the input in two
    // layers to avoid doing several shuffles and even more extends.
    unsigned LowOp =
        IsSext ? WebAssemblyISD::EXTEND_LOW_S : WebAssemblyISD::EXTEND_LOW_U;
    unsigned HighOp =
        IsSext ? WebAssemblyISD::EXTEND_HIGH_S : WebAssemblyISD::EXTEND_HIGH_U;
    SDValue Input = N->getOperand(0);
    // First layer: v16i8 -> two v8i16 halves.
    SDValue LowHalf = DAG.getNode(LowOp, DL, MVT::v8i16, Input);
    SDValue HighHalf = DAG.getNode(HighOp, DL, MVT::v8i16, Input);
    // Second layer: each v8i16 half -> two v4i32 quarters, concatenated back
    // into the v16i32 result.
    SDValue Subvectors[] = {
        DAG.getNode(LowOp, DL, MVT::v4i32, LowHalf),
        DAG.getNode(HighOp, DL, MVT::v4i32, LowHalf),
        DAG.getNode(LowOp, DL, MVT::v4i32, HighHalf),
        DAG.getNode(HighOp, DL, MVT::v4i32, HighHalf),
    };
    return DAG.getNode(ISD::CONCAT_VECTORS, DL, ResVT, Subvectors);
  }

  // Combine ({s,z}ext (extract_subvector src, i)) into a widening operation if
  // possible before the extract_subvector can be expanded.
  auto Extract = N->getOperand(0);
  if (Extract.getOpcode() != ISD::EXTRACT_SUBVECTOR)
    return SDValue();
  auto Source = Extract.getOperand(0);
  auto *IndexNode = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
  if (IndexNode == nullptr)
    return SDValue();
  auto Index = IndexNode->getZExtValue();

  // Only v8i8, v4i16, and v2i32 extracts can be widened, and only if the
  // extracted subvector is the low or high half of its source.
  if (ResVT == MVT::v8i16) {
    if (Extract.getValueType() != MVT::v8i8 ||
        Source.getValueType() != MVT::v16i8 || (Index != 0 && Index != 8))
      return SDValue();
  } else if (ResVT == MVT::v4i32) {
    if (Extract.getValueType() != MVT::v4i16 ||
        Source.getValueType() != MVT::v8i16 || (Index != 0 && Index != 4))
      return SDValue();
  } else if (ResVT == MVT::v2i64) {
    if (Extract.getValueType() != MVT::v2i32 ||
        Source.getValueType() != MVT::v4i32 || (Index != 0 && Index != 2))
      return SDValue();
  } else {
    return SDValue();
  }

  // Index 0 selects the low half; the (only other) valid index selects the
  // high half.
  bool IsLow = Index == 0;

  unsigned Op = IsSext ? (IsLow ? WebAssemblyISD::EXTEND_LOW_S
                                : WebAssemblyISD::EXTEND_HIGH_S)
                       : (IsLow ? WebAssemblyISD::EXTEND_LOW_U
                                : WebAssemblyISD::EXTEND_HIGH_U);

  return DAG.getNode(Op, DL, ResVT, Source);
}
3115
// DAG combine that matches the two DAG shapes produced for "convert/round
// the low half, zero the high half" and selects the corresponding Wasm
// trunc_sat_zero / demote_zero node.
static SDValue
  auto &DAG = DCI.DAG;

  // Map the matched ISD conversion opcode onto the Wasm node that both
  // converts and zeroes the high lanes.
  auto GetWasmConversionOp = [](unsigned Op) {
    switch (Op) {
      return WebAssemblyISD::TRUNC_SAT_ZERO_S;
      return WebAssemblyISD::TRUNC_SAT_ZERO_U;
    case ISD::FP_ROUND:
      return WebAssemblyISD::DEMOTE_ZERO;
    }
    llvm_unreachable("unexpected op");
  };

  // Recognize a constant all-zero build_vector (the "zero half" operand).
  auto IsZeroSplat = [](SDValue SplatVal) {
    auto *Splat = dyn_cast<BuildVectorSDNode>(SplatVal.getNode());
    APInt SplatValue, SplatUndef;
    unsigned SplatBitSize;
    bool HasAnyUndefs;
    // Endianness doesn't matter in this context because we are looking for
    // an all-zero value.
    return Splat &&
           Splat->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
                                  HasAnyUndefs) &&
           SplatValue == 0;
  };

  if (N->getOpcode() == ISD::CONCAT_VECTORS) {
    // Combine this:
    //
    // (concat_vectors (v2i32 (fp_to_{s,u}int_sat $x, 32)), (v2i32 (splat 0)))
    //
    // into (i32x4.trunc_sat_f64x2_zero_{s,u} $x).
    //
    // Or this:
    //
    // (concat_vectors ({v2f32, v4f16} (fp_round ({v2f64, v4f32} $x))),
    //                 ({v2f32, v4f16} (splat 0)))
    //
    // into ({f32x4, f16x8}.demote_zero_{f64x2, f32x4} $x).
    EVT ResVT;
    EVT ExpectedConversionType;
    auto Conversion = N->getOperand(0);
    auto ConversionOp = Conversion.getOpcode();
    switch (ConversionOp) {
      ResVT = MVT::v4i32;
      ExpectedConversionType = MVT::v2i32;
      break;
    case ISD::FP_ROUND:
      if (Conversion.getValueType() == MVT::v2f32) {
        ResVT = MVT::v4f32;
        ExpectedConversionType = MVT::v2f32;
      } else if (Conversion.getValueType() == MVT::v4f16) {
        ResVT = MVT::v8f16;
        ExpectedConversionType = MVT::v4f16;
      } else {
        return SDValue();
      }
      break;
    default:
      return SDValue();
    }

    if (N->getValueType(0) != ResVT)
      return SDValue();

    if (Conversion.getValueType() != ExpectedConversionType)
      return SDValue();

    // Check that the conversion's input type is one we can actually lower.
    auto Source = Conversion.getOperand(0);
    if (!((Source.getValueType() == MVT::v2f64 && ResVT == MVT::v4f32) ||
          (Source.getValueType() == MVT::v2f64 && ResVT == MVT::v4i32) ||
          (Source.getValueType() == MVT::v4f32 && ResVT == MVT::v8f16)))
      return SDValue();

    // The concatenated high half must be an all-zero splat of the same
    // narrow type.
    if (!IsZeroSplat(N->getOperand(1)) ||
        N->getOperand(1).getValueType() != ExpectedConversionType)
      return SDValue();

    unsigned Op = GetWasmConversionOp(ConversionOp);
    return DAG.getNode(Op, SDLoc(N), ResVT, Source);
  }

  // Combine this:
  //
  // (fp_to_{s,u}int_sat (concat_vectors $x, (v2f64 (splat 0))), 32)
  //
  // into (i32x4.trunc_sat_f64x2_zero_{s,u} $x).
  //
  // Or this:
  //
  // ({v4f32, v8f16} (fp_round (concat_vectors $x,
  //                            ({v2f64, v4f32} (splat 0)))))
  //
  // into ({f32x4, f16x8}.demote_zero_{f64x2, f32x4} $x).
  EVT ResVT;
  auto ConversionOp = N->getOpcode();
  switch (ConversionOp) {
    ResVT = MVT::v4i32;
    break;
  case ISD::FP_ROUND:
    ResVT = N->getValueType(0);
    break;
  default:
    llvm_unreachable("unexpected op");
  }

  if (N->getValueType(0) != ResVT)
    return SDValue();

  // Here the concat (input zeroed in its high half) feeds the conversion.
  auto Concat = N->getOperand(0);
  if (Concat.getOpcode() != ISD::CONCAT_VECTORS)
    return SDValue();
  EVT ConcatVT = Concat.getValueType();
  EVT SourceVT = Concat.getOperand(0).getValueType();

  if (!IsZeroSplat(Concat.getOperand(1)))
    return SDValue();

  if (ConversionOp == ISD::FP_ROUND) {
    bool IsF64ToF32 =
        ConcatVT == MVT::v4f64 && SourceVT == MVT::v2f64 && ResVT == MVT::v4f32;
    bool IsF32ToF16 =
        ConcatVT == MVT::v8f32 && SourceVT == MVT::v4f32 && ResVT == MVT::v8f16;
    if (!(IsF64ToF32 || IsF32ToF16))
      return SDValue();
  } else {
    if (ConcatVT != MVT::v4f64 || SourceVT != MVT::v2f64 || ResVT != MVT::v4i32)
      return SDValue();
  }

  unsigned Op = GetWasmConversionOp(ConversionOp);
  return DAG.getNode(Op, SDLoc(N), ResVT, Concat.getOperand(0));
}
3256
3257// Helper to extract VectorWidth bits from Vec, starting from IdxVal.
3258static SDValue extractSubVector(SDValue Vec, unsigned IdxVal, SelectionDAG &DAG,
3259 const SDLoc &DL, unsigned VectorWidth) {
3260 EVT VT = Vec.getValueType();
3261 EVT ElVT = VT.getVectorElementType();
3262 unsigned Factor = VT.getSizeInBits() / VectorWidth;
3263 EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
3264 VT.getVectorNumElements() / Factor);
3265
3266 // Extract the relevant VectorWidth bits. Generate an EXTRACT_SUBVECTOR
3267 unsigned ElemsPerChunk = VectorWidth / ElVT.getSizeInBits();
3268 assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
3269
3270 // This is the index of the first element of the VectorWidth-bit chunk
3271 // we want. Since ElemsPerChunk is a power of 2 just need to clear bits.
3272 IdxVal &= ~(ElemsPerChunk - 1);
3273
3274 // If the input is a buildvector just emit a smaller one.
3275 if (Vec.getOpcode() == ISD::BUILD_VECTOR)
3276 return DAG.getBuildVector(ResultVT, DL,
3277 Vec->ops().slice(IdxVal, ElemsPerChunk));
3278
3279 SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, DL);
3280 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ResultVT, Vec, VecIdx);
3281}
3282
// Helper to recursively truncate vector elements in half with NARROW_U. DstVT
// is the expected destination value type after recursion. In is the initial
// input. Note that the input should have enough leading zero bits to prevent
// NARROW_U from saturating results.
                                     SelectionDAG &DAG) {
  EVT SrcVT = In.getValueType();

  // No truncation required, we might get here due to recursive calls.
  if (SrcVT == DstVT)
    return In;

  unsigned SrcSizeInBits = SrcVT.getSizeInBits();
  unsigned NumElems = SrcVT.getVectorNumElements();
  // The split-in-half recursion only works for power-of-two lane counts.
  if (!isPowerOf2_32(NumElems))
    return SDValue();
  assert(DstVT.getVectorNumElements() == NumElems && "Illegal truncation");
  assert(SrcSizeInBits > DstVT.getSizeInBits() && "Illegal truncation");

  LLVMContext &Ctx = *DAG.getContext();
  EVT PackedSVT = EVT::getIntegerVT(Ctx, SrcVT.getScalarSizeInBits() / 2);

  // Narrow to the largest type possible:
  // vXi64/vXi32 -> i16x8.narrow_i32x4_u and vXi16 -> i8x16.narrow_i16x8_u.
  EVT InVT = MVT::i16, OutVT = MVT::i8;
  if (SrcVT.getScalarSizeInBits() > 16) {
    InVT = MVT::i32;
    OutVT = MVT::i16;
  }
  unsigned SubSizeInBits = SrcSizeInBits / 2;
  InVT = EVT::getVectorVT(Ctx, InVT, SubSizeInBits / InVT.getSizeInBits());
  OutVT = EVT::getVectorVT(Ctx, OutVT, SubSizeInBits / OutVT.getSizeInBits());

  // Split lower/upper subvectors.
  SDValue Lo = extractSubVector(In, 0, DAG, DL, SubSizeInBits);
  SDValue Hi = extractSubVector(In, NumElems / 2, DAG, DL, SubSizeInBits);

  // 256bit -> 128bit truncate - Narrow lower/upper 128-bit subvectors.
  if (SrcVT.is256BitVector() && DstVT.is128BitVector()) {
    Lo = DAG.getBitcast(InVT, Lo);
    Hi = DAG.getBitcast(InVT, Hi);
    SDValue Res = DAG.getNode(WebAssemblyISD::NARROW_U, DL, OutVT, Lo, Hi);
    return DAG.getBitcast(DstVT, Res);
  }

  // Recursively narrow lower/upper subvectors, concat result and narrow again.
  EVT PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems / 2);
  Lo = truncateVectorWithNARROW(PackedVT, Lo, DL, DAG);
  Hi = truncateVectorWithNARROW(PackedVT, Hi, DL, DAG);

  PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems);
  SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, PackedVT, Lo, Hi);
  return truncateVectorWithNARROW(DstVT, Res, DL, DAG);
}
3337
  // Combine a vector truncate into a chain of NARROW_U nodes.
  auto &DAG = DCI.DAG;

  SDValue In = N->getOperand(0);
  EVT InVT = In.getValueType();
  if (!InVT.isSimple())
    return SDValue();

  EVT OutVT = N->getValueType(0);
  if (!OutVT.isVector())
    return SDValue();

  EVT OutSVT = OutVT.getVectorElementType();
  EVT InSVT = InVT.getVectorElementType();
  // Currently only cover truncate to v16i8 or v8i16.
  if (!((InSVT == MVT::i16 || InSVT == MVT::i32 || InSVT == MVT::i64) &&
        (OutSVT == MVT::i8 || OutSVT == MVT::i16) && OutVT.is128BitVector()))
    return SDValue();

  SDLoc DL(N);
  // Mask off everything above the destination lane width so NARROW_U cannot
  // saturate (truncateVectorWithNARROW requires zeroed high bits).
                           OutVT.getScalarSizeInBits());
  In = DAG.getNode(ISD::AND, DL, InVT, In, DAG.getConstant(Mask, DL, InVT));
  return truncateVectorWithNARROW(OutVT, In, DL, DAG);
}
3364
  // Combine a bitcast of an i1 vector into an integer: lower it via the
  // bitmask intrinsic (small lane counts) or a reconstructed concat of
  // per-chunk bitmasks (32/64 lanes).
  using namespace llvm::SDPatternMatch;
  auto &DAG = DCI.DAG;
  SDLoc DL(N);
  SDValue Src = N->getOperand(0);
  EVT VT = N->getValueType(0);
  EVT SrcVT = Src.getValueType();

  // Only handle pre-legalization bitcasts of fixed <N x i1> to scalar ints.
  if (!(DCI.isBeforeLegalize() && VT.isScalarInteger() &&
        SrcVT.isFixedLengthVector() && SrcVT.getScalarType() == MVT::i1))
    return SDValue();

  unsigned NumElts = SrcVT.getVectorNumElements();
  // Lane type such that NumElts lanes fill a 128-bit vector.
  EVT Width = MVT::getIntegerVT(128 / NumElts);

  // bitcast <N x i1> to iN, where N = 2, 4, 8, 16 (legal)
  // ==> bitmask
  if (NumElts == 2 || NumElts == 4 || NumElts == 8 || NumElts == 16) {
    return DAG.getZExtOrTrunc(
        DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32,
                    {DAG.getConstant(Intrinsic::wasm_bitmask, DL, MVT::i32),
                     DAG.getSExtOrTrunc(N->getOperand(0), DL,
                                        SrcVT.changeVectorElementType(
                                            *DAG.getContext(), Width))}),
        DL, VT);
  }

  // bitcast <N x i1>(setcc ...) to concat iN, where N = 32 and 64 (illegal)
  if (NumElts == 32 || NumElts == 64) {
    SDValue Concat, SetCCVector;
    ISD::CondCode SetCond;

    // Only the (bitcast (setcc concat, vec)) shape is handled here.
    if (!sd_match(N, m_BitCast(m_c_SetCC(m_Value(Concat), m_Value(SetCCVector),
                                         m_CondCode(SetCond)))))
      return SDValue();
    if (Concat.getOpcode() != ISD::CONCAT_VECTORS)
      return SDValue();

    // Reconstruct the wide bitmask from each CONCAT_VECTORS operand.
    // Derive the per-chunk mask/integer types from the actual operand type
    // instead of hardcoding v16i1 / i16 for every chunk.
    EVT ConcatOperandVT = Concat.getOperand(0).getValueType();
    unsigned ConcatOperandNumElts = ConcatOperandVT.getVectorNumElements();

    EVT ConcatOperandMaskVT =
        EVT::getVectorVT(*DAG.getContext(), MVT::i1,
                         ElementCount::getFixed(ConcatOperandNumElts));
    EVT ConcatOperandBitmaskVT =
        EVT::getIntegerVT(*DAG.getContext(), ConcatOperandNumElts);
    EVT ReturnVT = N->getValueType(0);
    SDValue ReconstructedBitmask = DAG.getConstant(0, DL, ReturnVT);
    // Example:
    // v32i16 = concat(v8i16, v8i16, v8i16, v8i16)
    // -> v8i1 + v8i1 + v8i1 + v8i1
    // -> i8 + i8 + i8 + i8
    // -> reconstructed i32 bitmask
    for (size_t I = 0; I < Concat->ops().size(); ++I) {
      SDValue ConcatOperand = Concat.getOperand(I);
      assert(ConcatOperand.getValueType() == ConcatOperandVT &&
             "concat_vectors operands must have the same type");

      // Extract the 128-bit slice of the RHS that lines up with this chunk.
      SDValue SetCCVectorOperand =
          extractSubVector(SetCCVector, I * ConcatOperandNumElts, DAG, DL, 128);
      if (!SetCCVectorOperand ||
          SetCCVectorOperand.getValueType() != ConcatOperandVT)
        return SDValue();

      // Build the per-chunk mask using the correct chunk type:
      // v16i8 -> v16i1 -> i16
      // v8i16 -> v8i1 -> i8
      // v4i32 -> v4i1 -> i4
      // v2i64 -> v2i1 -> i2
      SDValue ConcatOperandMask = DAG.getSetCC(
          DL, ConcatOperandMaskVT, ConcatOperand, SetCCVectorOperand, SetCond);
      SDValue ConcatOperandBitmask =
          DAG.getBitcast(ConcatOperandBitmaskVT, ConcatOperandMask);
      SDValue ExtendedConcatOperandBitmask =
          DAG.getZExtOrTrunc(ConcatOperandBitmask, DL, ReturnVT);

      // Shift the previously reconstructed bits to make room for this chunk.
      if (I != 0) {
        ReconstructedBitmask = DAG.getNode(
            ISD::SHL, DL, ReturnVT, ReconstructedBitmask,
            DAG.getShiftAmountConstant(ConcatOperandNumElts, ReturnVT, DL));
      }

      // Merge disjoint partial bitmasks with OR.
      ReconstructedBitmask =
          DAG.getNode(ISD::OR, DL, ReturnVT, ReconstructedBitmask,
                      ExtendedConcatOperandBitmask);
    }

    return ReconstructedBitmask;
  }

  return SDValue();
}
3463
  // bitmask (setcc <X>, 0, setlt) => bitmask X
  // Rationale: (x < 0) and x have the same sign bit per lane, and bitmask
  // reads only the sign bits, so the compare is redundant.
  assert(N->getOpcode() == ISD::INTRINSIC_WO_CHAIN);
  using namespace llvm::SDPatternMatch;

  if (N->getConstantOperandVal(0) != Intrinsic::wasm_bitmask)
    return SDValue();

  SDValue LHS;
  // Match (setcc X, 0, setlt) as the bitmask operand.
  if (!sd_match(N->getOperand(1), m_c_SetCC(m_Value(LHS), m_Zero(),
    return SDValue();

  SDLoc DL(N);
  // Re-emit the bitmask directly on X, dropping the compare.
  return DAG.getNode(
      ISD::INTRINSIC_WO_CHAIN, DL, N->getValueType(0),
      {DAG.getConstant(Intrinsic::wasm_bitmask, DL, MVT::i32), LHS});
}
3482
  // any_true (setcc <X>, 0, eq) => (not (all_true X))
  // all_true (setcc <X>, 0, eq) => (not (any_true X))
  // any_true (setcc <X>, 0, ne) => (any_true X)
  // all_true (setcc <X>, 0, ne) => (all_true X)
  assert(N->getOpcode() == ISD::INTRINSIC_WO_CHAIN);
  using namespace llvm::SDPatternMatch;

  SDValue LHS;
  // The intrinsic's vector operand must be a compare against zero.
  if (N->getNumOperands() < 2 ||
      !sd_match(N->getOperand(1),
    return SDValue();
  EVT LT = LHS.getValueType();
  // Only types that fit in a 128-bit SIMD register qualify.
  if (LT.getScalarSizeInBits() > 128 / LT.getVectorNumElements())
    return SDValue();

  // Try one (intrinsic, condcode) -> intrinsic rewrite; SETEQ results are
  // negated via XOR 1 per the table above.
  auto CombineSetCC = [&N, &DAG](Intrinsic::WASMIntrinsics InPre,
                                 ISD::CondCode SetType,
                                 Intrinsic::WASMIntrinsics InPost) {
    if (N->getConstantOperandVal(0) != InPre)
      return SDValue();

    SDValue LHS;
    if (!sd_match(N->getOperand(1), m_c_SetCC(m_Value(LHS), m_Zero(),
                                              m_SpecificCondCode(SetType))))
      return SDValue();

    SDLoc DL(N);
    SDValue Ret = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32,
                              {DAG.getConstant(InPost, DL, MVT::i32), LHS});
    if (SetType == ISD::SETEQ)
      Ret = DAG.getNode(ISD::XOR, DL, MVT::i32, Ret,
                        DAG.getConstant(1, DL, MVT::i32));
    return DAG.getZExtOrTrunc(Ret, DL, N->getValueType(0));
  };

  if (SDValue AnyTrueEQ = CombineSetCC(Intrinsic::wasm_anytrue, ISD::SETEQ,
                                       Intrinsic::wasm_alltrue))
    return AnyTrueEQ;
  if (SDValue AllTrueEQ = CombineSetCC(Intrinsic::wasm_alltrue, ISD::SETEQ,
                                       Intrinsic::wasm_anytrue))
    return AllTrueEQ;
  if (SDValue AnyTrueNE = CombineSetCC(Intrinsic::wasm_anytrue, ISD::SETNE,
                                       Intrinsic::wasm_anytrue))
    return AnyTrueNE;
  if (SDValue AllTrueNE = CombineSetCC(Intrinsic::wasm_alltrue, ISD::SETNE,
                                       Intrinsic::wasm_alltrue))
    return AllTrueNE;

  return SDValue();
}
3535
// Match a SETCC node of shape (setcc LHS, MatchRHS, MatchCond) and, on
// success, rewrite it as the given any_true/all_true intrinsic applied to
// LHS's vector operand (optionally negated with XOR 1).
template <int MatchRHS, ISD::CondCode MatchCond, bool RequiresNegate,
          Intrinsic::ID Intrin>
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  SDValue Cond = N->getOperand(2);
  if (MatchCond != cast<CondCodeSDNode>(Cond)->get())
    return SDValue();

  if (MatchRHS != cast<ConstantSDNode>(RHS)->getSExtValue())
    return SDValue();

  SDLoc DL(N);
  // Emit the intrinsic on the bitcast's vector input, widened to VecVT.
  SDValue Ret =
      DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32,
                  {DAG.getConstant(Intrin, DL, MVT::i32),
                   DAG.getSExtOrTrunc(LHS->getOperand(0), DL, VecVT)});
  if (RequiresNegate)
    Ret = DAG.getNode(ISD::XOR, DL, MVT::i32, Ret,
                      DAG.getConstant(1, DL, MVT::i32));
  return DAG.getZExtOrTrunc(Ret, DL, N->getValueType(0));
}
3558
/// Try to convert a i128 comparison to a v16i8 comparison before type
/// legalization splits it up into chunks
static SDValue
                               const WebAssemblySubtarget *Subtarget) {

  SDLoc DL(N);
  SDValue X = N->getOperand(0);
  SDValue Y = N->getOperand(1);
  EVT VT = N->getValueType(0);
  EVT OpVT = X.getValueType();

  SelectionDAG &DAG = DCI.DAG;
  // Bail out when the function forbids implicit FP/SIMD usage.
                        Attribute::NoImplicitFloat))
    return SDValue();

  ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
  // We're looking for an oversized integer equality comparison with SIMD
  // support available. NOTE(review): the isScalarInteger/isByteSized checks
  // appear subsumed by OpVT != MVT::i128 — confirm before simplifying.
  if (!OpVT.isScalarInteger() || !OpVT.isByteSized() || OpVT != MVT::i128 ||
      !Subtarget->hasSIMD128() || !isIntEqualitySetCC(CC))
    return SDValue();

  // Don't perform this combine if constructing the vector will be expensive.
  auto IsVectorBitCastCheap = [](SDValue X) {
    return isa<ConstantSDNode>(X) || X.getOpcode() == ISD::LOAD;
  };

  if (!IsVectorBitCastCheap(X) || !IsVectorBitCastCheap(Y))
    return SDValue();

  // Compare the operands lane-wise as v16i8, then reduce: all lanes equal
  // for SETEQ (alltrue), any lane different for SETNE (anytrue).
  SDValue VecX = DAG.getBitcast(MVT::v16i8, X);
  SDValue VecY = DAG.getBitcast(MVT::v16i8, Y);
  SDValue Cmp = DAG.getSetCC(DL, MVT::v16i8, VecX, VecY, CC);

  SDValue Intr =
      DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32,
                  {DAG.getConstant(CC == ISD::SETEQ ? Intrinsic::wasm_alltrue
                                                    : Intrinsic::wasm_anytrue,
                                   DL, MVT::i32),
                   Cmp});

  // Convert the i32 intrinsic result back into the expected boolean VT.
  return DAG.getSetCC(DL, VT, Intr, DAG.getConstant(0, DL, MVT::i32),
                      ISD::SETNE);
}
3605
                  const WebAssemblySubtarget *Subtarget) {
  // SETCC combine: first try the vector-sized i128 equality rewrite, then
  // try to fold (setcc (bitcast vNi1), const) into any_true/all_true.
  if (!DCI.isBeforeLegalize())
    return SDValue();

  EVT VT = N->getValueType(0);
  if (!VT.isScalarInteger())
    return SDValue();

  if (SDValue V = combineVectorSizedSetCCEquality(N, DCI, Subtarget))
    return V;

  SDValue LHS = N->getOperand(0);
  if (LHS->getOpcode() != ISD::BITCAST)
    return SDValue();

  EVT FromVT = LHS->getOperand(0).getValueType();
  if (!FromVT.isFixedLengthVector() || FromVT.getVectorElementType() != MVT::i1)
    return SDValue();

  unsigned NumElts = FromVT.getVectorNumElements();
  if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16)
    return SDValue();

  // NOTE(review): cast<> asserts rather than returning null on mismatch, so
  // this null-check only guards a constant operand with dyn_cast — verify
  // the intent here.
  if (!cast<ConstantSDNode>(N->getOperand(1)))
    return SDValue();

  auto &DAG = DCI.DAG;
  // Widen the i1 lanes to the lane width that fills 128 bits.
  EVT VecVT = FromVT.changeVectorElementType(*DAG.getContext(),
                                             MVT::getIntegerVT(128 / NumElts));
  // setcc (iN (bitcast (vNi1 X))), 0, ne
  // ==> any_true (vNi1 X)
          N, VecVT, DAG)) {
    return Match;
  }
  // setcc (iN (bitcast (vNi1 X))), 0, eq
  // ==> xor (any_true (vNi1 X)), -1
          N, VecVT, DAG)) {
    return Match;
  }
  // setcc (iN (bitcast (vNi1 X))), -1, eq
  // ==> all_true (vNi1 X)
          N, VecVT, DAG)) {
    return Match;
  }
  // setcc (iN (bitcast (vNi1 X))), -1, ne
  // ==> xor (all_true (vNi1 X)), -1
          N, VecVT, DAG)) {
    return Match;
  }
  return SDValue();
}
3663
  // Combine a wide (v8i32/v16i32) multiply of extended i8 inputs into a tree
  // of Wasm extend_low/high nodes around narrower v8i16 multiplies.
  EVT VT = N->getValueType(0);
  if (VT != MVT::v8i32 && VT != MVT::v16i32)
    return SDValue();

  // Mul with extending inputs.
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  // Both inputs must use the same kind of extension.
  if (LHS.getOpcode() != RHS.getOpcode())
    return SDValue();

  if (LHS.getOpcode() != ISD::SIGN_EXTEND &&
      LHS.getOpcode() != ISD::ZERO_EXTEND)
    return SDValue();

  if (LHS->getOperand(0).getValueType() != RHS->getOperand(0).getValueType())
    return SDValue();

  EVT FromVT = LHS->getOperand(0).getValueType();
  EVT EltTy = FromVT.getVectorElementType();
  if (EltTy != MVT::i8)
    return SDValue();

  // For an input DAG that looks like this
  // %a = input_type
  // %b = input_type
  // %lhs = extend %a to output_type
  // %rhs = extend %b to output_type
  // %mul = mul %lhs, %rhs

  // input_type | output_type | instructions
  // v16i8      | v16i32      | %low = i16x8.extmul_low_i8x16_ %a, %b
  //            |             | %high = i16x8.extmul_high_i8x16_, %a, %b
  //            |             | %low_low = i32x4.ext_low_i16x8_ %low
  //            |             | %low_high = i32x4.ext_high_i16x8_ %low
  //            |             | %high_low = i32x4.ext_low_i16x8_ %high
  //            |             | %high_high = i32x4.ext_high_i16x8_ %high
  //            |             | %res = concat_vector(...)
  // v8i8       | v8i32       | %low = i16x8.extmul_low_i8x16_ %a, %b
  //            |             | %low_low = i32x4.ext_low_i16x8_ %low
  //            |             | %low_high = i32x4.ext_high_i16x8_ %low
  //            |             | %res = concat_vector(%low_low, %low_high)

  SDLoc DL(N);
  unsigned NumElts = VT.getVectorNumElements();
  SDValue ExtendInLHS = LHS->getOperand(0);
  SDValue ExtendInRHS = RHS->getOperand(0);
  bool IsSigned = LHS->getOpcode() == ISD::SIGN_EXTEND;
  unsigned ExtendLowOpc =
      IsSigned ? WebAssemblyISD::EXTEND_LOW_S : WebAssemblyISD::EXTEND_LOW_U;
  unsigned ExtendHighOpc =
      IsSigned ? WebAssemblyISD::EXTEND_HIGH_S : WebAssemblyISD::EXTEND_HIGH_U;

  auto GetExtendLow = [&DAG, &DL, &ExtendLowOpc](EVT VT, SDValue Op) {
    return DAG.getNode(ExtendLowOpc, DL, VT, Op);
  };
  auto GetExtendHigh = [&DAG, &DL, &ExtendHighOpc](EVT VT, SDValue Op) {
    return DAG.getNode(ExtendHighOpc, DL, VT, Op);
  };

  if (NumElts == 16) {
    // v16i8 inputs: two v8i16 multiplies of the extended halves, each then
    // split into low/high v4i32 quarters and concatenated.
    SDValue LowLHS = GetExtendLow(MVT::v8i16, ExtendInLHS);
    SDValue LowRHS = GetExtendLow(MVT::v8i16, ExtendInRHS);
    SDValue MulLow = DAG.getNode(ISD::MUL, DL, MVT::v8i16, LowLHS, LowRHS);
    SDValue HighLHS = GetExtendHigh(MVT::v8i16, ExtendInLHS);
    SDValue HighRHS = GetExtendHigh(MVT::v8i16, ExtendInRHS);
    SDValue MulHigh = DAG.getNode(ISD::MUL, DL, MVT::v8i16, HighLHS, HighRHS);
    SDValue SubVectors[] = {
        GetExtendLow(MVT::v4i32, MulLow),
        GetExtendHigh(MVT::v4i32, MulLow),
        GetExtendLow(MVT::v4i32, MulHigh),
        GetExtendHigh(MVT::v4i32, MulHigh),
    };
    return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, SubVectors);
  } else {
    assert(NumElts == 8);
    // v8i8 inputs: extend the whole input with the original extend opcode,
    // do one v8i16 multiply and split it into two v4i32 halves.
    SDValue LowLHS = DAG.getNode(LHS->getOpcode(), DL, MVT::v8i16, ExtendInLHS);
    SDValue LowRHS = DAG.getNode(RHS->getOpcode(), DL, MVT::v8i16, ExtendInRHS);
    SDValue MulLow = DAG.getNode(ISD::MUL, DL, MVT::v8i16, LowLHS, LowRHS);
    SDValue Lo = GetExtendLow(MVT::v4i32, MulLow);
    SDValue Hi = GetExtendHigh(MVT::v4i32, MulLow);
    return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
  }
  // Unreachable: both branches above return.
  return SDValue();
}
3749
3752 assert(N->getOpcode() == ISD::MUL);
3753 EVT VT = N->getValueType(0);
3754 if (!VT.isVector())
3755 return SDValue();
3756
3757 if (auto Res = TryWideExtMulCombine(N, DCI.DAG))
3758 return Res;
3759
3760 // We don't natively support v16i8 or v8i8 mul, but we do support v8i16. So,
3761 // extend them to v8i16.
3762 if (VT != MVT::v8i8 && VT != MVT::v16i8)
3763 return SDValue();
3764
3765 SDLoc DL(N);
3766 SelectionDAG &DAG = DCI.DAG;
3767 SDValue LHS = N->getOperand(0);
3768 SDValue RHS = N->getOperand(1);
3769 EVT MulVT = MVT::v8i16;
3770
3771 if (VT == MVT::v8i8) {
3772 SDValue PromotedLHS = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, LHS,
3773 DAG.getUNDEF(MVT::v8i8));
3774 SDValue PromotedRHS = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, RHS,
3775 DAG.getUNDEF(MVT::v8i8));
3776 SDValue LowLHS =
3777 DAG.getNode(WebAssemblyISD::EXTEND_LOW_U, DL, MulVT, PromotedLHS);
3778 SDValue LowRHS =
3779 DAG.getNode(WebAssemblyISD::EXTEND_LOW_U, DL, MulVT, PromotedRHS);
3780 SDValue MulLow = DAG.getBitcast(
3781 MVT::v16i8, DAG.getNode(ISD::MUL, DL, MulVT, LowLHS, LowRHS));
3782 // Take the low byte of each lane.
3783 SDValue Shuffle = DAG.getVectorShuffle(
3784 MVT::v16i8, DL, MulLow, DAG.getUNDEF(MVT::v16i8),
3785 {0, 2, 4, 6, 8, 10, 12, 14, -1, -1, -1, -1, -1, -1, -1, -1});
3786 return extractSubVector(Shuffle, 0, DAG, DL, 64);
3787 } else {
3788 assert(VT == MVT::v16i8 && "Expected v16i8");
3789 SDValue LowLHS = DAG.getNode(WebAssemblyISD::EXTEND_LOW_U, DL, MulVT, LHS);
3790 SDValue LowRHS = DAG.getNode(WebAssemblyISD::EXTEND_LOW_U, DL, MulVT, RHS);
3791 SDValue HighLHS =
3792 DAG.getNode(WebAssemblyISD::EXTEND_HIGH_U, DL, MulVT, LHS);
3793 SDValue HighRHS =
3794 DAG.getNode(WebAssemblyISD::EXTEND_HIGH_U, DL, MulVT, RHS);
3795
3796 SDValue MulLow =
3797 DAG.getBitcast(VT, DAG.getNode(ISD::MUL, DL, MulVT, LowLHS, LowRHS));
3798 SDValue MulHigh =
3799 DAG.getBitcast(VT, DAG.getNode(ISD::MUL, DL, MulVT, HighLHS, HighRHS));
3800
3801 // Take the low byte of each lane.
3802 return DAG.getVectorShuffle(
3803 VT, DL, MulLow, MulHigh,
3804 {0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30});
3805 }
3806}
3807
3808SDValue DoubleVectorWidth(SDValue In, unsigned RequiredNumElems,
3809 SelectionDAG &DAG) {
3810 SDLoc DL(In);
3811 LLVMContext &Ctx = *DAG.getContext();
3812 EVT InVT = In.getValueType();
3813 unsigned NumElems = InVT.getVectorNumElements() * 2;
3814 EVT OutVT = EVT::getVectorVT(Ctx, InVT.getVectorElementType(), NumElems);
3815 SDValue Concat =
3816 DAG.getNode(ISD::CONCAT_VECTORS, DL, OutVT, In, DAG.getPOISON(InVT));
3817 if (NumElems < RequiredNumElems) {
3818 return DoubleVectorWidth(Concat, RequiredNumElems, DAG);
3819 }
3820 return Concat;
3821}
3822
3824 EVT OutVT = N->getValueType(0);
3825 if (!OutVT.isVector())
3826 return SDValue();
3827
3828 EVT OutElTy = OutVT.getVectorElementType();
3829 if (OutElTy != MVT::i8 && OutElTy != MVT::i16)
3830 return SDValue();
3831
3832 unsigned NumElems = OutVT.getVectorNumElements();
3833 if (!isPowerOf2_32(NumElems))
3834 return SDValue();
3835
3836 EVT FPVT = N->getOperand(0)->getValueType(0);
3837 if (FPVT.getVectorElementType() != MVT::f32)
3838 return SDValue();
3839
3840 SDLoc DL(N);
3841
3842 // First, convert to i32.
3843 LLVMContext &Ctx = *DAG.getContext();
3844 EVT IntVT = EVT::getVectorVT(Ctx, MVT::i32, NumElems);
3845 SDValue ToInt = DAG.getNode(N->getOpcode(), DL, IntVT, N->getOperand(0));
3847 OutVT.getScalarSizeInBits());
3848 // Mask out the top MSBs.
3849 SDValue Masked =
3850 DAG.getNode(ISD::AND, DL, IntVT, ToInt, DAG.getConstant(Mask, DL, IntVT));
3851
3852 if (OutVT.getSizeInBits() < 128) {
3853 // Create a wide enough vector that we can use narrow.
3854 EVT NarrowedVT = OutElTy == MVT::i8 ? MVT::v16i8 : MVT::v8i16;
3855 unsigned NumRequiredElems = NarrowedVT.getVectorNumElements();
3856 SDValue WideVector = DoubleVectorWidth(Masked, NumRequiredElems, DAG);
3857 SDValue Trunc = truncateVectorWithNARROW(NarrowedVT, WideVector, DL, DAG);
3858 return DAG.getBitcast(
3859 OutVT, extractSubVector(Trunc, 0, DAG, DL, OutVT.getSizeInBits()));
3860 } else {
3861 return truncateVectorWithNARROW(OutVT, Masked, DL, DAG);
3862 }
3863 return SDValue();
3864}
3865
3866// Wide vector shift operations such as v8i32 with sign-extended
3867// operands cause Type Legalizer crashes because the target-specific
3868// extension nodes cannot be directly mapped to the 256-bit size.
3869//
3870// To resolve the crash and optimize performance, we intercept the
3871// illegal v8i32 shift in DAGCombine. We convert the shift amounts
3872// into multipliers and manually split the vector into two v4i32 halves.
3873//
3874// Before: t1: v8i32 = shl (sign_extend v8i16), const_vec
3875// After : t2: v4i32 = mul (ext_low_s v8i16), (ext_low_s narrow_vec)
3876// t3: v4i32 = mul (ext_high_s v8i16), (ext_high_s narrow_vec)
3877// t4: v8i32 = concat_vectors t2, t3
3880 SelectionDAG &DAG = DCI.DAG;
3881 assert(N->getOpcode() == ISD::SHL);
3882 EVT VT = N->getValueType(0);
3883 if (VT != MVT::v8i32)
3884 return SDValue();
3885
3886 SDValue LHS = N->getOperand(0);
3887 SDValue RHS = N->getOperand(1);
3888 unsigned ExtOpc = LHS.getOpcode();
3889 if (ExtOpc != ISD::SIGN_EXTEND && ExtOpc != ISD::ZERO_EXTEND)
3890 return SDValue();
3891
3892 if (RHS.getOpcode() != ISD::BUILD_VECTOR)
3893 return SDValue();
3894
3895 SDLoc DL(N);
3896 SDValue ExtendIn = LHS.getOperand(0);
3897 EVT FromVT = ExtendIn.getValueType();
3898 if (FromVT != MVT::v8i16)
3899 return SDValue();
3900
3901 unsigned NumElts = VT.getVectorNumElements();
3902 unsigned BitWidth = FromVT.getScalarSizeInBits();
3903 bool IsSigned = (ExtOpc == ISD::SIGN_EXTEND);
3904 unsigned MaxValidShift = IsSigned ? (BitWidth - 1) : BitWidth;
3905 SmallVector<SDValue, 16> MulConsts;
3906 for (unsigned I = 0; I < NumElts; ++I) {
3907 auto *C = dyn_cast<ConstantSDNode>(RHS.getOperand(I));
3908 if (!C)
3909 return SDValue();
3910
3911 const APInt &ShiftAmt = C->getAPIntValue();
3912 if (ShiftAmt.uge(MaxValidShift))
3913 return SDValue();
3914
3915 APInt MulAmt = APInt::getOneBitSet(BitWidth, ShiftAmt.getZExtValue());
3916 MulConsts.push_back(DAG.getConstant(MulAmt, DL, FromVT.getScalarType(),
3917 /*isTarget=*/false, /*isOpaque=*/true));
3918 }
3919
3920 SDValue NarrowConst = DAG.getBuildVector(FromVT, DL, MulConsts);
3921 unsigned ExtLowOpc =
3922 IsSigned ? WebAssemblyISD::EXTEND_LOW_S : WebAssemblyISD::EXTEND_LOW_U;
3923 unsigned ExtHighOpc =
3924 IsSigned ? WebAssemblyISD::EXTEND_HIGH_S : WebAssemblyISD::EXTEND_HIGH_U;
3925
3926 EVT HalfVT = MVT::v4i32;
3927 SDValue LHSLo = DAG.getNode(ExtLowOpc, DL, HalfVT, ExtendIn);
3928 SDValue LHSHi = DAG.getNode(ExtHighOpc, DL, HalfVT, ExtendIn);
3929 SDValue RHSLo = DAG.getNode(ExtLowOpc, DL, HalfVT, NarrowConst);
3930 SDValue RHSHi = DAG.getNode(ExtHighOpc, DL, HalfVT, NarrowConst);
3931 SDValue MulLo = DAG.getNode(ISD::MUL, DL, HalfVT, LHSLo, RHSLo);
3932 SDValue MulHi = DAG.getNode(ISD::MUL, DL, HalfVT, LHSHi, RHSHi);
3933 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, MulLo, MulHi);
3934}
3935
3936SDValue
3937WebAssemblyTargetLowering::PerformDAGCombine(SDNode *N,
3938 DAGCombinerInfo &DCI) const {
3939 switch (N->getOpcode()) {
3940 default:
3941 return SDValue();
3942 case ISD::BITCAST:
3943 return performBitcastCombine(N, DCI);
3944 case ISD::SETCC:
3945 return performSETCCCombine(N, DCI, Subtarget);
3947 return performVECTOR_SHUFFLECombine(N, DCI);
3948 case ISD::SIGN_EXTEND:
3949 case ISD::ZERO_EXTEND:
3950 return performVectorExtendCombine(N, DCI);
3951 case ISD::UINT_TO_FP:
3952 if (auto ExtCombine = performVectorExtendToFPCombine(N, DCI))
3953 return ExtCombine;
3954 return performVectorNonNegToFPCombine(N, DCI);
3955 case ISD::SINT_TO_FP:
3956 return performVectorExtendToFPCombine(N, DCI);
3959 case ISD::FP_ROUND:
3961 return performVectorTruncZeroCombine(N, DCI);
3962 case ISD::FP_TO_SINT:
3963 case ISD::FP_TO_UINT:
3964 return performConvertFPCombine(N, DCI.DAG);
3965 case ISD::TRUNCATE:
3966 return performTruncateCombine(N, DCI);
3968 if (SDValue V = performBitmaskCombine(N, DCI.DAG))
3969 return V;
3970 return performAnyAllCombine(N, DCI.DAG);
3971 }
3972 case ISD::MUL:
3973 return performMulCombine(N, DCI);
3974 case ISD::SHL:
3975 return performShiftCombine(N, DCI);
3976 }
3977}
static SDValue performMulCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget)
static SDValue performTruncateCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI)
return SDValue()
static SDValue performSETCCCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis false
Function Alias Analysis Results
static void fail(const SDLoc &DL, SelectionDAG &DAG, const Twine &Msg, SDValue Val={})
#define X(NUM, ENUM, NAME)
Definition ELF.h:853
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Hexagon Common GEP
const HexagonInstrInfo * TII
#define _
IRTranslator LLVM IR MI
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Register Reg
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
#define T
MachineInstr unsigned OpIdx
static SDValue performVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget, const RISCVTargetLowering &TLI)
static SDValue combineVectorSizedSetCCEquality(EVT VT, SDValue X, SDValue Y, ISD::CondCode CC, const SDLoc &DL, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
Try to map an integer comparison with size > XLEN to vector instructions before type legalization spl...
const SmallVectorImpl< MachineOperand > & Cond
Contains matchers for matching SelectionDAG nodes and values.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static bool callingConvSupported(CallingConv::ID CallConv)
static MachineBasicBlock * LowerFPToInt(MachineInstr &MI, DebugLoc DL, MachineBasicBlock *BB, const TargetInstrInfo &TII, bool IsUnsigned, bool Int64, bool Float64, unsigned LoweredOpcode)
static SDValue TryWideExtMulCombine(SDNode *N, SelectionDAG &DAG)
static MachineBasicBlock * LowerMemcpy(MachineInstr &MI, DebugLoc DL, MachineBasicBlock *BB, const TargetInstrInfo &TII, bool Int64)
static std::optional< unsigned > IsWebAssemblyLocal(SDValue Op, SelectionDAG &DAG)
static SDValue performVectorExtendCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static SDValue performVectorNonNegToFPCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static SDValue unrollVectorShift(SDValue Op, SelectionDAG &DAG)
static SDValue performAnyAllCombine(SDNode *N, SelectionDAG &DAG)
static MachineBasicBlock * LowerCallResults(MachineInstr &CallResults, DebugLoc DL, MachineBasicBlock *BB, const WebAssemblySubtarget *Subtarget, const TargetInstrInfo &TII)
static SDValue TryMatchTrue(SDNode *N, EVT VecVT, SelectionDAG &DAG)
static SDValue GetExtendHigh(SDValue Op, unsigned UserOpc, EVT VT, SelectionDAG &DAG)
SDValue performConvertFPCombine(SDNode *N, SelectionDAG &DAG)
static SDValue performBitmaskCombine(SDNode *N, SelectionDAG &DAG)
static SDValue performVectorTruncZeroCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static bool IsWebAssemblyGlobal(SDValue Op)
static MachineBasicBlock * LowerMemset(MachineInstr &MI, DebugLoc DL, MachineBasicBlock *BB, const TargetInstrInfo &TII, bool Int64)
static bool HasNoSignedZerosOrNaNs(SDValue Op, SelectionDAG &DAG)
SDValue DoubleVectorWidth(SDValue In, unsigned RequiredNumElems, SelectionDAG &DAG)
static SDValue performVectorExtendToFPCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
Convert ({u,s}itofp vec) --> ({u,s}itofp ({s,z}ext vec)) so it doesn't get split up into scalar instr...
static SDValue performShiftCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static SDValue LowerConvertLow(SDValue Op, SelectionDAG &DAG)
static SDValue extractSubVector(SDValue Vec, unsigned IdxVal, SelectionDAG &DAG, const SDLoc &DL, unsigned VectorWidth)
static SDValue performBitcastCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static SDValue truncateVectorWithNARROW(EVT DstVT, SDValue In, const SDLoc &DL, SelectionDAG &DAG)
This file defines the interfaces that WebAssembly uses to lower LLVM code into a selection DAG.
This file provides WebAssembly-specific target descriptions.
This file declares WebAssembly-specific per-machine-function information.
This file declares the WebAssembly-specific subclass of TargetSubtarget.
This file declares the WebAssembly-specific subclass of TargetMachine.
This file contains the declaration of the WebAssembly-specific type parsing utility functions.
This file contains the declaration of the WebAssembly-specific utility functions.
X86 cmov Conversion
static constexpr int Concat[]
Value * RHS
Value * LHS
The Input class is used to parse a yaml document into in-memory structs and vectors.
Class for arbitrary precision integers.
Definition APInt.h:78
uint64_t getZExtValue() const
Get zero extended value.
Definition APInt.h:1563
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
Definition APInt.h:1408
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition APInt.h:307
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
Definition APInt.h:297
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
Definition APInt.h:240
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
Definition APInt.h:1228
Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
an instruction that atomically reads a memory location, combines it with another value,...
@ Add
*p = old + v
@ Sub
*p = old - v
@ And
*p = old & v
@ Xor
*p = old ^ v
BinOp getOperation() const
LLVM Basic Block Representation.
Definition BasicBlock.h:62
static CCValAssign getMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP, bool IsCustom=false)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
A debug info location.
Definition DebugLoc.h:123
Diagnostic information for unsupported feature in backend.
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition TypeSize.h:309
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
Definition FastISel.h:66
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition Function.h:211
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition Function.cpp:358
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition Function.cpp:728
LLVM_ABI unsigned getAddressSpace() const
const GlobalValue * getGlobal() const
ThreadLocalMode getThreadLocalMode() const
Type * getValueType() const
unsigned getTargetFlags() const
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
LLVM_ABI void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
Tracks which library functions to use for a particular subtarget.
const SDValue & getBasePtr() const
const SDValue & getOffset() const
Describe properties that are true of each instruction in the target description file.
void setNoStrip() const
Machine Value Type.
bool is128BitVector() const
Return true if this is a 128-bit vector type.
@ INVALID_SIMPLE_VALUE_TYPE
static auto integer_fixedlen_vector_valuetypes()
SimpleValueType SimpleTy
MVT changeVectorElementType(MVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type...
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
static auto fixedlen_vector_valuetypes()
bool isFixedLengthVector() const
static MVT getVectorVT(MVT VT, unsigned NumElements)
MVT getVectorElementType() const
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static MVT getIntegerVT(unsigned BitWidth)
LLVM_ABI void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
LLVM_ABI instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
iterator insertAfter(iterator I, MachineInstr *MI)
Insert MI into the instruction list after I.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
LLVM_ABI int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
void setFrameAddressIsTaken(bool T)
unsigned getFunctionNumber() const
getFunctionNumber - Return a unique ID for the current function.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
const char * createExternalSymbolName(StringRef Name)
Allocate a string and populate it with the given external symbol name.
MCContext & getContext() const
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
BasicBlockListType::iterator iterator
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineJumpTableInfo * getJumpTableInfo() const
getJumpTableInfo - Return the jump table info object for the current function.
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addFPImm(const ConstantFP *Val) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
Representation of each machine instruction.
mop_range defs()
Returns all explicit operands that are register definitions.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
LLVM_ABI void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
mop_range uses()
Returns all operands which may be register uses.
LLVM_ABI void removeOperand(unsigned OpNo)
Erase an operand from an instruction, leaving it with one fewer operand than it started with.
const MachineOperand & getOperand(unsigned i) const
LLVM_ABI MachineInstrBundleIterator< MachineInstr > eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
const std::vector< MachineJumpTableEntry > & getJumpTables() const
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
MachineOperand class - Representation of each machine instruction operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
void setIsKill(bool Val=true)
Register getReg() const
getReg - Returns the register number.
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
LLVM_ABI MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
void addLiveIn(MCRegister Reg, Register vreg=Register())
addLiveIn - Add the specified register as a live-in.
unsigned getAddressSpace() const
Return the address space for the associated pointer.
MachineMemOperand * getMemOperand() const
Return the unique MachineMemOperand object describing the memory reference performed by operation.
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
Wrapper class representing virtual and physical registers.
Definition Register.h:20
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
ArrayRef< SDUse > ops() const
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
const SDValue & getOperand(unsigned Num) const
uint64_t getConstantOperandVal(unsigned Num) const
Helper method returns the integer value of a ConstantSDNode operand.
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
bool isUndef() const
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
const SDValue & getOperand(unsigned i) const
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
LLVM_ABI bool isKnownNeverLogicalZero(SDValue Op, const APInt &DemandedElts, unsigned Depth=0) const
Test whether the given floating point SDValue (or all elements of it, if it is a vector) is known to ...
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL)
LLVM_ABI SDValue getSplatValue(SDValue V, bool LegalTypes=false)
If V is a splat vector, return its scalar source operand by extracting that element from the source v...
LLVM_ABI MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
LLVM_ABI void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
LLVM_ABI SDValue UnrollVectorOp(SDNode *N, unsigned ResNE=0)
Utility function used by legalize and lowering to "unroll" a vector operation by splitting out the sc...
LLVM_ABI SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
LLVM_ABI SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=LocationSize::precise(0), const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false, SDNodeFlags Flags={})
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
LLVM_ABI SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), BatchAAResults *BatchAA=nullptr)
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
const DataLayout & getDataLayout() const
SDValue getTargetFrameIndex(int FI, EVT VT)
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
LLVM_ABI SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
LLVM_ABI bool SignBitIsZero(SDValue Op, unsigned Depth=0) const
Return true if the sign bit of Op is known to be zero.
LLVM_ABI SDValue getBasicBlock(MachineBasicBlock *MBB)
LLVM_ABI SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
const TargetMachine & getTarget() const
LLVM_ABI SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDValue getValueType(EVT)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
LLVM_ABI bool isKnownNeverNaN(SDValue Op, const APInt &DemandedElts, bool SNaN=false, unsigned Depth=0) const
Test whether the given SDValue (or all elements of it, if it is a vector) is known to never be NaN in...
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
MachineFunction & getMachineFunction() const
SDValue getPOISON(EVT VT)
Return a POISON node. POISON does not have a useful SDLoc.
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op)
Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all elements.
LLVM_ABI SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
LLVM_ABI SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
LLVMContext * getContext() const
LLVM_ABI SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
LLVM_ABI SDValue getMCSymbol(MCSymbol *Sym, EVT VT)
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
LLVM_ABI SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
const SDValue & getBasePtr() const
const SDValue & getOffset() const
const SDValue & getValue() const
Represent a constant reference to a string, i.e.
Definition StringRef.h:56
constexpr size_t size() const
Get the string size.
Definition StringRef.h:144
TargetInstrInfo - Interface to description of machine instruction set.
Provides information about what library functions are available for the current target.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
const TargetMachine & getTargetMachine() const
unsigned MaxLoadsPerMemcmp
Specify maximum number of load instructions per memcmp call.
LegalizeTypeAction
This enum indicates whether a types are legal for a target, and if not, what action should be used to...
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
virtual TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const
Return the preferred vector type legalization action.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setMinimumJumpTableEntries(unsigned Val)
Indicate the minimum number of blocks to generate jump tables.
void setPartialReduceMLAAction(unsigned Opc, MVT AccVT, MVT InputVT, LegalizeAction Action)
Indicate how a PARTIAL_REDUCE_U/SMLA node with Acc type AccVT and Input type InputVT should be treate...
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
unsigned MaxLoadsPerMemcmpOptSize
Likewise for functions with the OptSize attribute.
virtual bool isBinOp(unsigned Opcode) const
Return true if the node is a math/logic binary operator.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setCondCodeAction(ArrayRef< ISD::CondCode > CCs, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn't supported on the target and indicate what to d...
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const
Return the in-memory pointer type for the given address space, defaults to the pointer type from the ...
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
bool isPositionIndependent() const
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
TargetLowering(const TargetLowering &)=delete
virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const
Return true if folding a constant offset with the given GlobalAddress is legal.
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::LibcallImpl LibcallImpl, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
Primary interface to the complete machine description for the target machine.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
bool isFunctionTy() const
True if this is an instance of FunctionType.
Definition Type.h:275
static LLVM_ABI Type * getDoubleTy(LLVMContext &C)
Definition Type.cpp:291
static LLVM_ABI Type * getFloatTy(LLVMContext &C)
Definition Type.cpp:290
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
LLVM_ABI const Value * stripPointerCastsAndAliases() const
Strip off pointer casts, all-zero GEPs, address space casts, and aliases.
Definition Value.cpp:713
static std::optional< unsigned > getLocalForStackObject(MachineFunction &MF, int FrameIndex)
WebAssemblyTargetLowering(const TargetMachine &TM, const WebAssemblySubtarget &STI)
MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const override
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const override
Return the in-memory pointer type for the given address space, defaults to the pointer type from the ...
self_iterator getIterator()
Definition ilist_node.h:123
#define INT64_MIN
Definition DataTypes.h:74
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ Swift
Calling convention for Swift.
Definition CallingConv.h:69
@ PreserveMost
Used for runtime calls that preserves most registers.
Definition CallingConv.h:63
@ CXX_FAST_TLS
Used for access functions.
Definition CallingConv.h:72
@ WASM_EmscriptenInvoke
For emscripten __invoke_* functions.
@ Cold
Attempts to make code in the caller as efficient as possible under the assumption that the call is no...
Definition CallingConv.h:47
@ PreserveAll
Used for runtime calls that preserves (almost) all registers.
Definition CallingConv.h:66
@ Fast
Attempts to make calls as fast as possible (e.g.
Definition CallingConv.h:41
@ SwiftTail
This follows the Swift calling convention in how arguments are passed but guarantees tail calls will ...
Definition CallingConv.h:87
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
Definition ISDOpcodes.h:823
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ PARTIAL_REDUCE_SMLA
PARTIAL_REDUCE_[U|S]MLA(Accumulator, Input1, Input2) The partial reduction nodes sign or zero extend ...
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
Definition ISDOpcodes.h:275
@ BSWAP
Byte Swap and Counting operators.
Definition ISDOpcodes.h:783
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
Definition ISDOpcodes.h:294
@ ADD
Simple integer binary arithmetic operators.
Definition ISDOpcodes.h:264
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
Definition ISDOpcodes.h:518
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
Definition ISDOpcodes.h:220
@ GlobalAddress
Definition ISDOpcodes.h:88
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
Definition ISDOpcodes.h:884
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
Definition ISDOpcodes.h:584
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
Definition ISDOpcodes.h:747
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low ...
Definition ISDOpcodes.h:914
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Definition ISDOpcodes.h:280
@ FP16_TO_FP
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-preci...
@ FMULADD
FMULADD - Performs a * b + c, with, or without, intermediate rounding.
Definition ISDOpcodes.h:528
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition ISDOpcodes.h:997
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
Definition ISDOpcodes.h:254
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
@ GlobalTLSAddress
Definition ISDOpcodes.h:89
@ PARTIAL_REDUCE_UMLA
@ SIGN_EXTEND
Conversion operators.
Definition ISDOpcodes.h:848
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
Definition ISDOpcodes.h:665
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
@ BR_CC
BR_CC - Conditional branch.
@ BRIND
BRIND - Indirect branch.
@ BR_JT
BR_JT - Jumptable branch.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
Definition ISDOpcodes.h:374
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
Definition ISDOpcodes.h:247
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
Definition ISDOpcodes.h:672
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
Definition ISDOpcodes.h:704
@ SHL
Shift and rotation operations.
Definition ISDOpcodes.h:769
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
Definition ISDOpcodes.h:649
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
Definition ISDOpcodes.h:614
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
Definition ISDOpcodes.h:576
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value,...
Definition ISDOpcodes.h:224
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition ISDOpcodes.h:854
@ DEBUGTRAP
DEBUGTRAP - Trap intended to get the attention of a debugger.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
Definition ISDOpcodes.h:815
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum maximum on two values, following IEEE-754 definition...
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ ANY_EXTEND_VECTOR_INREG
ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register any-extension of the low la...
Definition ISDOpcodes.h:903
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
Definition ISDOpcodes.h:892
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
Definition ISDOpcodes.h:727
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
Definition ISDOpcodes.h:982
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
Definition ISDOpcodes.h:110
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition ISDOpcodes.h:930
@ TargetConstant
TargetConstant* - Like Constant*, but the DAG does not do any folding, simplification,...
Definition ISDOpcodes.h:179
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition ISDOpcodes.h:739
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
Definition ISDOpcodes.h:205
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
Definition ISDOpcodes.h:304
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
Definition ISDOpcodes.h:565
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
Definition ISDOpcodes.h:53
@ ExternalSymbol
Definition ISDOpcodes.h:93
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
Definition ISDOpcodes.h:963
@ CLEAR_CACHE
llvm.clear_cache intrinsic Operands: Input Chain, Start Address, End Address Outputs: Output Chain
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
Definition ISDOpcodes.h:925
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
Definition ISDOpcodes.h:949
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
Definition ISDOpcodes.h:860
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
Definition ISDOpcodes.h:837
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
Definition ISDOpcodes.h:534
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
Definition ISDOpcodes.h:365
@ FMINIMUMNUM
FMINIMUMNUM/FMAXIMUMNUM - minimumnum/maximumnum that is same with FMINNUM_IEEE and FMAXNUM_IEEE besid...
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
Definition ISDOpcodes.h:213
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
Definition ISDOpcodes.h:556
LLVM_ABI bool isConstantSplatVector(const SDNode *N, APInt &SplatValue)
Node predicates.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
This namespace contains an enum with a value for every intrinsic/builtin function known by LLVM.
OperandFlags
These are flags set on operands, but should be considered private, all access should go through the M...
Definition MCInstrDesc.h:51
auto m_Value()
Match an arbitrary value and ignore it.
CastOperator_match< OpTy, Instruction::BitCast > m_BitCast(const OpTy &Op)
Matches BitCast.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
bool sd_match(SDNode *N, const SelectionDAG *DAG, Pattern &&P)
CondCode_match m_SpecificCondCode(ISD::CondCode CC)
Match a conditional code SDNode with a specific ISD::CondCode.
CondCode_match m_CondCode()
Match any conditional code SDNode.
TernaryOpc_match< T0_P, T1_P, T2_P, true, false > m_c_SetCC(const T0_P &LHS, const T1_P &RHS, const T2_P &CC)
MCSymbolWasm * getOrCreateFunctionTableSymbol(MCContext &Ctx, const WebAssemblySubtarget *Subtarget)
Returns the __indirect_function_table, for use in call_indirect and in function bitcasts.
bool isWebAssemblyFuncrefType(const Type *Ty)
Return true if this is a WebAssembly Funcref Type.
bool isWebAssemblyTableType(const Type *Ty)
Return true if the table represents a WebAssembly table type.
MCSymbolWasm * getOrCreateFuncrefCallTableSymbol(MCContext &Ctx, const WebAssemblySubtarget *Subtarget)
Returns the __funcref_call_table, for use in funcref calls when lowered to table.set + call_indirect.
bool isValidAddressSpace(unsigned AS)
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo, const LibcallLoweringInfo *libcallLowering)
bool canLowerReturn(size_t ResultSize, const WebAssemblySubtarget *Subtarget)
Returns true if the function's return value(s) can be lowered directly, i.e., not indirectly via a po...
bool isWasmVarAddressSpace(unsigned AS)
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:315
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
Definition MathExtras.h:344
@ Offset
Definition DWP.cpp:557
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
void computeSignatureVTs(const FunctionType *Ty, const Function *TargetFunc, const Function &ContextFunc, const TargetMachine &TM, SmallVectorImpl< MVT > &Params, SmallVectorImpl< MVT > &Results)
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition STLExtras.h:1668
SDValue peekThroughFreeze(SDValue V)
Return the non-frozen source operand of V if it exists.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
LLVM_ABI bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
LLVM_ABI SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
@ Add
Sum of integers.
DWARFExpression::Operation Op
auto max_element(R &&Range)
Provide wrappers to std::max_element which take ranges instead of having to pass begin/end explicitly...
Definition STLExtras.h:2087
constexpr unsigned BitWidth
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1771
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
Definition STLExtras.h:2191
void computeLegalValueVTs(const WebAssemblyTargetLowering &TLI, LLVMContext &Ctx, const DataLayout &DL, Type *Ty, SmallVectorImpl< MVT > &ValueVTs)
constexpr uint64_t NextPowerOf2(uint64_t A)
Returns the next power of two (in 64-bits) that is strictly greater than A.
Definition MathExtras.h:373
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:876
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
Extended Value Type.
Definition ValueTypes.h:35
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
Definition ValueTypes.h:90
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Definition ValueTypes.h:145
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
Definition ValueTypes.h:70
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
Definition ValueTypes.h:155
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition ValueTypes.h:381
bool isByteSized() const
Return true if the bit size is a multiple of 8.
Definition ValueTypes.h:251
uint64_t getScalarSizeInBits() const
Definition ValueTypes.h:393
EVT changeVectorElementType(LLVMContext &Context, EVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type...
Definition ValueTypes.h:98
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition ValueTypes.h:324
bool is128BitVector() const
Return true if this is a 128-bit vector type.
Definition ValueTypes.h:215
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
Definition ValueTypes.h:61
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
Definition ValueTypes.h:389
EVT widenIntegerVectorElementType(LLVMContext &Context) const
Return a VT for an integer vector type with the size of the elements doubled.
Definition ValueTypes.h:460
bool isFixedLengthVector() const
Definition ValueTypes.h:189
bool isVector() const
Return true if this is a vector value type.
Definition ValueTypes.h:176
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
Definition ValueTypes.h:331
bool bitsGE(EVT VT) const
Return true if this has no less bits than VT.
Definition ValueTypes.h:300
bool is256BitVector() const
Return true if this is a 256-bit vector type.
Definition ValueTypes.h:220
LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
Definition ValueTypes.h:336
EVT changeElementType(LLVMContext &Context, EVT EltVT) const
Return a VT for a type whose attributes match ourselves with the exception of the element type that i...
Definition ValueTypes.h:121
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
Definition ValueTypes.h:165
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
Definition ValueTypes.h:344
EVT getHalfNumVectorElementsVT(LLVMContext &Context) const
Definition ValueTypes.h:469
Align getNonZeroOrigAlign() const
unsigned getByValSize() const
bool isInConsecutiveRegsLast() const
Align getNonZeroByValAlign() const
unsigned getBitWidth() const
Get the bit width of this value.
Definition KnownBits.h:44
Matching combinators.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
These are IR-level optimization flags that may be propagated to SDNodes.
This structure is used to pass arguments to makeLibCall function.